diff --git a/cmd/resync.go b/cmd/resync.go
new file mode 100644
index 00000000..83aad59a
--- /dev/null
+++ b/cmd/resync.go
@@ -0,0 +1,101 @@
+// Copyright © 2020 Vulcanize, Inc
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package cmd
+
+import (
+ log "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/resync"
+)
+
+// resyncCmd represents the resync command
+var resyncCmd = &cobra.Command{
+ Use: "resync",
+ Short: "Resync historical data",
+ Long: `Use this command to fill in sections of missing data in the super node`,
+ Run: func(cmd *cobra.Command, args []string) {
+ subCommand = cmd.CalledAs()
+ logWithCommand = *log.WithField("SubCommand", subCommand)
+ resyncCmdCommand()
+ },
+}
+
+func resyncCmdCommand() {
+ rConfig, err := resync.NewReSyncConfig()
+ if err != nil {
+ logWithCommand.Fatal(err)
+ }
+ if err := ipfs.InitIPFSPlugins(); err != nil {
+ logWithCommand.Fatal(err)
+ }
+ rService, err := resync.NewResyncService(rConfig)
+ if err != nil {
+ logWithCommand.Fatal(err)
+ }
+ if err := rService.Resync(); err != nil {
+ logWithCommand.Fatal(err)
+ }
+ logWithCommand.Infof("%s %s resync finished", rConfig.Chain.String(), rConfig.ResyncType.String())
+}
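+
+// Example invocation (hypothetical flag values; the flags themselves are registered in init below):
+//   vulcanizedb resync --resync-chain=ethereum --resync-type=state --resync-start=0 \
+//     --resync-stop=1000 --resync-batch-size=10 --resync-batch-number=100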
+
+func init() {
+ rootCmd.AddCommand(resyncCmd)
+
+ // flags
+ resyncCmd.PersistentFlags().String("ipfs-path", "", "ipfs repository path")
+
+ resyncCmd.PersistentFlags().String("resync-chain", "", "which chain to support, options are currently Ethereum or Bitcoin.")
+ resyncCmd.PersistentFlags().String("resync-type", "", "which type of data to resync")
+ resyncCmd.PersistentFlags().Int("resync-start", 0, "block height to start resync")
+ resyncCmd.PersistentFlags().Int("resync-stop", 0, "block height to stop resync")
+ resyncCmd.PersistentFlags().Int("resync-batch-size", 0, "data fetching batch size")
+ resyncCmd.PersistentFlags().Int("resync-batch-number", 0, "how many goroutines to fetch data concurrently")
+ resyncCmd.PersistentFlags().Bool("resync-clear-old-cache", false, "if true, clear out old data of the provided type within the resync range before resyncing")
+
+ resyncCmd.PersistentFlags().String("btc-http-path", "", "http url for bitcoin node")
+ resyncCmd.PersistentFlags().String("btc-password", "", "password for btc node")
+ resyncCmd.PersistentFlags().String("btc-username", "", "username for btc node")
+ resyncCmd.PersistentFlags().String("btc-node-id", "", "btc node id")
+ resyncCmd.PersistentFlags().String("btc-client-name", "", "btc client name")
+ resyncCmd.PersistentFlags().String("btc-genesis-block", "", "btc genesis block hash")
+ resyncCmd.PersistentFlags().String("btc-network-id", "", "btc network id")
+
+ resyncCmd.PersistentFlags().String("eth-http-path", "", "http url for ethereum node")
+
+ // and their bindings
+ viper.BindPFlag("ipfs.path", resyncCmd.PersistentFlags().Lookup("ipfs-path"))
+
+ viper.BindPFlag("resync.chain", resyncCmd.PersistentFlags().Lookup("resync-chain"))
+ viper.BindPFlag("resync.type", resyncCmd.PersistentFlags().Lookup("resync-type"))
+ viper.BindPFlag("resync.start", resyncCmd.PersistentFlags().Lookup("resync-start"))
+ viper.BindPFlag("resync.stop", resyncCmd.PersistentFlags().Lookup("resync-stop"))
+ viper.BindPFlag("resync.batchSize", resyncCmd.PersistentFlags().Lookup("resync-batch-size"))
+ viper.BindPFlag("resync.batchNumber", resyncCmd.PersistentFlags().Lookup("resync-batch-number"))
+ viper.BindPFlag("resync.clearOldCache", resyncCmd.PersistentFlags().Lookup("resync-clear-old-cache"))
+
+ viper.BindPFlag("bitcoin.httpPath", resyncCmd.PersistentFlags().Lookup("btc-http-path"))
+ viper.BindPFlag("bitcoin.pass", resyncCmd.PersistentFlags().Lookup("btc-password"))
+ viper.BindPFlag("bitcoin.user", resyncCmd.PersistentFlags().Lookup("btc-username"))
+ viper.BindPFlag("bitcoin.nodeID", resyncCmd.PersistentFlags().Lookup("btc-node-id"))
+ viper.BindPFlag("bitcoin.clientName", resyncCmd.PersistentFlags().Lookup("btc-client-name"))
+ viper.BindPFlag("bitcoin.genesisBlock", resyncCmd.PersistentFlags().Lookup("btc-genesis-block"))
+ viper.BindPFlag("bitcoin.networkID", resyncCmd.PersistentFlags().Lookup("btc-network-id"))
+
+ viper.BindPFlag("ethereum.httpPath", resyncCmd.PersistentFlags().Lookup("eth-http-path"))
+}
diff --git a/cmd/root.go b/cmd/root.go
index 1b02abd1..056329bd 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -41,12 +41,9 @@ var (
databaseConfig config.Database
genConfig config.Plugin
ipc string
- levelDbPath string
queueRecheckInterval time.Duration
startingBlockNumber int64
storageDiffsPath string
- syncAll bool
- endingBlockNumber int64
recheckHeadersArg bool
subCommand string
logWithCommand log.Entry
@@ -81,7 +78,6 @@ func initFuncs(cmd *cobra.Command, args []string) {
func setViperConfigs() {
ipc = viper.GetString("client.ipcpath")
- levelDbPath = viper.GetString("client.leveldbpath")
storageDiffsPath = viper.GetString("filesystem.storageDiffsPath")
storageDiffsSource = viper.GetString("storageDiffs.source")
databaseConfig = config.Database{
@@ -114,6 +110,7 @@ func init() {
viper.AutomaticEnv()
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file location")
+ rootCmd.PersistentFlags().String("logfile", "", "file path for logging")
rootCmd.PersistentFlags().String("database-name", "vulcanize_public", "database name")
rootCmd.PersistentFlags().Int("database-port", 5432, "database port")
rootCmd.PersistentFlags().String("database-hostname", "localhost", "database hostname")
@@ -126,6 +123,7 @@ func init() {
rootCmd.PersistentFlags().String("exporter-name", "exporter", "name of exporter plugin")
rootCmd.PersistentFlags().String("log-level", log.InfoLevel.String(), "Log level (trace, debug, info, warn, error, fatal, panic")
+ viper.BindPFlag("logfile", rootCmd.PersistentFlags().Lookup("logfile"))
viper.BindPFlag("database.name", rootCmd.PersistentFlags().Lookup("database-name"))
viper.BindPFlag("database.port", rootCmd.PersistentFlags().Lookup("database-port"))
viper.BindPFlag("database.hostname", rootCmd.PersistentFlags().Lookup("database-hostname"))
diff --git a/cmd/streamEthSubscribe.go b/cmd/streamEthSubscribe.go
index 20051b97..e46aee52 100644
--- a/cmd/streamEthSubscribe.go
+++ b/cmd/streamEthSubscribe.go
@@ -67,7 +67,11 @@ func streamEthSubscription() {
payloadChan := make(chan super_node.SubscriptionPayload, 20000)
// Subscribe to the super node service with the given config/filter parameters
- sub, err := str.Stream(payloadChan, ethSubConfig)
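+ // the super node Stream API now takes RLP-encoded subscription parameters, so serialize the config first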
+ rlpParams, err := rlp.EncodeToBytes(ethSubConfig)
+ if err != nil {
+ logWithCommand.Fatal(err)
+ }
+ sub, err := str.Stream(payloadChan, rlpParams)
if err != nil {
logWithCommand.Fatal(err)
}
@@ -80,24 +84,22 @@ func streamEthSubscription() {
logWithCommand.Error(payload.Err)
continue
}
- data, ok := payload.Data.(eth.StreamResponse)
- if !ok {
- logWithCommand.Warnf("payload data expected type %T got %T", eth.StreamResponse{}, payload.Data)
+ var ethData eth.IPLDs
+ if err := rlp.DecodeBytes(payload.Data, ðData); err != nil {
+ logWithCommand.Error(err)
continue
}
- for _, headerRlp := range data.HeadersRlp {
- var header types.Header
- err = rlp.Decode(bytes.NewBuffer(headerRlp), &header)
- if err != nil {
- logWithCommand.Error(err)
- continue
- }
- fmt.Printf("Header number %d, hash %s\n", header.Number.Int64(), header.Hash().Hex())
- fmt.Printf("header: %v\n", header)
+ var header types.Header
+ err = rlp.Decode(bytes.NewBuffer(ethData.Header.Data), &header)
+ if err != nil {
+ logWithCommand.Error(err)
+ continue
}
- for _, trxRlp := range data.TransactionsRlp {
+ fmt.Printf("Header number %d, hash %s\n", header.Number.Int64(), header.Hash().Hex())
+ fmt.Printf("header: %v\n", header)
+ for _, trxRlp := range ethData.Transactions {
var trx types.Transaction
- buff := bytes.NewBuffer(trxRlp)
+ buff := bytes.NewBuffer(trxRlp.Data)
stream := rlp.NewStream(buff, 0)
err := trx.DecodeRLP(stream)
if err != nil {
@@ -107,9 +109,9 @@ func streamEthSubscription() {
fmt.Printf("Transaction with hash %s\n", trx.Hash().Hex())
fmt.Printf("trx: %v\n", trx)
}
- for _, rctRlp := range data.ReceiptsRlp {
- var rct types.ReceiptForStorage
- buff := bytes.NewBuffer(rctRlp)
+ for _, rctRlp := range ethData.Receipts {
+ var rct types.Receipt
+ buff := bytes.NewBuffer(rctRlp.Data)
stream := rlp.NewStream(buff, 0)
err = rct.DecodeRLP(stream)
if err != nil {
@@ -129,40 +131,34 @@ func streamEthSubscription() {
}
}
// This assumes leafs only
- for key, stateRlp := range data.StateNodesRlp {
+ for _, stateNode := range ethData.StateNodes {
var acct state.Account
- err = rlp.Decode(bytes.NewBuffer(stateRlp), &acct)
+ err = rlp.DecodeBytes(stateNode.IPLD.Data, &acct)
if err != nil {
logWithCommand.Error(err)
continue
}
- fmt.Printf("Account for key %s, and root %s, with balance %d\n",
- key.Hex(), acct.Root.Hex(), acct.Balance.Int64())
- fmt.Printf("state account: %v\n", acct)
+ fmt.Printf("Account for key %s, and root %s, with balance %s\n",
+ stateNode.StateLeafKey.Hex(), acct.Root.Hex(), acct.Balance.String())
+ fmt.Printf("state account: %+v\n", acct)
}
- for stateKey, mappedRlp := range data.StorageNodesRlp {
- fmt.Printf("Storage for state key %s ", stateKey.Hex())
- for storageKey, storageRlp := range mappedRlp {
- fmt.Printf("with storage key %s\n", storageKey.Hex())
- var i []interface{}
- err := rlp.DecodeBytes(storageRlp, &i)
- if err != nil {
- logWithCommand.Error(err)
+ for _, storageNode := range ethData.StorageNodes {
+ fmt.Printf("Storage for state key %s ", storageNode.StateLeafKey.Hex())
+ fmt.Printf("with storage key %s\n", storageNode.StorageLeafKey.Hex())
+ var i []interface{}
+ err := rlp.DecodeBytes(storageNode.IPLD.Data, &i)
+ if err != nil {
+ logWithCommand.Error(err)
+ continue
+ }
+ // if a value node
+ if len(i) == 1 {
+ valueBytes, ok := i[0].([]byte)
+ if !ok {
continue
}
- // if a leaf node
- if len(i) == 2 {
- keyBytes, ok := i[0].([]byte)
- if !ok {
- continue
- }
- valueBytes, ok := i[1].([]byte)
- if !ok {
- continue
- }
- fmt.Printf("Storage leaf key: %s, and value hash: %s\n",
- common.BytesToHash(keyBytes).Hex(), common.BytesToHash(valueBytes).Hex())
- }
+ fmt.Printf("Storage leaf key: %s, and value hash: %s\n",
+ storageNode.StorageLeafKey.Hex(), common.BytesToHash(valueBytes).Hex())
}
}
case err = <-sub.Err():
diff --git a/cmd/superNode.go b/cmd/superNode.go
index 0ba43996..b1239fd2 100644
--- a/cmd/superNode.go
+++ b/cmd/superNode.go
@@ -18,12 +18,13 @@ package cmd
import (
"sync"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs"
+ "github.com/spf13/viper"
"github.com/ethereum/go-ethereum/rpc"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
@@ -50,12 +51,8 @@ and fill in gaps in the data
},
}
-func init() {
- rootCmd.AddCommand(superNodeCmd)
-}
-
func superNode() {
- superNodeConfigs, err := shared.NewSuperNodeConfigs()
+ superNodeConfig, err := super_node.NewSuperNodeConfig()
if err != nil {
logWithCommand.Fatal(err)
}
@@ -63,36 +60,34 @@ func superNode() {
logWithCommand.Fatal(err)
}
wg := &sync.WaitGroup{}
- for _, superNodeConfig := range superNodeConfigs {
- superNode, err := super_node.NewSuperNode(superNodeConfig)
+ superNode, err := super_node.NewSuperNode(superNodeConfig)
+ if err != nil {
+ logWithCommand.Fatal(err)
+ }
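+ // forwardPayloadChan hands converted payloads from the sync process straight to the serve process when both are enabled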
+ var forwardPayloadChan chan shared.ConvertedData
+ if superNodeConfig.Serve {
+ forwardPayloadChan = make(chan shared.ConvertedData, super_node.PayloadChanBufferSize)
+ superNode.FilterAndServe(wg, forwardPayloadChan)
+ if err := startServers(superNode, superNodeConfig); err != nil {
+ logWithCommand.Fatal(err)
+ }
+ }
+ if superNodeConfig.Sync {
+ if err := superNode.ProcessData(wg, forwardPayloadChan); err != nil {
+ logWithCommand.Fatal(err)
+ }
+ }
+ if superNodeConfig.BackFill {
+ backFiller, err := super_node.NewBackFillService(superNodeConfig, forwardPayloadChan)
if err != nil {
logWithCommand.Fatal(err)
}
- var forwardPayloadChan chan shared.StreamedIPLDs
- if superNodeConfig.Serve {
- forwardPayloadChan = make(chan shared.StreamedIPLDs, super_node.PayloadChanBufferSize)
- superNode.ScreenAndServe(wg, forwardPayloadChan)
- if err := startServers(superNode, superNodeConfig); err != nil {
- logWithCommand.Fatal(err)
- }
- }
- if superNodeConfig.Sync {
- if err := superNode.SyncAndPublish(wg, forwardPayloadChan); err != nil {
- logWithCommand.Fatal(err)
- }
- }
- if superNodeConfig.BackFill {
- backFiller, err := super_node.NewBackFillService(superNodeConfig, forwardPayloadChan)
- if err != nil {
- logWithCommand.Fatal(err)
- }
- backFiller.FillGaps(wg)
- }
+ backFiller.FillGapsInSuperNode(wg)
}
wg.Wait()
}
-func startServers(superNode super_node.SuperNode, settings *shared.SuperNodeConfig) error {
+func startServers(superNode super_node.SuperNode, settings *super_node.Config) error {
_, _, err := rpc.StartIPCEndpoint(settings.IPCEndpoint, superNode.APIs())
if err != nil {
return err
@@ -104,3 +99,61 @@ func startServers(superNode super_node.SuperNode, settings *shared.SuperNodeConf
_, _, err = rpc.StartHTTPEndpoint(settings.HTTPEndpoint, superNode.APIs(), []string{settings.Chain.API()}, nil, nil, rpc.HTTPTimeouts{})
return err
}
+
+func init() {
+ rootCmd.AddCommand(superNodeCmd)
+
+ // flags
+ superNodeCmd.PersistentFlags().String("ipfs-path", "", "ipfs repository path")
+
+ superNodeCmd.PersistentFlags().String("supernode-chain", "", "which chain to support, options are currently Ethereum or Bitcoin.")
+ superNodeCmd.PersistentFlags().Bool("supernode-server", false, "turn vdb server on or off")
+ superNodeCmd.PersistentFlags().String("supernode-ws-path", "", "vdb server ws path")
+ superNodeCmd.PersistentFlags().String("supernode-http-path", "", "vdb server http path")
+ superNodeCmd.PersistentFlags().String("supernode-ipc-path", "", "vdb server ipc path")
+ superNodeCmd.PersistentFlags().Bool("supernode-sync", false, "turn vdb sync on or off")
+ superNodeCmd.PersistentFlags().Int("supernode-workers", 0, "how many worker goroutines to publish and index data")
+ superNodeCmd.PersistentFlags().Bool("supernode-back-fill", false, "turn vdb backfill on or off")
+ superNodeCmd.PersistentFlags().Int("supernode-frequency", 0, "how often (in seconds) the backfill process checks for gaps")
+ superNodeCmd.PersistentFlags().Int("supernode-batch-size", 0, "data fetching batch size")
+ superNodeCmd.PersistentFlags().Int("supernode-batch-number", 0, "how many goroutines to fetch data concurrently")
+
+ superNodeCmd.PersistentFlags().String("btc-ws-path", "", "ws url for bitcoin node")
+ superNodeCmd.PersistentFlags().String("btc-http-path", "", "http url for bitcoin node")
+ superNodeCmd.PersistentFlags().String("btc-password", "", "password for btc node")
+ superNodeCmd.PersistentFlags().String("btc-username", "", "username for btc node")
+ superNodeCmd.PersistentFlags().String("btc-node-id", "", "btc node id")
+ superNodeCmd.PersistentFlags().String("btc-client-name", "", "btc client name")
+ superNodeCmd.PersistentFlags().String("btc-genesis-block", "", "btc genesis block hash")
+ superNodeCmd.PersistentFlags().String("btc-network-id", "", "btc network id")
+
+ superNodeCmd.PersistentFlags().String("eth-ws-path", "", "ws url for ethereum node")
+ superNodeCmd.PersistentFlags().String("eth-http-path", "", "http url for ethereum node")
+
+ // and their bindings
+ viper.BindPFlag("ipfs.path", superNodeCmd.PersistentFlags().Lookup("ipfs-path"))
+
+ viper.BindPFlag("superNode.chain", superNodeCmd.PersistentFlags().Lookup("supernode-chain"))
+ viper.BindPFlag("superNode.server", superNodeCmd.PersistentFlags().Lookup("supernode-server"))
+ viper.BindPFlag("superNode.wsPath", superNodeCmd.PersistentFlags().Lookup("supernode-ws-path"))
+ viper.BindPFlag("superNode.httpPath", superNodeCmd.PersistentFlags().Lookup("supernode-http-path"))
+ viper.BindPFlag("superNode.ipcPath", superNodeCmd.PersistentFlags().Lookup("supernode-ipc-path"))
+ viper.BindPFlag("superNode.sync", superNodeCmd.PersistentFlags().Lookup("supernode-sync"))
+ viper.BindPFlag("superNode.workers", superNodeCmd.PersistentFlags().Lookup("supernode-workers"))
+ viper.BindPFlag("superNode.backFill", superNodeCmd.PersistentFlags().Lookup("supernode-back-fill"))
+ viper.BindPFlag("superNode.frequency", superNodeCmd.PersistentFlags().Lookup("supernode-frequency"))
+ viper.BindPFlag("superNode.batchSize", superNodeCmd.PersistentFlags().Lookup("supernode-batch-size"))
+ viper.BindPFlag("superNode.batchNumber", superNodeCmd.PersistentFlags().Lookup("supernode-batch-number"))
+
+ viper.BindPFlag("bitcoin.wsPath", superNodeCmd.PersistentFlags().Lookup("btc-ws-path"))
+ viper.BindPFlag("bitcoin.httpPath", superNodeCmd.PersistentFlags().Lookup("btc-http-path"))
+ viper.BindPFlag("bitcoin.pass", superNodeCmd.PersistentFlags().Lookup("btc-password"))
+ viper.BindPFlag("bitcoin.user", superNodeCmd.PersistentFlags().Lookup("btc-username"))
+ viper.BindPFlag("bitcoin.nodeID", superNodeCmd.PersistentFlags().Lookup("btc-node-id"))
+ viper.BindPFlag("bitcoin.clientName", superNodeCmd.PersistentFlags().Lookup("btc-client-name"))
+ viper.BindPFlag("bitcoin.genesisBlock", superNodeCmd.PersistentFlags().Lookup("btc-genesis-block"))
+ viper.BindPFlag("bitcoin.networkID", superNodeCmd.PersistentFlags().Lookup("btc-network-id"))
+
+ viper.BindPFlag("ethereum.wsPath", superNodeCmd.PersistentFlags().Lookup("eth-ws-path"))
+ viper.BindPFlag("ethereum.httpPath", superNodeCmd.PersistentFlags().Lookup("eth-http-path"))
+}
diff --git a/cmd/watch.go b/cmd/watch.go
new file mode 100644
index 00000000..0978e425
--- /dev/null
+++ b/cmd/watch.go
@@ -0,0 +1,43 @@
+// Copyright © 2020 Vulcanize, Inc
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+// watchCmd represents the watch command
+var watchCmd = &cobra.Command{
+ Use: "watch",
+ Short: "Watch and transform data from a chain source",
+ Long: `This command allows one to configure a set of wasm functions and SQL trigger functions
+that call them to watch and transform data from the specified chain source.
+
+A watcher is composed of four parts:
+1) Go execution engine (this command), which fetches raw chain data and adds it to the Postgres queued ready data tables
+2) TOML config file, which specifies what subset of chain data to fetch and from where, and contains references to the components below
+3) Set of WASM binaries, which are loaded into Postgres and called by the trigger functions
+4) Set of PostgreSQL trigger functions, which automatically act on data as it is inserted into the queued ready data tables`,
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("watch called")
+ },
+}
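+
+// A minimal sketch of part 4 above, assuming a hypothetical wasm-backed transform function
+// eth.transform_queued(bytea) has already been loaded into Postgres (all names are illustrative):
+//
+//   CREATE FUNCTION eth.act_on_queued_data() RETURNS trigger AS $$
+//   BEGIN
+//       PERFORM eth.transform_queued(NEW.data);
+//       RETURN NEW;
+//   END;
+//   $$ LANGUAGE plpgsql;
+//
+//   CREATE TRIGGER queue_data_inserted
+//       AFTER INSERT ON eth.queue_data
+//       FOR EACH ROW EXECUTE PROCEDURE eth.act_on_queued_data();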
+
+func init() {
+ rootCmd.AddCommand(watchCmd)
+}
diff --git a/db/migrations/00012_create_eth_header_cids_table.sql b/db/migrations/00012_create_eth_header_cids_table.sql
index a676b12e..c689ec40 100644
--- a/db/migrations/00012_create_eth_header_cids_table.sql
+++ b/db/migrations/00012_create_eth_header_cids_table.sql
@@ -7,6 +7,7 @@ CREATE TABLE eth.header_cids (
cid TEXT NOT NULL,
td NUMERIC NOT NULL,
node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE,
+ reward NUMERIC NOT NULL,
UNIQUE (block_number, block_hash)
);
diff --git a/db/migrations/00013_create_eth_uncle_cids_table.sql b/db/migrations/00013_create_eth_uncle_cids_table.sql
index d79976df..8e372e4c 100644
--- a/db/migrations/00013_create_eth_uncle_cids_table.sql
+++ b/db/migrations/00013_create_eth_uncle_cids_table.sql
@@ -5,6 +5,7 @@ CREATE TABLE eth.uncle_cids (
block_hash VARCHAR(66) NOT NULL,
parent_hash VARCHAR(66) NOT NULL,
cid TEXT NOT NULL,
+ reward NUMERIC NOT NULL,
UNIQUE (header_id, block_hash)
);
diff --git a/db/migrations/00024_create_eth_queued_data_table.sql b/db/migrations/00024_create_eth_queued_data_table.sql
new file mode 100644
index 00000000..87f58ad8
--- /dev/null
+++ b/db/migrations/00024_create_eth_queued_data_table.sql
@@ -0,0 +1,9 @@
+-- +goose Up
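+-- queue_data buffers raw chain data by block height until it is ready to be processed;
+-- height is UNIQUE so each block is queued at most once and can be dequeued in order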
+CREATE TABLE eth.queue_data (
+ id SERIAL PRIMARY KEY,
+ data BYTEA NOT NULL,
+ height BIGINT UNIQUE NOT NULL
+);
+
+-- +goose Down
+DROP TABLE eth.queue_data;
\ No newline at end of file
diff --git a/db/migrations/00025_create_btc_queued_data_table.sql b/db/migrations/00025_create_btc_queued_data_table.sql
new file mode 100644
index 00000000..c1344f86
--- /dev/null
+++ b/db/migrations/00025_create_btc_queued_data_table.sql
@@ -0,0 +1,9 @@
+-- +goose Up
+CREATE TABLE btc.queue_data (
+ id SERIAL PRIMARY KEY,
+ data BYTEA NOT NULL,
+ height BIGINT UNIQUE NOT NULL
+);
+
+-- +goose Down
+DROP TABLE btc.queue_data;
\ No newline at end of file
diff --git a/db/migrations/00026_create_postgraphile_comments.sql b/db/migrations/00026_create_postgraphile_comments.sql
new file mode 100644
index 00000000..65711df0
--- /dev/null
+++ b/db/migrations/00026_create_postgraphile_comments.sql
@@ -0,0 +1,13 @@
+-- +goose Up
+COMMENT ON TABLE public.nodes IS E'@name NodeInfo';
+COMMENT ON TABLE btc.header_cids IS E'@name BtcHeaderCids';
+COMMENT ON TABLE btc.transaction_cids IS E'@name BtcTransactionCids';
+COMMENT ON TABLE btc.queue_data IS E'@name BtcQueueData';
+COMMENT ON TABLE eth.transaction_cids IS E'@name EthTransactionCids';
+COMMENT ON TABLE eth.header_cids IS E'@name EthHeaderCids';
+COMMENT ON TABLE eth.queue_data IS E'@name EthQueueData';
+COMMENT ON TABLE public.headers IS E'@name EthHeaders';
+COMMENT ON COLUMN public.headers.node_id IS E'@name EthNodeID';
+COMMENT ON COLUMN public.nodes.node_id IS E'@name ChainNodeID';
+COMMENT ON COLUMN eth.header_cids.node_id IS E'@name EthNodeID';
+COMMENT ON COLUMN btc.header_cids.node_id IS E'@name BtcNodeID';
\ No newline at end of file
diff --git a/db/migrations/00027_update_state_cids.sql b/db/migrations/00027_update_state_cids.sql
new file mode 100644
index 00000000..daea294b
--- /dev/null
+++ b/db/migrations/00027_update_state_cids.sql
@@ -0,0 +1,37 @@
+-- +goose Up
+ALTER TABLE eth.state_cids
+ADD COLUMN state_path BYTEA;
+
+ALTER TABLE eth.state_cids
+DROP COLUMN leaf;
+
+ALTER TABLE eth.state_cids
+ADD COLUMN node_type INTEGER;
+
+ALTER TABLE eth.state_cids
+ALTER COLUMN state_key DROP NOT NULL;
+
+ALTER TABLE eth.state_cids
+DROP CONSTRAINT state_cids_header_id_state_key_key;
+
+ALTER TABLE eth.state_cids
+ADD CONSTRAINT state_cids_header_id_state_path_key UNIQUE (header_id, state_path);
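+
+-- assumed node_type encoding (mirroring the trie node types used by the statediffing service):
+-- 0 = branch, 1 = extension, 2 = leaf, 3 = removed; state_leaf_key is nullable because only leaf nodes carry a key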
+
+-- +goose Down
+ALTER TABLE eth.state_cids
+ADD CONSTRAINT state_cids_header_id_state_key_key UNIQUE (header_id, state_key);
+
+ALTER TABLE eth.state_cids
+DROP CONSTRAINT state_cids_header_id_state_path_key;
+
+ALTER TABLE eth.state_cids
+ALTER COLUMN state_key SET NOT NULL;
+
+ALTER TABLE eth.state_cids
+DROP COLUMN node_type;
+
+ALTER TABLE eth.state_cids
+ADD COLUMN leaf BOOLEAN NOT NULL;
+
+ALTER TABLE eth.state_cids
+DROP COLUMN state_path;
\ No newline at end of file
diff --git a/db/migrations/00028_update_storage_cids.sql b/db/migrations/00028_update_storage_cids.sql
new file mode 100644
index 00000000..385f2468
--- /dev/null
+++ b/db/migrations/00028_update_storage_cids.sql
@@ -0,0 +1,37 @@
+-- +goose Up
+ALTER TABLE eth.storage_cids
+ADD COLUMN storage_path BYTEA;
+
+ALTER TABLE eth.storage_cids
+DROP COLUMN leaf;
+
+ALTER TABLE eth.storage_cids
+ADD COLUMN node_type INTEGER;
+
+ALTER TABLE eth.storage_cids
+ALTER COLUMN storage_key DROP NOT NULL;
+
+ALTER TABLE eth.storage_cids
+DROP CONSTRAINT storage_cids_state_id_storage_key_key;
+
+ALTER TABLE eth.storage_cids
+ADD CONSTRAINT storage_cids_state_id_storage_path_key UNIQUE (state_id, storage_path);
+
+-- +goose Down
+ALTER TABLE eth.storage_cids
+DROP CONSTRAINT storage_cids_state_id_storage_path_key;
+
+ALTER TABLE eth.storage_cids
+ADD CONSTRAINT storage_cids_state_id_storage_key_key UNIQUE (state_id, storage_key);
+
+ALTER TABLE eth.storage_cids
+ALTER COLUMN storage_key SET NOT NULL;
+
+ALTER TABLE eth.storage_cids
+DROP COLUMN node_type;
+
+ALTER TABLE eth.storage_cids
+ADD COLUMN leaf BOOLEAN NOT NULL;
+
+ALTER TABLE eth.storage_cids
+DROP COLUMN storage_path;
\ No newline at end of file
diff --git a/db/migrations/00029_update_header_cids.sql b/db/migrations/00029_update_header_cids.sql
new file mode 100644
index 00000000..1c69c11c
--- /dev/null
+++ b/db/migrations/00029_update_header_cids.sql
@@ -0,0 +1,37 @@
+-- +goose Up
+ALTER TABLE eth.header_cids
+ADD COLUMN state_root VARCHAR(66);
+
+ALTER TABLE eth.header_cids
+ADD COLUMN tx_root VARCHAR(66);
+
+ALTER TABLE eth.header_cids
+ADD COLUMN receipt_root VARCHAR(66);
+
+ALTER TABLE eth.header_cids
+ADD COLUMN uncle_root VARCHAR(66);
+
+ALTER TABLE eth.header_cids
+ADD COLUMN bloom BYTEA;
+
+ALTER TABLE eth.header_cids
+ADD COLUMN timestamp NUMERIC;
+
+-- +goose Down
+ALTER TABLE eth.header_cids
+DROP COLUMN timestamp;
+
+ALTER TABLE eth.header_cids
+DROP COLUMN bloom;
+
+ALTER TABLE eth.header_cids
+DROP COLUMN uncle_root;
+
+ALTER TABLE eth.header_cids
+DROP COLUMN receipt_root;
+
+ALTER TABLE eth.header_cids
+DROP COLUMN tx_root;
+
+ALTER TABLE eth.header_cids
+DROP COLUMN state_root;
\ No newline at end of file
diff --git a/db/migrations/00030_create_eth_state_accouts_table.sql b/db/migrations/00030_create_eth_state_accouts_table.sql
new file mode 100644
index 00000000..322d948f
--- /dev/null
+++ b/db/migrations/00030_create_eth_state_accouts_table.sql
@@ -0,0 +1,13 @@
+-- +goose Up
+CREATE TABLE eth.state_accounts (
+ id SERIAL PRIMARY KEY,
+ state_id INTEGER NOT NULL REFERENCES eth.state_cids (id) ON DELETE CASCADE,
+ balance NUMERIC NOT NULL,
+ nonce INTEGER NOT NULL,
+ code_hash BYTEA NOT NULL,
+ storage_root VARCHAR(66) NOT NULL,
+ UNIQUE (state_id)
+);
+
+-- +goose Down
+DROP TABLE eth.state_accounts;
\ No newline at end of file
diff --git a/db/migrations/00031_rename_to_leaf_key.sql b/db/migrations/00031_rename_to_leaf_key.sql
new file mode 100644
index 00000000..0bcf28bf
--- /dev/null
+++ b/db/migrations/00031_rename_to_leaf_key.sql
@@ -0,0 +1,13 @@
+-- +goose Up
+ALTER TABLE eth.state_cids
+RENAME COLUMN state_key TO state_leaf_key;
+
+ALTER TABLE eth.storage_cids
+RENAME COLUMN storage_key TO storage_leaf_key;
+
+-- +goose Down
+ALTER TABLE eth.storage_cids
+RENAME COLUMN storage_leaf_key TO storage_key;
+
+ALTER TABLE eth.state_cids
+RENAME COLUMN state_leaf_key TO state_key;
\ No newline at end of file
diff --git a/db/migrations/maaaybe/00026_create_eth_headers_table.sql b/db/migrations/maaaybe/00026_create_eth_headers_table.sql
new file mode 100644
index 00000000..e69de29b
diff --git a/db/migrations/maaaybe/00027_create_eth_uncles_table.sql b/db/migrations/maaaybe/00027_create_eth_uncles_table.sql
new file mode 100644
index 00000000..e69de29b
diff --git a/db/migrations/maaaybe/00028_create_eth_transactions_table.sql b/db/migrations/maaaybe/00028_create_eth_transactions_table.sql
new file mode 100644
index 00000000..e69de29b
diff --git a/db/migrations/maaaybe/00029_create_eth_receipts_table.sql b/db/migrations/maaaybe/00029_create_eth_receipts_table.sql
new file mode 100644
index 00000000..e69de29b
diff --git a/db/migrations/maaaybe/00030_create_eth_logs_table.sql b/db/migrations/maaaybe/00030_create_eth_logs_table.sql
new file mode 100644
index 00000000..e69de29b
diff --git a/db/migrations/maaaybe/00031_create_eth_accounts_table.sql b/db/migrations/maaaybe/00031_create_eth_accounts_table.sql
new file mode 100644
index 00000000..e69de29b
diff --git a/db/migrations/maaaybe/00032_create_eth_storage_leaf_table.sql b/db/migrations/maaaybe/00032_create_eth_storage_leaf_table.sql
new file mode 100644
index 00000000..e69de29b
diff --git a/db/migrations/maaaybe/00033_create_btc_headers_table.sql b/db/migrations/maaaybe/00033_create_btc_headers_table.sql
new file mode 100644
index 00000000..e69de29b
diff --git a/db/migrations/maaaybe/00034_create_btc_transactions_table.sql.go b/db/migrations/maaaybe/00034_create_btc_transactions_table.sql.go
new file mode 100644
index 00000000..f7436b85
--- /dev/null
+++ b/db/migrations/maaaybe/00034_create_btc_transactions_table.sql.go
@@ -0,0 +1 @@
+package maaaybe
diff --git a/db/schema.sql b/db/schema.sql
index d0cef093..4b287ed9 100644
--- a/db/schema.sql
+++ b/db/schema.sql
@@ -3,7 +3,7 @@
--
-- Dumped from database version 10.10
--- Dumped by pg_dump version 12.1
+-- Dumped by pg_dump version 10.10
SET statement_timeout = 0;
SET lock_timeout = 0;
@@ -30,8 +30,24 @@ CREATE SCHEMA btc;
CREATE SCHEMA eth;
+--
+-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: -
+--
+
+CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog;
+
+
+--
+-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: -
+--
+
+COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language';
+
+
SET default_tablespace = '';
+SET default_with_oids = false;
+
--
-- Name: header_cids; Type: TABLE; Schema: btc; Owner: -
--
@@ -48,6 +64,20 @@ CREATE TABLE btc.header_cids (
);
+--
+-- Name: TABLE header_cids; Type: COMMENT; Schema: btc; Owner: -
+--
+
+COMMENT ON TABLE btc.header_cids IS '@name BtcHeaderCids';
+
+
+--
+-- Name: COLUMN header_cids.node_id; Type: COMMENT; Schema: btc; Owner: -
+--
+
+COMMENT ON COLUMN btc.header_cids.node_id IS '@name BtcNodeID';
+
+
--
-- Name: header_cids_id_seq; Type: SEQUENCE; Schema: btc; Owner: -
--
@@ -68,6 +98,44 @@ CREATE SEQUENCE btc.header_cids_id_seq
ALTER SEQUENCE btc.header_cids_id_seq OWNED BY btc.header_cids.id;
+--
+-- Name: queue_data; Type: TABLE; Schema: btc; Owner: -
+--
+
+CREATE TABLE btc.queue_data (
+ id integer NOT NULL,
+ data bytea NOT NULL,
+ height bigint NOT NULL
+);
+
+
+--
+-- Name: TABLE queue_data; Type: COMMENT; Schema: btc; Owner: -
+--
+
+COMMENT ON TABLE btc.queue_data IS '@name BtcQueueData';
+
+
+--
+-- Name: queue_data_id_seq; Type: SEQUENCE; Schema: btc; Owner: -
+--
+
+CREATE SEQUENCE btc.queue_data_id_seq
+ AS integer
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+--
+-- Name: queue_data_id_seq; Type: SEQUENCE OWNED BY; Schema: btc; Owner: -
+--
+
+ALTER SEQUENCE btc.queue_data_id_seq OWNED BY btc.queue_data.id;
+
+
--
-- Name: transaction_cids; Type: TABLE; Schema: btc; Owner: -
--
@@ -83,6 +151,13 @@ CREATE TABLE btc.transaction_cids (
);
+--
+-- Name: TABLE transaction_cids; Type: COMMENT; Schema: btc; Owner: -
+--
+
+COMMENT ON TABLE btc.transaction_cids IS '@name BtcTransactionCids';
+
+
--
-- Name: transaction_cids_id_seq; Type: SEQUENCE; Schema: btc; Owner: -
--
@@ -185,10 +260,31 @@ CREATE TABLE eth.header_cids (
parent_hash character varying(66) NOT NULL,
cid text NOT NULL,
td numeric NOT NULL,
- node_id integer NOT NULL
+ node_id integer NOT NULL,
+ reward numeric NOT NULL,
+ state_root character varying(66),
+ tx_root character varying(66),
+ receipt_root character varying(66),
+ uncle_root character varying(66),
+ bloom bytea,
+ "timestamp" numeric
);
+--
+-- Name: TABLE header_cids; Type: COMMENT; Schema: eth; Owner: -
+--
+
+COMMENT ON TABLE eth.header_cids IS '@name EthHeaderCids';
+
+
+--
+-- Name: COLUMN header_cids.node_id; Type: COMMENT; Schema: eth; Owner: -
+--
+
+COMMENT ON COLUMN eth.header_cids.node_id IS '@name EthNodeID';
+
+
--
-- Name: header_cids_id_seq; Type: SEQUENCE; Schema: eth; Owner: -
--
@@ -209,6 +305,44 @@ CREATE SEQUENCE eth.header_cids_id_seq
ALTER SEQUENCE eth.header_cids_id_seq OWNED BY eth.header_cids.id;
+--
+-- Name: queue_data; Type: TABLE; Schema: eth; Owner: -
+--
+
+CREATE TABLE eth.queue_data (
+ id integer NOT NULL,
+ data bytea NOT NULL,
+ height bigint NOT NULL
+);
+
+
+--
+-- Name: TABLE queue_data; Type: COMMENT; Schema: eth; Owner: -
+--
+
+COMMENT ON TABLE eth.queue_data IS '@name EthQueueData';
+
+
+--
+-- Name: queue_data_id_seq; Type: SEQUENCE; Schema: eth; Owner: -
+--
+
+CREATE SEQUENCE eth.queue_data_id_seq
+ AS integer
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+--
+-- Name: queue_data_id_seq; Type: SEQUENCE OWNED BY; Schema: eth; Owner: -
+--
+
+ALTER SEQUENCE eth.queue_data_id_seq OWNED BY eth.queue_data.id;
+
+
--
-- Name: receipt_cids; Type: TABLE; Schema: eth; Owner: -
--
@@ -245,6 +379,40 @@ CREATE SEQUENCE eth.receipt_cids_id_seq
ALTER SEQUENCE eth.receipt_cids_id_seq OWNED BY eth.receipt_cids.id;
+--
+-- Name: state_accounts; Type: TABLE; Schema: eth; Owner: -
+--
+
+CREATE TABLE eth.state_accounts (
+ id integer NOT NULL,
+ state_id integer NOT NULL,
+ balance numeric NOT NULL,
+ nonce integer NOT NULL,
+ code_hash bytea NOT NULL,
+ storage_root character varying(66) NOT NULL
+);
+
+
+--
+-- Name: state_accounts_id_seq; Type: SEQUENCE; Schema: eth; Owner: -
+--
+
+CREATE SEQUENCE eth.state_accounts_id_seq
+ AS integer
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+--
+-- Name: state_accounts_id_seq; Type: SEQUENCE OWNED BY; Schema: eth; Owner: -
+--
+
+ALTER SEQUENCE eth.state_accounts_id_seq OWNED BY eth.state_accounts.id;
+
+
--
-- Name: state_cids; Type: TABLE; Schema: eth; Owner: -
--
@@ -252,9 +420,10 @@ ALTER SEQUENCE eth.receipt_cids_id_seq OWNED BY eth.receipt_cids.id;
CREATE TABLE eth.state_cids (
id integer NOT NULL,
header_id integer NOT NULL,
- state_key character varying(66) NOT NULL,
- leaf boolean NOT NULL,
- cid text NOT NULL
+ state_leaf_key character varying(66),
+ cid text NOT NULL,
+ state_path bytea,
+ node_type integer
);
@@ -285,9 +454,10 @@ ALTER SEQUENCE eth.state_cids_id_seq OWNED BY eth.state_cids.id;
CREATE TABLE eth.storage_cids (
id integer NOT NULL,
state_id integer NOT NULL,
- storage_key character varying(66) NOT NULL,
- leaf boolean NOT NULL,
- cid text NOT NULL
+ storage_leaf_key character varying(66),
+ cid text NOT NULL,
+ storage_path bytea,
+ node_type integer
);
@@ -326,6 +496,13 @@ CREATE TABLE eth.transaction_cids (
);
+--
+-- Name: TABLE transaction_cids; Type: COMMENT; Schema: eth; Owner: -
+--
+
+COMMENT ON TABLE eth.transaction_cids IS '@name EthTransactionCids';
+
+
--
-- Name: transaction_cids_id_seq; Type: SEQUENCE; Schema: eth; Owner: -
--
@@ -355,7 +532,8 @@ CREATE TABLE eth.uncle_cids (
header_id integer NOT NULL,
block_hash character varying(66) NOT NULL,
parent_hash character varying(66) NOT NULL,
- cid text NOT NULL
+ cid text NOT NULL,
+ reward numeric NOT NULL
);
@@ -616,6 +794,20 @@ CREATE TABLE public.headers (
);
+--
+-- Name: TABLE headers; Type: COMMENT; Schema: public; Owner: -
+--
+
+COMMENT ON TABLE public.headers IS '@name EthHeaders';
+
+
+--
+-- Name: COLUMN headers.node_id; Type: COMMENT; Schema: public; Owner: -
+--
+
+COMMENT ON COLUMN public.headers.node_id IS '@name EthNodeID';
+
+
--
-- Name: headers_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
@@ -649,6 +841,20 @@ CREATE TABLE public.nodes (
);
+--
+-- Name: TABLE nodes; Type: COMMENT; Schema: public; Owner: -
+--
+
+COMMENT ON TABLE public.nodes IS '@name NodeInfo';
+
+
+--
+-- Name: COLUMN nodes.node_id; Type: COMMENT; Schema: public; Owner: -
+--
+
+COMMENT ON COLUMN public.nodes.node_id IS '@name ChainNodeID';
+
+
--
-- Name: nodes_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
@@ -771,6 +977,13 @@ ALTER SEQUENCE public.watched_logs_id_seq OWNED BY public.watched_logs.id;
ALTER TABLE ONLY btc.header_cids ALTER COLUMN id SET DEFAULT nextval('btc.header_cids_id_seq'::regclass);
+--
+-- Name: queue_data id; Type: DEFAULT; Schema: btc; Owner: -
+--
+
+ALTER TABLE ONLY btc.queue_data ALTER COLUMN id SET DEFAULT nextval('btc.queue_data_id_seq'::regclass);
+
+
--
-- Name: transaction_cids id; Type: DEFAULT; Schema: btc; Owner: -
--
@@ -799,6 +1012,13 @@ ALTER TABLE ONLY btc.tx_outputs ALTER COLUMN id SET DEFAULT nextval('btc.tx_outp
ALTER TABLE ONLY eth.header_cids ALTER COLUMN id SET DEFAULT nextval('eth.header_cids_id_seq'::regclass);
+--
+-- Name: queue_data id; Type: DEFAULT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.queue_data ALTER COLUMN id SET DEFAULT nextval('eth.queue_data_id_seq'::regclass);
+
+
--
-- Name: receipt_cids id; Type: DEFAULT; Schema: eth; Owner: -
--
@@ -806,6 +1026,13 @@ ALTER TABLE ONLY eth.header_cids ALTER COLUMN id SET DEFAULT nextval('eth.header
ALTER TABLE ONLY eth.receipt_cids ALTER COLUMN id SET DEFAULT nextval('eth.receipt_cids_id_seq'::regclass);
+--
+-- Name: state_accounts id; Type: DEFAULT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.state_accounts ALTER COLUMN id SET DEFAULT nextval('eth.state_accounts_id_seq'::regclass);
+
+
--
-- Name: state_cids id; Type: DEFAULT; Schema: eth; Owner: -
--
@@ -927,6 +1154,22 @@ ALTER TABLE ONLY btc.header_cids
ADD CONSTRAINT header_cids_pkey PRIMARY KEY (id);
+--
+-- Name: queue_data queue_data_height_key; Type: CONSTRAINT; Schema: btc; Owner: -
+--
+
+ALTER TABLE ONLY btc.queue_data
+ ADD CONSTRAINT queue_data_height_key UNIQUE (height);
+
+
+--
+-- Name: queue_data queue_data_pkey; Type: CONSTRAINT; Schema: btc; Owner: -
+--
+
+ALTER TABLE ONLY btc.queue_data
+ ADD CONSTRAINT queue_data_pkey PRIMARY KEY (id);
+
+
--
-- Name: transaction_cids transaction_cids_pkey; Type: CONSTRAINT; Schema: btc; Owner: -
--
@@ -991,6 +1234,22 @@ ALTER TABLE ONLY eth.header_cids
ADD CONSTRAINT header_cids_pkey PRIMARY KEY (id);
+--
+-- Name: queue_data queue_data_height_key; Type: CONSTRAINT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.queue_data
+ ADD CONSTRAINT queue_data_height_key UNIQUE (height);
+
+
+--
+-- Name: queue_data queue_data_pkey; Type: CONSTRAINT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.queue_data
+ ADD CONSTRAINT queue_data_pkey PRIMARY KEY (id);
+
+
--
-- Name: receipt_cids receipt_cids_pkey; Type: CONSTRAINT; Schema: eth; Owner: -
--
@@ -1000,11 +1259,27 @@ ALTER TABLE ONLY eth.receipt_cids
--
--- Name: state_cids state_cids_header_id_state_key_key; Type: CONSTRAINT; Schema: eth; Owner: -
+-- Name: state_accounts state_accounts_pkey; Type: CONSTRAINT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.state_accounts
+ ADD CONSTRAINT state_accounts_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: state_accounts state_accounts_state_id_key; Type: CONSTRAINT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.state_accounts
+ ADD CONSTRAINT state_accounts_state_id_key UNIQUE (state_id);
+
+
+--
+-- Name: state_cids state_cids_header_id_state_path_key; Type: CONSTRAINT; Schema: eth; Owner: -
--
ALTER TABLE ONLY eth.state_cids
- ADD CONSTRAINT state_cids_header_id_state_key_key UNIQUE (header_id, state_key);
+ ADD CONSTRAINT state_cids_header_id_state_path_key UNIQUE (header_id, state_path);
--
@@ -1024,11 +1299,11 @@ ALTER TABLE ONLY eth.storage_cids
--
--- Name: storage_cids storage_cids_state_id_storage_key_key; Type: CONSTRAINT; Schema: eth; Owner: -
+-- Name: storage_cids storage_cids_state_id_storage_path_key; Type: CONSTRAINT; Schema: eth; Owner: -
--
ALTER TABLE ONLY eth.storage_cids
- ADD CONSTRAINT storage_cids_state_id_storage_key_key UNIQUE (state_id, storage_key);
+ ADD CONSTRAINT storage_cids_state_id_storage_path_key UNIQUE (state_id, storage_path);
--
@@ -1321,6 +1596,14 @@ ALTER TABLE ONLY eth.receipt_cids
ADD CONSTRAINT receipt_cids_tx_id_fkey FOREIGN KEY (tx_id) REFERENCES eth.transaction_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
+--
+-- Name: state_accounts state_accounts_state_id_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.state_accounts
+ ADD CONSTRAINT state_accounts_state_id_fkey FOREIGN KEY (state_id) REFERENCES eth.state_cids(id) ON DELETE CASCADE;
+
+
--
-- Name: state_cids state_cids_header_id_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: -
--
diff --git a/dockerfiles/migrations/Dockerfile b/dockerfiles/migrations/Dockerfile
new file mode 100644
index 00000000..fd4c9139
--- /dev/null
+++ b/dockerfiles/migrations/Dockerfile
@@ -0,0 +1,40 @@
+FROM golang:alpine
+
+RUN apk --update --no-cache add make git g++ linux-headers
+# DEBUG
+RUN apk add busybox-extras
+
+# this is probably a noob move, but I want apk from alpine for the above but need to avoid Go 1.13 below as this error still occurs https://github.com/ipfs/go-ipfs/issues/6603
+FROM golang:1.12.4 as builder
+
+# Get and build vulcanizedb
+ADD . /go/src/github.com/vulcanize/vulcanizedb
+
+# Build migration tool
+RUN go get -u -d github.com/pressly/goose/cmd/goose
+WORKDIR /go/src/github.com/pressly/goose/cmd/goose
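+# build a statically linked, CGO-free goose binary so it can run in the final alpine stage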
+RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -tags='no_mysql no_sqlite' -o goose .
+
+WORKDIR /go/src/github.com/vulcanize/vulcanizedb
+
+# app container
+FROM alpine
+
+ARG USER
+
+RUN adduser -Du 5000 $USER
+WORKDIR /app
+RUN chown $USER /app
+USER $USER
+
+# chown first so dir is writable
+# note: support for build args like $USER in COPY --chown is merged upstream, but not in the stable Docker release yet
+COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/vulcanizedb/dockerfiles/migrations/startup_script.sh .
+
+
+# keep binaries immutable
+COPY --from=builder /go/src/github.com/pressly/goose/cmd/goose/goose goose
+COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/db/migrations migrations/vulcanizedb
+# XXX dir is already writable, so there is no need to: RUN touch vulcanizedb.log
+
+CMD ["./startup_script.sh"]
\ No newline at end of file
diff --git a/dockerfiles/migrations/startup_script.sh b/dockerfiles/migrations/startup_script.sh
new file mode 100755
index 00000000..ca5c5159
--- /dev/null
+++ b/dockerfiles/migrations/startup_script.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+# Runs the db migrations
+
+# Exit if the variable tests fail
+set -e
+set +x
+
+# Check the database variables are set
+test $DATABASE_HOSTNAME
+test $DATABASE_NAME
+test $DATABASE_PORT
+test $DATABASE_USER
+test $DATABASE_PASSWORD
+set +e
+
+# Construct the connection string for postgres
+VDB_PG_CONNECT=postgresql://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOSTNAME:$DATABASE_PORT/$DATABASE_NAME?sslmode=disable
+
+# Run the DB migrations
+echo "Connecting with: $VDB_PG_CONNECT"
+echo "Running database migrations"
+./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up
+
+
+# If the db migrations ran without err
+if [ $? -eq 0 ]; then
+ echo "Migrations ran successfully"
+ exit 0
+else
+ echo "Could not run migrations. Are the database details correct?"
+ exit 1
+fi
\ No newline at end of file
diff --git a/dockerfiles/postgraphile/Dockerfile b/dockerfiles/postgraphile/Dockerfile
new file mode 100644
index 00000000..8ce064bb
--- /dev/null
+++ b/dockerfiles/postgraphile/Dockerfile
@@ -0,0 +1,8 @@
+FROM node:alpine
+
+RUN npm install -g postgraphile
+RUN npm install -g postgraphile-plugin-connection-filter
+RUN npm install -g @graphile/pg-pubsub
+
+EXPOSE 5000
+ENTRYPOINT ["postgraphile"]
\ No newline at end of file
diff --git a/dockerfiles/postgraphile/docker-compose.yml b/dockerfiles/postgraphile/docker-compose.yml
new file mode 100644
index 00000000..5ee8059e
--- /dev/null
+++ b/dockerfiles/postgraphile/docker-compose.yml
@@ -0,0 +1,60 @@
+version: '3.2'
+
+services:
+ db:
+ restart: always
+ image: postgres:10.12-alpine
+ environment:
+ POSTGRES_USER: "vdbm"
+ POSTGRES_DB: "vulcanize_public"
+ POSTGRES_PASSWORD: "password"
+ volumes:
+ - vulcanizedb_db_data:/var/lib/postgresql/data
+ expose:
+ - "5432"
+ ports:
+ - "127.0.0.1:8079:5432"
+
+ migrations:
+ restart: on-failure
+ depends_on:
+ - db
+ build:
+ context: ./../../
+ cache_from:
+ - alpine:latest
+ dockerfile: ./dockerfiles/migrations/Dockerfile
+ args:
+ USER: "vdbm"
+ environment:
+ DATABASE_NAME: "vulcanize_public"
+ DATABASE_HOSTNAME: "db"
+ DATABASE_PORT: 5432
+ DATABASE_USER: "vdbm"
+ DATABASE_PASSWORD: "password"
+
+ graphql:
+ restart: always
+ depends_on:
+ - db
+ - migrations
+ build:
+ context: ./../../
+ cache_from:
+ - node:alpine
+ dockerfile: ./dockerfiles/postgraphile/Dockerfile
+ expose:
+ - "5000"
+ ports:
+ - "127.0.0.1:5000:5000"
+ command: ["--plugins", "@graphile/pg-pubsub",
+ "--subscriptions",
+ "--simple-subscriptions",
+ "--connection", "postgres://vdbm:password@db:5432/vulcanize_public",
+ "--port", "5000",
+ "-n", "0.0.0.0",
+ "--schema", "public,btc,eth",
+ "--append-plugins", "postgraphile-plugin-connection-filter"]
+
+volumes:
+ vulcanizedb_db_data:
\ No newline at end of file
diff --git a/dockerfiles/rinkeby/Dockerfile b/dockerfiles/rinkeby/Dockerfile
deleted file mode 100644
index 7f5d5a7b..00000000
--- a/dockerfiles/rinkeby/Dockerfile
+++ /dev/null
@@ -1,9 +0,0 @@
-FROM golang:1.10.3-alpine3.7
-
-RUN apk add --no-cache make gcc musl-dev
-
-ADD . /go/src/github.com/vulcanize/vulcanizedb
-WORKDIR /go/src/github.com/vulcanize/vulcanizedb
-RUN go build -o /app main.go
-
-ENTRYPOINT ["/app"]
diff --git a/dockerfiles/rinkeby/config.toml b/dockerfiles/rinkeby/config.toml
deleted file mode 100644
index 7fa6796e..00000000
--- a/dockerfiles/rinkeby/config.toml
+++ /dev/null
@@ -1,9 +0,0 @@
-[database]
-name = "vulcanizedb"
-hostname = "postgres"
-port = 5432
-user = "postgres"
-password = "postgres"
-
-[client]
-ipcPath = "/geth/geth.ipc"
diff --git a/dockerfiles/rinkeby/docker-compose.yml b/dockerfiles/rinkeby/docker-compose.yml
deleted file mode 100644
index 4c3bd3aa..00000000
--- a/dockerfiles/rinkeby/docker-compose.yml
+++ /dev/null
@@ -1,63 +0,0 @@
-version: '2.2'
-
-services:
-
- vulcanizedb:
- build:
- context: ./../../
- dockerfile: dockerfiles/rinkeby/Dockerfile
- container_name: rinkeby_vulcanizedb
- command: "sync --starting-block-number 0 --config /config.toml"
- volumes:
- - "./config.toml:/config.toml"
- - "vulcanizedb_geth_data:/geth"
- networks:
- vulcanizedb_net:
-
- migrations:
- image: migrate/migrate:v3.3.0
- container_name: rinkeby_vulcanizedb_migrations
- depends_on:
- postgres:
- condition: service_healthy
- command: -database postgresql://postgres:postgres@postgres:5432/vulcanizedb?sslmode=disable -path /migrations up
- volumes:
- - ./../../db/migrations:/migrations
- networks:
- vulcanizedb_net:
-
- postgres:
- image: postgres:9.6.5-alpine
- container_name: rinkeby_vulcanizedb_postgres
- environment:
- POSTGRES_USER: postgres
- POSTGRES_DB: vulcanizedb
- POSTGRES_PASSWORD: postgres
- volumes:
- - "vulcanizedb_db_data:/var/lib/postgresql/data"
- networks:
- vulcanizedb_net:
- healthcheck:
- test: ["CMD", "pg_isready"]
- interval: 5s
- timeout: 5s
- retries: 30
-
- geth:
- image: ethereum/client-go:v1.8.11
- container_name: rinkeby_vulcanizedb_geth
- cpus: 0.3
- hostname: eth
- command: '--rinkeby --rpc --rpcaddr="0.0.0.0" --rpcvhosts="geth"'
- volumes:
- - "vulcanizedb_geth_data:/root/.ethereum/rinkeby"
- networks:
- vulcanizedb_net:
-
-volumes:
- vulcanizedb_geth_data:
- vulcanizedb_db_data:
-
-networks:
- vulcanizedb_net:
- driver: bridge
diff --git a/dockerfiles/super_node/Dockerfile b/dockerfiles/super_node/Dockerfile
index 06b55a83..84ebe54e 100644
--- a/dockerfiles/super_node/Dockerfile
+++ b/dockerfiles/super_node/Dockerfile
@@ -7,8 +7,6 @@ RUN apk add busybox-extras
# this is probably a noob move, but I want apk from alpine for the above but need to avoid Go 1.13 below as this error still occurs https://github.com/ipfs/go-ipfs/issues/6603
FROM golang:1.12.4 as builder
-RUN yum install -y libusb1-devel systemd-devel
-
# Get and build vulcanizedb
ADD . /go/src/github.com/vulcanize/vulcanizedb
WORKDIR /go/src/github.com/vulcanize/vulcanizedb
@@ -19,7 +17,7 @@ RUN go get -u -d github.com/ipfs/go-ipfs
WORKDIR /go/src/github.com/ipfs/go-ipfs
RUN git remote add vulcanize https://github.com/vulcanize/go-ipfs.git
RUN git fetch vulcanize
-RUN git checkout -b pg_ipfs v0.4.22-alpha
+RUN git checkout -b pg_ipfs vulcanize/postgres_update
RUN GO111MODULE=on GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o ipfs ./cmd/ipfs
# Build migration tool
@@ -31,32 +29,32 @@ WORKDIR /go/src/github.com/vulcanize/vulcanizedb
# app container
FROM alpine
-WORKDIR /app
ARG USER
ARG CONFIG_FILE
ARG EXPOSE_PORT_1
ARG EXPOSE_PORT_2
-ARG EXPOSE_PORT_3
-ARG EXPOSE_PORT_4
-RUN adduser -D 5000 $USER
+RUN adduser -Du 5000 $USER
+WORKDIR /app
+RUN chown $USER /app
USER $USER
# chown first so dir is writable
# note: using $USER is merged, but not in the stable release yet
COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/vulcanizedb/$CONFIG_FILE config.toml
COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/vulcanizedb/dockerfiles/super_node/startup_script.sh .
+COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/vulcanizedb/dockerfiles/super_node/entrypoint.sh .
+
# keep binaries immutable
COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/vulcanizedb vulcanizedb
COPY --from=builder /go/src/github.com/pressly/goose/cmd/goose/goose goose
COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/db/migrations migrations/vulcanizedb
+COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/environments environments
COPY --from=builder /go/src/github.com/ipfs/go-ipfs/ipfs ipfs
EXPOSE $EXPOSE_PORT_1
EXPOSE $EXPOSE_PORT_2
-EXPOSE $EXPOSE_PORT_3
-EXPOSE $EXPOSE_PORT_4
-CMD ["./startup_script.sh"]
+ENTRYPOINT ["/app/entrypoint.sh"]
diff --git a/dockerfiles/super_node/docker-compose.yml b/dockerfiles/super_node/docker-compose.yml
new file mode 100644
index 00000000..b12a514e
--- /dev/null
+++ b/dockerfiles/super_node/docker-compose.yml
@@ -0,0 +1,91 @@
+version: '3.2'
+
+services:
+ db:
+ restart: always
+ image: postgres:10.12-alpine
+ environment:
+ POSTGRES_USER: "vdbm"
+ POSTGRES_DB: "vulcanize_public"
+ POSTGRES_PASSWORD: "password"
+ volumes:
+ - vulcanizedb_db_data:/var/lib/postgresql/data
+ expose:
+ - "5432"
+ ports:
+ - "127.0.0.1:8079:5432"
+
+ btc:
+ depends_on:
+ - db
+ build:
+ context: ./../../
+ cache_from:
+ - alpine:latest
+ - golang:1.12.4
+ dockerfile: ./dockerfiles/super_node/Dockerfile
+ args:
+ USER: "vdbm"
+ CONFIG_FILE: ./environments/superNodeBTC.toml
+ environment:
+ VDB_COMMAND: "superNode"
+ IPFS_INIT: "true"
+ IPFS_PATH: "/root/.btc/.ipfs"
+ DATABASE_NAME: "vulcanize_public"
+ DATABASE_HOSTNAME: "db"
+ DATABASE_PORT: 5432
+ DATABASE_USER: "vdbm"
+ DATABASE_PASSWORD: "password"
+ ports:
+ - "127.0.0.1:8082:8082"
+ - "127.0.0.1:8083:8083"
+
+ eth:
+ depends_on:
+ - db
+ build:
+ context: ./../../
+ cache_from:
+ - alpine:latest
+ - golang:1.12.4
+ dockerfile: ./dockerfiles/super_node/Dockerfile
+ args:
+ USER: "vdbm"
+ CONFIG_FILE: ./environments/superNodeETH.toml
+ environment:
+ VDB_COMMAND: "superNode"
+ IPFS_INIT: "true"
+ IPFS_PATH: "/root/.eth/.ipfs"
+ DATABASE_NAME: "vulcanize_public"
+ DATABASE_HOSTNAME: "db"
+ DATABASE_PORT: 5432
+ DATABASE_USER: "vdbm"
+ DATABASE_PASSWORD: "password"
+ ports:
+ - "127.0.0.1:8080:8080"
+ - "127.0.0.1:8081:8081"
+
+ graphql:
+ restart: always
+ depends_on:
+ - db
+ build:
+ context: ./../../
+ cache_from:
+ - node:alpine
+ dockerfile: ./dockerfiles/postgraphile/Dockerfile
+ expose:
+ - "5000"
+ ports:
+ - "127.0.0.1:5000:5000"
+ command: ["--plugins", "@graphile/pg-pubsub",
+ "--subscriptions",
+ "--simple-subscriptions",
+ "--connection", "postgres://vdbm:password@db:5432/vulcanize_public",
+ "--port", "5000",
+ "-n", "0.0.0.0",
+ "--schema", "public,btc,eth",
+ "--append-plugins", "postgraphile-plugin-connection-filter"]
+
+volumes:
+ vulcanizedb_db_data:
\ No newline at end of file
diff --git a/dockerfiles/super_node/entrypoint.sh b/dockerfiles/super_node/entrypoint.sh
new file mode 100755
index 00000000..321af8e4
--- /dev/null
+++ b/dockerfiles/super_node/entrypoint.sh
@@ -0,0 +1,68 @@
+#!/bin/sh
+# Runs the db migrations and starts the super node services
+
+# Exit if the variable tests fail
+set -e
+set +x
+
+# Check the database variables are set
+# XXX set defaults, don't silently fail
+#test $DATABASE_HOSTNAME
+#test $DATABASE_NAME
+#test $DATABASE_PORT
+#test $DATABASE_USER
+#test $DATABASE_PASSWORD
+#test $IPFS_INIT
+#test $IPFS_PATH
+VDB_COMMAND=${VDB_COMMAND:-superNode}
+set +e
+
+# Construct the connection string for postgres
+VDB_PG_CONNECT=postgresql://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOSTNAME:$DATABASE_PORT/$DATABASE_NAME?sslmode=disable
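+# e.g. postgresql://vdbm:password@db:5432/vulcanize_public?sslmode=disable with the docker-compose defaults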
+
+# Run the DB migrations
+echo "Connecting with: $VDB_PG_CONNECT"
+echo "Running database migrations"
+./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up
+rv=$?
+
+if [ $rv != 0 ]; then
+ echo "Could not run migrations. Are the database details correct?"
+ exit 1
+fi
+
+# Export our database variables so that the IPFS Postgres plugin can use them
+export IPFS_PGHOST=$DATABASE_HOSTNAME
+export IPFS_PGUSER=$DATABASE_USER
+export IPFS_PGDATABASE=$DATABASE_NAME
+export IPFS_PGPORT=$DATABASE_PORT
+export IPFS_PGPASSWORD=$DATABASE_PASSWORD
+
+
+if [ ! -d "$HOME/.ipfs" ]; then
+ # initialize PG-IPFS
+ echo "Initializing Postgres-IPFS profile"
+ ./ipfs init --profile=postgresds
+
+ rv=$?
+ if [ $rv != 0 ]; then
+ echo "Could not initialize ipfs"
+ exit 1
+ fi
+fi
+
+
+echo "Beginning the vulcanizedb process"
+VDB_CONFIG_FILE=${VDB_CONFIG_FILE:-config.toml}
+DEFAULT_OPTIONS="--config=$VDB_CONFIG_FILE"
+VDB_FULL_CL=${VDB_FULL_CL:-$VDB_COMMAND $DEFAULT_OPTIONS}
+echo running: ./vulcanizedb $VDB_FULL_CL $@
+
+# XXX need to lose the env vars
+./vulcanizedb $VDB_FULL_CL $@
+rv=$?
+
+if [ $rv != 0 ]; then
+ echo "VulcanizeDB startup failed"
+ exit 1
+fi
\ No newline at end of file
diff --git a/dockerfiles/super_node/startup_script.sh b/dockerfiles/super_node/startup_script.sh
index ddf6c8a1..0f07ea0b 100755
--- a/dockerfiles/super_node/startup_script.sh
+++ b/dockerfiles/super_node/startup_script.sh
@@ -3,24 +3,28 @@
# Exit if the variable tests fail
set -e
+set +x
# Check the database variables are set
-test $VDB_PG_NAME
-test $VDB_PG_HOSTNAME
-test $VDB_PG_PORT
-test $VDB_PG_USER
+test $DATABASE_HOSTNAME
+test $DATABASE_NAME
+test $DATABASE_PORT
+test $DATABASE_USER
+test $DATABASE_PASSWORD
test $IPFS_INIT
+test $IPFS_PATH
+test $VDB_COMMAND
set +e
# Export our database variables so that the IPFS Postgres plugin can use them
-export IPFS_PGHOST=$VDB_PG_HOSTNAME
-export IPFS_PGUSER=$VDB_PG_USER
-export IPFS_PGDATABASE=$VDB_PG_NAME
-export IPFS_PGPORT=$VDB_PG_PORT
-export IPFS_PGPASSWORD=$VDB_PG_PASSWORD
+export IPFS_PGHOST=$DATABASE_HOSTNAME
+export IPFS_PGUSER=$DATABASE_USER
+export IPFS_PGDATABASE=$DATABASE_NAME
+export IPFS_PGPORT=$DATABASE_PORT
+export IPFS_PGPASSWORD=$DATABASE_PASSWORD
# Construct the connection string for postgres
-VDB_PG_CONNECT=postgresql://$VDB_PG_USER:$VDB_PG_PASSWORD@$VDB_PG_HOSTNAME:$VDB_PG_PORT/$VDB_PG_NAME?sslmode=disable
+VDB_PG_CONNECT=postgresql://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOSTNAME:$DATABASE_PORT/$DATABASE_NAME?sslmode=disable
# Run the DB migrations
echo "Connecting with: $VDB_PG_CONNECT"
@@ -40,28 +44,22 @@ if [[ $? -eq 0 ]]; then
fi
else
echo "Could not run migrations. Are the database details correct?"
- exit
+ exit 1
fi
# If IPFS initialization was successful
if [[ $? -eq 0 ]]; then
- # Wait until block synchronisation has begun
- echo "Waiting for block synchronization to begin"
- ( tail -f -n0 log.txt & ) | grep -q "Block synchronisation started" # this blocks til we see "Block synchronisation started"
- # And then spin up the syncPublishScreenAndServe Vulcanizedb service
- echo "Beginning the syncPublishScreenAndServe vulcanizedb process"
- ./vulcanizedb superNode --config=config.toml 2>&1 | tee -a log.txt &
+ echo "Running the VulcanizeDB process"
+ ./vulcanizedb ${VDB_COMMAND} --config=config.toml
else
- echo "Could not initialize state-diffing Geth."
- exit
+ echo "Could not initialize IPFS."
+ exit 1
fi
-# If Vulcanizedb startup was successful
+# If VulcanizeDB process was successful
if [ $? -eq 0 ]; then
- echo "Seed node successfully booted"
+ echo "VulcanizeDB process ran successfully"
else
- echo "Could not start vulcanizedb syncPublishScreenAndServe process. Is the config file correct?"
- exit
-fi
-
-wait
+ echo "Could not start VulcanizeDB process. Is the config file correct?"
+ exit 1
+fi
\ No newline at end of file
diff --git a/documentation/super_node/architecture.md b/documentation/super_node/architecture.md
new file mode 100644
index 00000000..24660221
--- /dev/null
+++ b/documentation/super_node/architecture.md
@@ -0,0 +1,16 @@
+These are the components of a VulcanizeDB Watcher:
+* Data Fetcher/Streamer sources:
+ * go-ethereum
+ * bitcoind
+ * btcd
+ * IPFS
+* Transformers contain:
+ * converter
+ * publisher
+ * indexer
+* Endpoints contain:
+ * api
+ * backend
+ * filterer
+ * retriever
+ * ipld_server
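+
+As a rough sketch of how a transformer's three stages fit together (a minimal,
+hypothetical Go interface; the concrete definitions live under `pkg/super_node`
+and differ in their exact types):
+
+```go
+// Transformer moves raw chain data through convert -> publish -> index.
+type Transformer interface {
+    // Convert decodes a raw chain payload (e.g. an eth statediff or a btc block)
+    // into IPLD-ready intermediate data.
+    Convert(payload interface{}) (interface{}, error)
+    // Publish writes the converted data to IPFS, returning the resulting CIDs.
+    Publish(converted interface{}) (interface{}, error)
+    // Index records the published CIDs in Postgres for later retrieval.
+    Index(cids interface{}) error
+}
+```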
diff --git a/documentation/super_node/subscription.md b/documentation/super_node/subscription.md
index 09bd1ae0..a9a68ec2 100644
--- a/documentation/super_node/subscription.md
+++ b/documentation/super_node/subscription.md
@@ -17,7 +17,7 @@ The config for `streamEthSubscribe` has a set of parameters to fill the [EthSubs
```toml
[superNode]
[superNode.ethSubscription]
- historicalData = true
+ historicalData = false
historicalDataOnly = false
startingBlock = 0
endingBlock = 0
@@ -27,26 +27,18 @@ The config for `streamEthSubscribe` has a set of parameters to fill the [EthSubs
uncles = false
[superNode.ethSubscription.txFilter]
off = false
- src = [
- "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
- ]
- dst = [
- "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
- ]
+ src = []
+ dst = []
[superNode.ethSubscription.receiptFilter]
off = false
contracts = []
- topics = [
- [
- "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
- "0x930a61a57a70a73c2a503615b87e2e54fe5b9cdeacda518270b852296ab1a377"
- ]
- ]
+ topic0s = []
+ topic1s = []
+ topic2s = []
+ topic3s = []
[superNode.ethSubscription.stateFilter]
off = false
- addresses = [
- "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe"
- ]
+ addresses = []
intermediateNodes = false
[superNode.ethSubscription.storageFilter]
off = true
diff --git a/environments/superNode.toml b/environments/superNode.toml
deleted file mode 100644
index 9c77db2a..00000000
--- a/environments/superNode.toml
+++ /dev/null
@@ -1,59 +0,0 @@
-[superNode]
- chains = ["ethereum", "bitcoin"]
- ipfsPath = "/root/.ipfs"
-
- [superNode.ethereum.database]
- name = "vulcanize_public"
- hostname = "localhost"
- port = 5432
- user = "ec2-user"
-
- [superNode.ethereum.sync]
- on = true
- wsPath = "ws://127.0.0.1:8546"
- workers = 1
-
- [superNode.ethereum.server]
- on = true
- ipcPath = "/root/.vulcanize/eth/vulcanize.ipc"
- wsPath = "127.0.0.1:8080"
- httpPath = "127.0.0.1:8081"
-
- [superNode.ethereum.backFill]
- on = true
- httpPath = "http://127.0.0.1:8545"
- frequency = 15
- batchSize = 50
-
- [superNode.bitcoin.database]
- name = "vulcanize_public"
- hostname = "localhost"
- port = 5432
- user = "ec2-user"
-
- [superNode.bitcoin.sync]
- on = true
- wsPath = "127.0.0.1:8332"
- workers = 1
- pass = "password"
- user = "username"
-
- [superNode.bitcoin.server]
- on = true
- ipcPath = "/root/.vulcanize/btc/vulcanize.ipc"
- wsPath = "127.0.0.1:8082"
- httpPath = "127.0.0.1:8083"
-
- [superNode.bitcoin.backFill]
- on = true
- httpPath = "127.0.0.1:8332"
- frequency = 15
- batchSize = 50
- pass = "password"
- user = "username"
-
- [superNode.bitcoin.node]
- nodeID = "ocd0"
- clientName = "Omnicore"
- genesisBlock = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"
- networkID = "0xD9B4BEF9"
\ No newline at end of file
diff --git a/environments/superNodeBTC.toml b/environments/superNodeBTC.toml
new file mode 100644
index 00000000..99171a9d
--- /dev/null
+++ b/environments/superNodeBTC.toml
@@ -0,0 +1,41 @@
+[database]
+ name = "vulcanize_public" # $DATABASE_NAME
+ hostname = "localhost" # &DATABASE_HOSTNAME
+ port = 5432 # $DATABASE_PORT
+ user = "vdbm" # $DATABASE_USER
+ password = "" # $DATABASE_PASSWORD
+
+[ipfs]
+ path = "~/.ipfs" # $IPFS_PATH
+
+[resync]
+ chain = "bitcoin" # $RESYNC_CHAIN
+ type = "full" # $RESYNC_TYPE
+ start = 0 # $RESYNC_START
+ stop = 0 # $RESYNC_STOP
+ batchSize = 1 # $RESYNC_BATCH_SIZE
+ batchNumber = 50 # $RESYNC_BATCH_NUMBER
+ clearOldCache = false # $RESYNC_CLEAR_OLD_CACHE
+
+[superNode]
+ chain = "bitcoin" # $SUPERNODE_CHAIN
+ server = true # $SUPERNODE_SERVER
+ ipcPath = "~/.vulcanize/vulcanize.ipc" # $SUPERNODE_IPC_PATH
+ wsPath = "127.0.0.1:8082" # $SUPERNODE_WS_PATH
+ httpPath = "127.0.0.1:8083" # $SUPERNODE_HTTP_PATH
+ sync = true # $SUPERNODE_SYNC
+ workers = 1 # $SUPERNODE_WORKERS
+ backFill = true # $SUPERNODE_BACKFILL
+ frequency = 45 # $SUPERNODE_FREQUENCY
+ batchSize = 1 # $SUPERNODE_BATCH_SIZE
+ batchNumber = 50 # $SUPERNODE_BATCH_NUMBER
+
+[bitcoin]
+ wsPath = "127.0.0.1:8332" # $BTC_WS_PATH
+ httpPath = "127.0.0.1:8332" # $BTC_HTTP_PATH
+ pass = "password" # $BTC_NODE_PASSWORD
+ user = "username" # $BTC_NODE_USER
+ nodeID = "ocd0" # $BTC_NODE_ID
+ clientName = "Omnicore" # $BTC_CLIENT_NAME
+ genesisBlock = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f" # $BTC_GENESIS_BLOCK
+ networkID = "0xD9B4BEF9" # $BTC_NETWORK_ID
\ No newline at end of file
diff --git a/environments/superNodeETH.toml b/environments/superNodeETH.toml
new file mode 100644
index 00000000..69e3b32d
--- /dev/null
+++ b/environments/superNodeETH.toml
@@ -0,0 +1,35 @@
+[database]
+ name = "vulcanize_public" # $DATABASE_NAME
+ hostname = "localhost" # &DATABASE_HOSTNAME
+ port = 5432 # $DATABASE_PORT
+ user = "vdbm" # $DATABASE_USER
+ password = "" # $DATABASE_PASSWORD
+
+[ipfs]
+ path = "~/.ipfs" # $IPFS_PATH
+
+[resync]
+ chain = "ethereum" # $RESYNC_CHAIN
+ type = "state" # $RESYNC_TYPE
+ start = 0 # $RESYNC_START
+ stop = 0 # $RESYNC_STOP
+ batchSize = 5 # $RESYNC_BATCH_SIZE
+ batchNumber = 50 # $RESYNC_BATCH_NUMBER
+ clearOldCache = true # $RESYNC_CLEAR_OLD_CACHE
+
+[superNode]
+ chain = "ethereum" # $SUPERNODE_CHAIN
+ server = true # $SUPERNODE_SERVER
+ ipcPath = "~/.vulcanize/vulcanize.ipc" # $SUPERNODE_IPC_PATH
+ wsPath = "127.0.0.1:8081" # $SUPERNODE_WS_PATH
+ httpPath = "127.0.0.1:8082" # $SUPERNODE_HTTP_PATH
+ sync = true # $SUPERNODE_SYNC
+ workers = 1 # $SUPERNODE_WORKERS
+ backFill = true # $SUPERNODE_BACKFILL
+ frequency = 15 # $SUPERNODE_FREQUENCY
+ batchSize = 5 # $SUPERNODE_BATCH_SIZE
+ batchNumber = 50 # $SUPERNODE_BATCH_NUMBER
+
+[ethereum]
+ wsPath = "127.0.0.1:8546" # $ETH_WS_PATH
+ httpPath = "127.0.0.1:8545" # $ETH_HTTP_PATH
\ No newline at end of file
diff --git a/environments/superNodeSubscription.toml b/environments/superNodeSubscription.toml
index b3cea0c0..79f21775 100644
--- a/environments/superNodeSubscription.toml
+++ b/environments/superNodeSubscription.toml
@@ -1,6 +1,6 @@
[superNode]
[superNode.ethSubscription]
- historicalData = true
+ historicalData = false
historicalDataOnly = false
startingBlock = 0
endingBlock = 0
@@ -10,26 +10,18 @@
uncles = false
[superNode.ethSubscription.txFilter]
off = false
- src = [
- "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
- ]
- dst = [
- "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
- ]
+ src = []
+ dst = []
[superNode.ethSubscription.receiptFilter]
off = false
contracts = []
- topics = [
- [
- "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
- "0x930a61a57a70a73c2a503615b87e2e54fe5b9cdeacda518270b852296ab1a377"
- ]
- ]
+ topic0s = []
+ topic1s = []
+ topic2s = []
+ topic3s = []
[superNode.ethSubscription.stateFilter]
off = false
- addresses = [
- "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe"
- ]
+ addresses = []
intermediateNodes = false
[superNode.ethSubscription.storageFilter]
off = true
diff --git a/go.mod b/go.mod
index 0ece4815..17360377 100644
--- a/go.mod
+++ b/go.mod
@@ -97,4 +97,4 @@ replace github.com/ipfs/go-ipfs v0.4.22 => github.com/vulcanize/go-ipfs v0.4.22-
replace github.com/ipfs/go-ipfs-config v0.0.3 => github.com/vulcanize/go-ipfs-config v0.0.8-alpha
-replace github.com/ethereum/go-ethereum v1.9.1 => github.com/vulcanize/go-ethereum v1.5.10-0.20200116224441-2a980ec3dcb8
+replace github.com/ethereum/go-ethereum v1.9.1 => github.com/vulcanize/go-ethereum v1.5.10-0.20200311182536-d07dc803d290
diff --git a/go.sum b/go.sum
index 0ecbfd21..dae0ac07 100644
--- a/go.sum
+++ b/go.sum
@@ -37,6 +37,7 @@ github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:
github.com/aristanetworks/goarista v0.0.0-20190712234253-ed1100a1c015 h1:7ABPr1+uJdqESAdlVevnc/2FJGiC/K3uMg1JiELeF+0=
github.com/aristanetworks/goarista v0.0.0-20190712234253-ed1100a1c015/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bren2010/proquint v0.0.0-20160323162903-38337c27106d h1:QgeLLoPD3kRVmeu/1al9iIpIANMi9O1zXFm8BnYGCJg=
@@ -105,7 +106,9 @@ github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUn
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/dop251/goja v0.0.0-20200106141417-aaec0e7bde29/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
@@ -134,6 +137,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
+github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
@@ -327,6 +331,7 @@ github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsj
github.com/jbenet/goprocess v0.1.3 h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10=
github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmoiron/sqlx v0.0.0-20190426154859-38398a30ed85 h1:+LZtdhpMITOXE+MztQPPcwUl+eqYjwlXXLHrd0yWlxw=
github.com/jmoiron/sqlx v0.0.0-20190426154859-38398a30ed85/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
@@ -723,6 +728,8 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vulcanize/go-ethereum v1.5.10-0.20200116224441-2a980ec3dcb8 h1:BHt0OW0rTgndFjSju7brF3dPceXWQuEV0IdtY8BjjT8=
github.com/vulcanize/go-ethereum v1.5.10-0.20200116224441-2a980ec3dcb8/go.mod h1:a9TqabFudpDu1nucId+k9S8R9whYaHnGBLKFouA5EAo=
+github.com/vulcanize/go-ethereum v1.5.10-0.20200311182536-d07dc803d290 h1:uMWt+x6JhVT7GyL983weZSxv1zDBxvGlI9HNkcTnUeg=
+github.com/vulcanize/go-ethereum v1.5.10-0.20200311182536-d07dc803d290/go.mod h1:7oC0Ni6dosMv5pxMigm6s0hN8g4haJMBnqmmo0D9YfQ=
github.com/vulcanize/go-ipfs v0.4.22-alpha h1:W+6njT14KWllMhABRFtPndqHw8SHCt5SqD4YX528kxM=
github.com/vulcanize/go-ipfs v0.4.22-alpha/go.mod h1:uaekWWeoaA0A9Dv1LObOKCSh9kIzTpZ5RbKW4g5CQHE=
github.com/vulcanize/go-ipfs-config v0.0.8-alpha h1:peaFvbEcPShF6ymOd8flqKkFz4YfcrNr/UOO7FmbWoQ=
diff --git a/pkg/eth/contract_watcher/shared/getter/getter_test.go b/integration_test/getter_test.go
similarity index 99%
rename from pkg/eth/contract_watcher/shared/getter/getter_test.go
rename to integration_test/getter_test.go
index e0cc5a60..a5c05986 100644
--- a/pkg/eth/contract_watcher/shared/getter/getter_test.go
+++ b/integration_test/getter_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
-package getter_test
+package integration_test
import (
"github.com/ethereum/go-ethereum/ethclient"
diff --git a/libraries/shared/fetcher/geth_rpc_storage_fetcher.go b/libraries/shared/fetcher/geth_rpc_storage_fetcher.go
index b4809cf6..f535c410 100644
--- a/libraries/shared/fetcher/geth_rpc_storage_fetcher.go
+++ b/libraries/shared/fetcher/geth_rpc_storage_fetcher.go
@@ -65,11 +65,11 @@ func (fetcher GethRPCStorageFetcher) FetchStorageDiffs(out chan<- utils.StorageD
accounts := utils.GetAccountsFromDiff(*stateDiff)
logrus.Trace(fmt.Sprintf("iterating through %d accounts on stateDiff for block %d", len(accounts), stateDiff.BlockNumber))
for _, account := range accounts {
- logrus.Trace(fmt.Sprintf("iterating through %d Storage values on account with key %s", len(account.Storage), common.BytesToHash(account.Key).Hex()))
+ logrus.Trace(fmt.Sprintf("iterating through %d Storage values on account with key %s", len(account.Storage), common.BytesToHash(account.LeafKey).Hex()))
for _, storage := range account.Storage {
diff, formatErr := utils.FromGethStateDiff(account, stateDiff, storage)
if formatErr != nil {
- logrus.Error("failed to format utils.StorageDiff from storage with key: ", common.BytesToHash(storage.Key), "from account with key: ", common.BytesToHash(account.Key))
+ logrus.Error("failed to format utils.StorageDiff from storage with key: ", common.BytesToHash(storage.LeafKey), "from account with key: ", common.BytesToHash(account.LeafKey))
errs <- formatErr
continue
}
diff --git a/libraries/shared/storage/backfiller.go b/libraries/shared/storage/backfiller.go
index e9cc6aa5..6584e38d 100644
--- a/libraries/shared/storage/backfiller.go
+++ b/libraries/shared/storage/backfiller.go
@@ -127,11 +127,11 @@ func (bf *backFiller) backFillRange(blockHeights []uint64, diffChan chan utils.S
}
accounts := utils.GetAccountsFromDiff(*stateDiff)
for _, account := range accounts {
- logrus.Trace(fmt.Sprintf("iterating through %d Storage values on account with key %s", len(account.Storage), common.BytesToHash(account.Key).Hex()))
+ logrus.Trace(fmt.Sprintf("iterating through %d Storage values on account with key %s", len(account.Storage), common.BytesToHash(account.LeafKey).Hex()))
for _, storage := range account.Storage {
diff, formatErr := utils.FromGethStateDiff(account, stateDiff, storage)
if formatErr != nil {
- logrus.Error("failed to format utils.StorageDiff from storage with key: ", common.BytesToHash(storage.Key), "from account with key: ", common.BytesToHash(account.Key))
+ logrus.Error("failed to format utils.StorageDiff from storage with key: ", common.BytesToHash(storage.LeafKey), "from account with key: ", common.BytesToHash(account.LeafKey))
errChan <- formatErr
continue
}
diff --git a/libraries/shared/storage/utils/diff.go b/libraries/shared/storage/utils/diff.go
index 331ebf96..e6ad4c78 100644
--- a/libraries/shared/storage/utils/diff.go
+++ b/libraries/shared/storage/utils/diff.go
@@ -59,16 +59,16 @@ func FromParityCsvRow(csvRow []string) (StorageDiffInput, error) {
func FromGethStateDiff(account statediff.AccountDiff, stateDiff *statediff.StateDiff, storage statediff.StorageDiff) (StorageDiffInput, error) {
var decodedValue []byte
- err := rlp.DecodeBytes(storage.Value, &decodedValue)
+ err := rlp.DecodeBytes(storage.NodeValue, &decodedValue)
if err != nil {
return StorageDiffInput{}, err
}
return StorageDiffInput{
- HashedAddress: common.BytesToHash(account.Key),
+ HashedAddress: common.BytesToHash(account.LeafKey),
BlockHash: stateDiff.BlockHash,
BlockHeight: int(stateDiff.BlockNumber.Int64()),
- StorageKey: common.BytesToHash(storage.Key),
+ StorageKey: common.BytesToHash(storage.LeafKey),
StorageValue: common.BytesToHash(decodedValue),
}, nil
}
diff --git a/libraries/shared/storage/utils/diff_test.go b/libraries/shared/storage/utils/diff_test.go
index 32e6b437..ad47a6a7 100644
--- a/libraries/shared/storage/utils/diff_test.go
+++ b/libraries/shared/storage/utils/diff_test.go
@@ -67,7 +67,7 @@ var _ = Describe("Storage row parsing", func() {
Describe("FromGethStateDiff", func() {
var (
- accountDiff = statediff.AccountDiff{Key: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}}
+ accountDiff = statediff.AccountDiff{LeafKey: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}}
stateDiff = &statediff.StateDiff{
BlockNumber: big.NewInt(rand.Int63()),
BlockHash: fakes.FakeHash,
@@ -80,19 +80,20 @@ var _ = Describe("Storage row parsing", func() {
Expect(encodeErr).NotTo(HaveOccurred())
storageDiff := statediff.StorageDiff{
- Key: []byte{0, 9, 8, 7, 6, 5, 4, 3, 2, 1},
- Value: storageValueRlp,
+ LeafKey: []byte{0, 9, 8, 7, 6, 5, 4, 3, 2, 1},
+ NodeValue: storageValueRlp,
+ NodeType: statediff.Leaf,
}
result, err := utils.FromGethStateDiff(accountDiff, stateDiff, storageDiff)
Expect(err).NotTo(HaveOccurred())
- expectedAddress := common.BytesToHash(accountDiff.Key)
+ expectedAddress := common.BytesToHash(accountDiff.LeafKey)
Expect(result.HashedAddress).To(Equal(expectedAddress))
Expect(result.BlockHash).To(Equal(fakes.FakeHash))
expectedBlockHeight := int(stateDiff.BlockNumber.Int64())
Expect(result.BlockHeight).To(Equal(expectedBlockHeight))
- expectedStorageKey := common.BytesToHash(storageDiff.Key)
+ expectedStorageKey := common.BytesToHash(storageDiff.LeafKey)
Expect(result.StorageKey).To(Equal(expectedStorageKey))
expectedStorageValue := common.BytesToHash(storageValueBytes)
Expect(result.StorageValue).To(Equal(expectedStorageValue))
@@ -104,8 +105,9 @@ var _ = Describe("Storage row parsing", func() {
Expect(encodeErr).NotTo(HaveOccurred())
storageDiff := statediff.StorageDiff{
- Key: []byte{0, 9, 8, 7, 6, 5, 4, 3, 2, 1},
- Value: storageValueRlp,
+ LeafKey: []byte{0, 9, 8, 7, 6, 5, 4, 3, 2, 1},
+ NodeValue: storageValueRlp,
+ NodeType: statediff.Leaf,
}
result, err := utils.FromGethStateDiff(accountDiff, stateDiff, storageDiff)
diff --git a/libraries/shared/streamer/super_node_streamer.go b/libraries/shared/streamer/super_node_streamer.go
index a6bbf7b8..e57f2b44 100644
--- a/libraries/shared/streamer/super_node_streamer.go
+++ b/libraries/shared/streamer/super_node_streamer.go
@@ -22,15 +22,9 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
- "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
-// ISuperNodeStreamer is the interface for streaming SuperNodePayloads from a vulcanizeDB super node
-type ISuperNodeStreamer interface {
- Stream(payloadChan chan super_node.SubscriptionPayload, params shared.SubscriptionSettings) (*rpc.ClientSubscription, error)
-}
-
-// SuperNodeStreamer is the underlying struct for the ISuperNodeStreamer interface
+// SuperNodeStreamer is the underlying struct for the shared.SuperNodeStreamer interface
type SuperNodeStreamer struct {
Client core.RPCClient
}
@@ -43,6 +37,6 @@ func NewSuperNodeStreamer(client core.RPCClient) *SuperNodeStreamer {
}
// Stream is the main loop for subscribing to data from a vulcanizedb super node
-func (sds *SuperNodeStreamer) Stream(payloadChan chan super_node.SubscriptionPayload, params shared.SubscriptionSettings) (*rpc.ClientSubscription, error) {
- return sds.Client.Subscribe("vdb", payloadChan, "stream", params)
+func (sds *SuperNodeStreamer) Stream(payloadChan chan super_node.SubscriptionPayload, rlpParams []byte) (*rpc.ClientSubscription, error) {
+ return sds.Client.Subscribe("vdb", payloadChan, "stream", rlpParams)
}
diff --git a/libraries/shared/test_data/statediff.go b/libraries/shared/test_data/statediff.go
index bc54431f..a7f15758 100644
--- a/libraries/shared/test_data/statediff.go
+++ b/libraries/shared/test_data/statediff.go
@@ -41,22 +41,24 @@ var (
SmallStorageValue = common.Hex2Bytes("03")
SmallStorageValueRlp, _ = rlp.EncodeToBytes(SmallStorageValue)
storageWithSmallValue = []statediff.StorageDiff{{
- Key: StorageKey,
- Value: SmallStorageValueRlp,
- Path: StoragePath,
- Proof: [][]byte{},
+ LeafKey: StorageKey,
+ NodeValue: SmallStorageValueRlp,
+ NodeType: statediff.Leaf,
+ Path: StoragePath,
}}
LargeStorageValue = common.Hex2Bytes("00191b53778c567b14b50ba0000")
LargeStorageValueRlp, _ = rlp.EncodeToBytes(LargeStorageValue)
storageWithLargeValue = []statediff.StorageDiff{{
- Key: StorageKey,
- Value: LargeStorageValueRlp,
- Path: StoragePath,
- Proof: [][]byte{},
+ LeafKey: StorageKey,
+ NodeValue: LargeStorageValueRlp,
+ Path: StoragePath,
+ NodeType: statediff.Leaf,
}}
StorageWithBadValue = statediff.StorageDiff{
- Key: StorageKey,
- Value: []byte{0, 1, 2},
+ LeafKey: StorageKey,
+ NodeValue: []byte{0, 1, 2},
+ NodeType: statediff.Leaf,
+ Path: StoragePath,
// this storage value will fail to be decoded as an RLP with the following error message:
// "input contains more than one value"
}
@@ -74,27 +76,27 @@ var (
valueBytes, _ = rlp.EncodeToBytes(testAccount)
CreatedAccountDiffs = []statediff.AccountDiff{
{
- Key: ContractLeafKey.Bytes(),
- Value: valueBytes,
- Storage: storageWithSmallValue,
+ LeafKey: ContractLeafKey.Bytes(),
+ NodeValue: valueBytes,
+ Storage: storageWithSmallValue,
},
}
UpdatedAccountDiffs = []statediff.AccountDiff{{
- Key: AnotherContractLeafKey.Bytes(),
- Value: valueBytes,
- Storage: storageWithLargeValue,
+ LeafKey: AnotherContractLeafKey.Bytes(),
+ NodeValue: valueBytes,
+ Storage: storageWithLargeValue,
}}
UpdatedAccountDiffs2 = []statediff.AccountDiff{{
- Key: AnotherContractLeafKey.Bytes(),
- Value: valueBytes,
- Storage: storageWithSmallValue,
+ LeafKey: AnotherContractLeafKey.Bytes(),
+ NodeValue: valueBytes,
+ Storage: storageWithSmallValue,
}}
DeletedAccountDiffs = []statediff.AccountDiff{{
- Key: AnotherContractLeafKey.Bytes(),
- Value: valueBytes,
- Storage: storageWithSmallValue,
+ LeafKey: AnotherContractLeafKey.Bytes(),
+ NodeValue: valueBytes,
+ Storage: storageWithSmallValue,
}}
MockStateDiff = statediff.StateDiff{
diff --git a/main.go b/main.go
index e7f8d25b..7bfdd290 100644
--- a/main.go
+++ b/main.go
@@ -1,23 +1,31 @@
package main
import (
- "github.com/vulcanize/vulcanizedb/cmd"
"os"
+ "github.com/spf13/viper"
+
+ "github.com/vulcanize/vulcanizedb/cmd"
+
"github.com/sirupsen/logrus"
)
func main() {
- logrus.SetFormatter(&logrus.JSONFormatter{
- PrettyPrint: true,
+ logrus.SetFormatter(&logrus.TextFormatter{
+ FullTimestamp: true,
})
- file, err := os.OpenFile("vulcanizedb.log",
- os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
- if err == nil {
- logrus.SetOutput(file)
+ logfile := viper.GetString("logfile")
+ if logfile != "" {
+ file, err := os.OpenFile(logfile,
+ os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+ if err == nil {
+ logrus.SetOutput(file)
+ } else {
+ logrus.SetOutput(os.Stdout)
+ logrus.Info("Failed to log to file, using default stdout")
+ }
} else {
- logrus.Info("Failed to log to file, using default stderr")
+ logrus.SetOutput(os.Stdout)
}
-
cmd.Execute()
}
diff --git a/pkg/config/database.go b/pkg/config/database.go
index 4e08ca41..57b16faa 100644
--- a/pkg/config/database.go
+++ b/pkg/config/database.go
@@ -16,7 +16,20 @@
package config
-import "fmt"
+import (
+ "fmt"
+
+ "github.com/spf13/viper"
+)
+
+// Env variables
+const (
+ DATABASE_NAME = "DATABASE_NAME"
+ DATABASE_HOSTNAME = "DATABASE_HOSTNAME"
+ DATABASE_PORT = "DATABASE_PORT"
+ DATABASE_USER = "DATABASE_USER"
+ DATABASE_PASSWORD = "DATABASE_PASSWORD"
+)
type Database struct {
Hostname string
@@ -37,3 +50,16 @@ func DbConnectionString(dbConfig Database) string {
}
return fmt.Sprintf("postgresql://%s:%d/%s?sslmode=disable", dbConfig.Hostname, dbConfig.Port, dbConfig.Name)
}
+
+func (d *Database) Init() {
+ viper.BindEnv("database.name", DATABASE_NAME)
+ viper.BindEnv("database.hostname", DATABASE_HOSTNAME)
+ viper.BindEnv("database.port", DATABASE_PORT)
+ viper.BindEnv("database.user", DATABASE_USER)
+ viper.BindEnv("database.password", DATABASE_PASSWORD)
+ d.Name = viper.GetString("database.name")
+ d.Hostname = viper.GetString("database.hostname")
+ d.Port = viper.GetInt("database.port")
+ d.User = viper.GetString("database.user")
+ d.Password = viper.GetString("database.password")
+}
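+
+// A minimal usage sketch: populate a Database from the environment via the
+// viper bindings above, then derive a connection string from it:
+//
+// var dbConfig Database
+// dbConfig.Init()
+// connStr := DbConnectionString(dbConfig)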
diff --git a/pkg/eth/converters/common/block_rewards.go b/pkg/eth/converters/common/block_rewards.go
index ccd4838d..114c0c92 100644
--- a/pkg/eth/converters/common/block_rewards.go
+++ b/pkg/eth/converters/common/block_rewards.go
@@ -75,3 +75,43 @@ func staticRewardByBlockNumber(blockNumber int64) *big.Int {
}
return staticBlockReward
}
+
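+// CalcEthBlockReward returns the total reward owed to a block's miner:
+// the static (era-dependent) block reward, plus transaction fees,
+// plus the inclusion reward for each referenced uncle.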
+func CalcEthBlockReward(header *types.Header, uncles []*types.Header, txs types.Transactions, receipts types.Receipts) *big.Int {
+ staticBlockReward := staticRewardByBlockNumber(header.Number.Int64())
+ transactionFees := calcEthTransactionFees(txs, receipts)
+ uncleInclusionRewards := calcEthUncleInclusionRewards(header, uncles)
+ tmp := transactionFees.Add(transactionFees, uncleInclusionRewards)
+ return tmp.Add(tmp, staticBlockReward)
+}
+
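+// CalcUncleMinerReward computes the reward paid to an uncle's miner:
+// ((uncleBlockNumber + 8 - blockNumber) * staticBlockReward) / 8,
+// which is the arithmetic carried out step by step below.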
+func CalcUncleMinerReward(blockNumber, uncleBlockNumber int64) *big.Int {
+ staticBlockReward := staticRewardByBlockNumber(blockNumber)
+ rewardDiv8 := staticBlockReward.Div(staticBlockReward, big.NewInt(8))
+ mainBlock := big.NewInt(blockNumber)
+ uncleBlock := big.NewInt(uncleBlockNumber)
+ uncleBlockPlus8 := uncleBlock.Add(uncleBlock, big.NewInt(8))
+ uncleBlockPlus8MinusMainBlock := uncleBlockPlus8.Sub(uncleBlockPlus8, mainBlock)
+ return rewardDiv8.Mul(rewardDiv8, uncleBlockPlus8MinusMainBlock)
+}
+
+func calcEthTransactionFees(txs types.Transactions, receipts types.Receipts) *big.Int {
+ transactionFees := new(big.Int)
+ for i, transaction := range txs {
+ receipt := receipts[i]
+ gasPrice := big.NewInt(transaction.GasPrice().Int64())
+ gasUsed := big.NewInt(int64(receipt.GasUsed))
+ transactionFee := gasPrice.Mul(gasPrice, gasUsed)
+ transactionFees = transactionFees.Add(transactionFees, transactionFee)
+ }
+ return transactionFees
+}
+
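+// calcEthUncleInclusionRewards grants the block miner an extra 1/32 of the
+// static block reward for every uncle included in the block.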
+func calcEthUncleInclusionRewards(header *types.Header, uncles []*types.Header) *big.Int {
+ uncleInclusionRewards := new(big.Int)
+ for range uncles {
+ staticBlockReward := staticRewardByBlockNumber(header.Number.Int64())
+ staticBlockReward.Div(staticBlockReward, big.NewInt(32))
+ uncleInclusionRewards.Add(uncleInclusionRewards, staticBlockReward)
+ }
+ return uncleInclusionRewards
+}
diff --git a/pkg/eth/node/node.go b/pkg/eth/node/node.go
index 3ca2b0e1..e918d2a5 100644
--- a/pkg/eth/node/node.go
+++ b/pkg/eth/node/node.go
@@ -83,6 +83,10 @@ func makePropertiesReader(client core.RPCClient) IPropertiesReader {
}
func getNodeType(client core.RPCClient) core.NodeType {
+ // TODO: fix this
+ // These heuristics for figuring out the node type are not useful:
+ // for example, we often port-forward remote nodes to localhost,
+ // and geth does not have to expose the admin api...
if strings.Contains(client.IpcPath(), "infura") {
return core.INFURA
}
diff --git a/pkg/ipfs/dag_putters/btc_header.go b/pkg/ipfs/dag_putters/btc_header.go
index 4687becb..36aeba42 100644
--- a/pkg/ipfs/dag_putters/btc_header.go
+++ b/pkg/ipfs/dag_putters/btc_header.go
@@ -18,13 +18,18 @@ package dag_putters
import (
"fmt"
+ "strings"
- "github.com/btcsuite/btcd/wire"
+ node "github.com/ipfs/go-ipld-format"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
)
+var (
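+ // the Postgres duplicate-key error prefix; the dag putters below ignore
+ // matching errors so that re-adding already-stored data is a no-op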
+ duplicateKeyErrorString = "pq: duplicate key value violates unique constraint"
+)
+
type BtcHeaderDagPutter struct {
adder *ipfs.IPFS
}
@@ -33,17 +38,13 @@ func NewBtcHeaderDagPutter(adder *ipfs.IPFS) *BtcHeaderDagPutter {
return &BtcHeaderDagPutter{adder: adder}
}
-func (bhdp *BtcHeaderDagPutter) DagPut(raw interface{}) ([]string, error) {
- header, ok := raw.(*wire.BlockHeader)
+func (bhdp *BtcHeaderDagPutter) DagPut(n node.Node) (string, error) {
+ header, ok := n.(*ipld.BtcHeader)
if !ok {
- return nil, fmt.Errorf("BtcHeaderDagPutter expected input type %T got %T", &wire.BlockHeader{}, raw)
+ return "", fmt.Errorf("BtcHeaderDagPutter expected input type %T got %T", &ipld.BtcHeader{}, n)
}
- node, err := ipld.NewBtcHeader(header)
- if err != nil {
- return nil, err
+ if err := bhdp.adder.Add(header); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
+ return "", err
}
- if err := bhdp.adder.Add(node); err != nil {
- return nil, err
- }
- return []string{node.Cid().String()}, nil
+ return header.Cid().String(), nil
}
diff --git a/pkg/ipfs/dag_putters/btc_tx.go b/pkg/ipfs/dag_putters/btc_tx.go
index 77f0dcb7..52cc7bdd 100644
--- a/pkg/ipfs/dag_putters/btc_tx.go
+++ b/pkg/ipfs/dag_putters/btc_tx.go
@@ -18,8 +18,9 @@ package dag_putters
import (
"fmt"
+ "strings"
- "github.com/btcsuite/btcutil"
+ node "github.com/ipfs/go-ipld-format"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
@@ -33,21 +34,13 @@ func NewBtcTxDagPutter(adder *ipfs.IPFS) *BtcTxDagPutter {
return &BtcTxDagPutter{adder: adder}
}
-func (etdp *BtcTxDagPutter) DagPut(raw interface{}) ([]string, error) {
- transactions, ok := raw.([]*btcutil.Tx)
+func (etdp *BtcTxDagPutter) DagPut(n node.Node) (string, error) {
+ transaction, ok := n.(*ipld.BtcTx)
if !ok {
- return nil, fmt.Errorf("BtcTxDagPutter expected input type %T got %T", []*btcutil.Tx{}, raw)
+ return "", fmt.Errorf("BtcTxDagPutter expected input type %T got %T", &ipld.BtcTx{}, n)
}
- cids := make([]string, len(transactions))
- for i, transaction := range transactions {
- node, err := ipld.NewBtcTx(transaction.MsgTx())
- if err != nil {
- return nil, err
- }
- if err := etdp.adder.Add(node); err != nil {
- return nil, err
- }
- cids[i] = node.Cid().String()
+ if err := etdp.adder.Add(transaction); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
+ return "", err
}
- return cids, nil
+ return transaction.Cid().String(), nil
}
diff --git a/pkg/ipfs/dag_putters/btc_tx_trie.go b/pkg/ipfs/dag_putters/btc_tx_trie.go
new file mode 100644
index 00000000..039e2d98
--- /dev/null
+++ b/pkg/ipfs/dag_putters/btc_tx_trie.go
@@ -0,0 +1,46 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package dag_putters
+
+import (
+ "fmt"
+ "strings"
+
+ node "github.com/ipfs/go-ipld-format"
+
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs"
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
+)
+
+type BtcTxTrieDagPutter struct {
+ adder *ipfs.IPFS
+}
+
+func NewBtcTxTrieDagPutter(adder *ipfs.IPFS) *BtcTxTrieDagPutter {
+ return &BtcTxTrieDagPutter{adder: adder}
+}
+
+func (etdp *BtcTxTrieDagPutter) DagPut(n node.Node) (string, error) {
+ txTrieNode, ok := n.(*ipld.BtcTxTrie)
+ if !ok {
+ return "", fmt.Errorf("BtcTxTrieDagPutter expected input type %T got %T", &ipld.BtcTxTrie{}, n)
+ }
+ if err := etdp.adder.Add(txTrieNode); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
+ return "", err
+ }
+ return txTrieNode.Cid().String(), nil
+}
diff --git a/pkg/ipfs/dag_putters/eth_header.go b/pkg/ipfs/dag_putters/eth_header.go
index b06fd086..630db6b3 100644
--- a/pkg/ipfs/dag_putters/eth_header.go
+++ b/pkg/ipfs/dag_putters/eth_header.go
@@ -18,8 +18,9 @@ package dag_putters
import (
"fmt"
+ "strings"
- "github.com/ethereum/go-ethereum/core/types"
+ node "github.com/ipfs/go-ipld-format"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
@@ -33,17 +34,13 @@ func NewEthBlockHeaderDagPutter(adder *ipfs.IPFS) *EthHeaderDagPutter {
return &EthHeaderDagPutter{adder: adder}
}
-func (bhdp *EthHeaderDagPutter) DagPut(raw interface{}) ([]string, error) {
- header, ok := raw.(*types.Header)
+func (bhdp *EthHeaderDagPutter) DagPut(n node.Node) (string, error) {
+ header, ok := n.(*ipld.EthHeader)
if !ok {
- return nil, fmt.Errorf("EthHeaderDagPutter expected input type %T got %T", &types.Header{}, raw)
+ return "", fmt.Errorf("EthHeaderDagPutter expected input type %T got %T", &ipld.EthHeader{}, n)
}
- node, err := ipld.NewEthHeader(header)
- if err != nil {
- return nil, err
+ if err := bhdp.adder.Add(header); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
+ return "", err
}
- if err := bhdp.adder.Add(node); err != nil {
- return nil, err
- }
- return []string{node.Cid().String()}, nil
+ return header.Cid().String(), nil
}
diff --git a/pkg/ipfs/dag_putters/eth_receipt.go b/pkg/ipfs/dag_putters/eth_receipt.go
index a153e36d..c099f8c8 100644
--- a/pkg/ipfs/dag_putters/eth_receipt.go
+++ b/pkg/ipfs/dag_putters/eth_receipt.go
@@ -18,8 +18,9 @@ package dag_putters
import (
"fmt"
+ "strings"
- "github.com/ethereum/go-ethereum/core/types"
+ node "github.com/ipfs/go-ipld-format"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
@@ -33,21 +34,13 @@ func NewEthReceiptDagPutter(adder *ipfs.IPFS) *EthReceiptDagPutter {
return &EthReceiptDagPutter{adder: adder}
}
-func (erdp *EthReceiptDagPutter) DagPut(raw interface{}) ([]string, error) {
- receipts, ok := raw.(types.Receipts)
+func (erdp *EthReceiptDagPutter) DagPut(n node.Node) (string, error) {
+ receipt, ok := n.(*ipld.EthReceipt)
if !ok {
- return nil, fmt.Errorf("EthReceiptDagPutter expected input type %T got type %T", types.Receipts{}, raw)
+ return "", fmt.Errorf("EthReceiptDagPutter expected input type %T got type %T", &ipld.EthReceipt{}, n)
}
- cids := make([]string, len(receipts))
- for i, receipt := range receipts {
- node, err := ipld.NewReceipt((*types.ReceiptForStorage)(receipt))
- if err != nil {
- return nil, err
- }
- if err := erdp.adder.Add(node); err != nil {
- return nil, err
- }
- cids[i] = node.Cid().String()
+ if err := erdp.adder.Add(receipt); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
+ return "", err
}
- return cids, nil
+ return receipt.Cid().String(), nil
}
diff --git a/pkg/ipfs/dag_putters/eth_receipt_trie.go b/pkg/ipfs/dag_putters/eth_receipt_trie.go
new file mode 100644
index 00000000..6d982b3a
--- /dev/null
+++ b/pkg/ipfs/dag_putters/eth_receipt_trie.go
@@ -0,0 +1,46 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package dag_putters
+
+import (
+ "fmt"
+ "strings"
+
+ node "github.com/ipfs/go-ipld-format"
+
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs"
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
+)
+
+type EthRctTrieDagPutter struct {
+ adder *ipfs.IPFS
+}
+
+func NewEthRctTrieDagPutter(adder *ipfs.IPFS) *EthRctTrieDagPutter {
+ return &EthRctTrieDagPutter{adder: adder}
+}
+
+func (etdp *EthRctTrieDagPutter) DagPut(n node.Node) (string, error) {
+ rctTrieNode, ok := n.(*ipld.EthRctTrie)
+ if !ok {
+ return "", fmt.Errorf("EthRctTrieDagPutter expected input type %T got %T", &ipld.EthRctTrie{}, n)
+ }
+ if err := etdp.adder.Add(rctTrieNode); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
+ return "", err
+ }
+ return rctTrieNode.Cid().String(), nil
+}
diff --git a/pkg/ipfs/dag_putters/eth_state.go b/pkg/ipfs/dag_putters/eth_state.go
index b031f66c..9bfdbe44 100644
--- a/pkg/ipfs/dag_putters/eth_state.go
+++ b/pkg/ipfs/dag_putters/eth_state.go
@@ -18,6 +18,9 @@ package dag_putters
import (
"fmt"
+ "strings"
+
+ node "github.com/ipfs/go-ipld-format"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
@@ -31,17 +34,13 @@ func NewEthStateDagPutter(adder *ipfs.IPFS) *EthStateDagPutter {
return &EthStateDagPutter{adder: adder}
}
-func (erdp *EthStateDagPutter) DagPut(raw interface{}) ([]string, error) {
- stateNodeRLP, ok := raw.([]byte)
+func (erdp *EthStateDagPutter) DagPut(n node.Node) (string, error) {
+ stateNode, ok := n.(*ipld.EthStateTrie)
if !ok {
- return nil, fmt.Errorf("EthStateDagPutter expected input type %T got %T", []byte{}, raw)
+ return "", fmt.Errorf("EthStateDagPutter expected input type %T got %T", &ipld.EthStateTrie{}, n)
}
- node, err := ipld.FromStateTrieRLP(stateNodeRLP)
- if err != nil {
- return nil, err
+ if err := erdp.adder.Add(stateNode); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
+ return "", err
}
- if err := erdp.adder.Add(node); err != nil {
- return nil, err
- }
- return []string{node.Cid().String()}, nil
+ return stateNode.Cid().String(), nil
}
diff --git a/pkg/ipfs/dag_putters/eth_storage.go b/pkg/ipfs/dag_putters/eth_storage.go
index f1c20c9b..828de1f9 100644
--- a/pkg/ipfs/dag_putters/eth_storage.go
+++ b/pkg/ipfs/dag_putters/eth_storage.go
@@ -18,6 +18,9 @@ package dag_putters
import (
"fmt"
+ "strings"
+
+ node "github.com/ipfs/go-ipld-format"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
@@ -31,17 +34,13 @@ func NewEthStorageDagPutter(adder *ipfs.IPFS) *EthStorageDagPutter {
return &EthStorageDagPutter{adder: adder}
}
-func (erdp *EthStorageDagPutter) DagPut(raw interface{}) ([]string, error) {
- storageNodeRLP, ok := raw.([]byte)
+func (erdp *EthStorageDagPutter) DagPut(n node.Node) (string, error) {
+ storageNode, ok := n.(*ipld.EthStorageTrie)
if !ok {
- return nil, fmt.Errorf("EthStorageDagPutter expected input type %T got %T", []byte{}, raw)
+ return "", fmt.Errorf("EthStorageDagPutter expected input type %T got %T", &ipld.EthStorageTrie{}, n)
}
- node, err := ipld.FromStorageTrieRLP(storageNodeRLP)
- if err != nil {
- return nil, err
+ if err := erdp.adder.Add(storageNode); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
+ return "", err
}
- if err := erdp.adder.Add(node); err != nil {
- return nil, err
- }
- return []string{node.Cid().String()}, nil
+ return storageNode.Cid().String(), nil
}
diff --git a/pkg/ipfs/dag_putters/eth_tx.go b/pkg/ipfs/dag_putters/eth_tx.go
index b4339ac0..906e1abf 100644
--- a/pkg/ipfs/dag_putters/eth_tx.go
+++ b/pkg/ipfs/dag_putters/eth_tx.go
@@ -18,8 +18,9 @@ package dag_putters
import (
"fmt"
+ "strings"
- "github.com/ethereum/go-ethereum/core/types"
+ node "github.com/ipfs/go-ipld-format"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
@@ -33,21 +34,13 @@ func NewEthTxsDagPutter(adder *ipfs.IPFS) *EthTxsDagPutter {
return &EthTxsDagPutter{adder: adder}
}
-func (etdp *EthTxsDagPutter) DagPut(raw interface{}) ([]string, error) {
- transactions, ok := raw.(types.Transactions)
+func (etdp *EthTxsDagPutter) DagPut(n node.Node) (string, error) {
+ transaction, ok := n.(*ipld.EthTx)
if !ok {
- return nil, fmt.Errorf("EthTxsDagPutter expected input type %T got %T", types.Transactions{}, raw)
+ return "", fmt.Errorf("EthTxsDagPutter expected input type %T got %T", &ipld.EthTx{}, n)
}
- cids := make([]string, len(transactions))
- for i, transaction := range transactions {
- node, err := ipld.NewEthTx(transaction)
- if err != nil {
- return nil, err
- }
- if err := etdp.adder.Add(node); err != nil {
- return nil, err
- }
- cids[i] = node.Cid().String()
+ if err := etdp.adder.Add(transaction); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
+ return "", err
}
- return cids, nil
+ return transaction.Cid().String(), nil
}
diff --git a/pkg/ipfs/dag_putters/eth_tx_trie.go b/pkg/ipfs/dag_putters/eth_tx_trie.go
new file mode 100644
index 00000000..f29478a2
--- /dev/null
+++ b/pkg/ipfs/dag_putters/eth_tx_trie.go
@@ -0,0 +1,46 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package dag_putters
+
+import (
+ "fmt"
+ "strings"
+
+ node "github.com/ipfs/go-ipld-format"
+
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs"
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
+)
+
+type EthTxTrieDagPutter struct {
+ adder *ipfs.IPFS
+}
+
+func NewEthTxTrieDagPutter(adder *ipfs.IPFS) *EthTxTrieDagPutter {
+ return &EthTxTrieDagPutter{adder: adder}
+}
+
+func (etdp *EthTxTrieDagPutter) DagPut(n node.Node) (string, error) {
+ txTrieNode, ok := n.(*ipld.EthTxTrie)
+ if !ok {
+ return "", fmt.Errorf("EthTxTrieDagPutter expected input type %T got %T", &ipld.EthTxTrie{}, n)
+ }
+ if err := etdp.adder.Add(txTrieNode); err != nil && !strings.Contains(err.Error(), duplicateKeyErrorString) {
+ return "", err
+ }
+ return txTrieNode.Cid().String(), nil
+}
diff --git a/pkg/ipfs/ipld/btc_header.go b/pkg/ipfs/ipld/btc_header.go
index 8e630057..5d171de0 100644
--- a/pkg/ipfs/ipld/btc_header.go
+++ b/pkg/ipfs/ipld/btc_header.go
@@ -48,7 +48,7 @@ func NewBtcHeader(header *wire.BlockHeader) (*BtcHeader, error) {
return nil, err
}
rawdata := w.Bytes()
- c, err := rawdataToCid(MBitcoinHeader, rawdata, mh.DBL_SHA2_256)
+ c, err := RawdataToCid(MBitcoinHeader, rawdata, mh.DBL_SHA2_256)
if err != nil {
return nil, err
}
diff --git a/pkg/ipfs/ipld/btc_parser.go b/pkg/ipfs/ipld/btc_parser.go
new file mode 100644
index 00000000..a554b667
--- /dev/null
+++ b/pkg/ipfs/ipld/btc_parser.go
@@ -0,0 +1,74 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package ipld
+
+import (
+ "github.com/btcsuite/btcd/wire"
+ "github.com/btcsuite/btcutil"
+ node "github.com/ipfs/go-ipld-format"
+)
+
+// FromHeaderAndTxs takes a block header and txs and processes them
+// to return a set of IPLD nodes for further processing.
+func FromHeaderAndTxs(header *wire.BlockHeader, txs []*btcutil.Tx) (*BtcHeader, []*BtcTx, []*BtcTxTrie, error) {
+ var txNodes []*BtcTx
+ for _, tx := range txs {
+ txNode, err := NewBtcTx(tx.MsgTx())
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ txNodes = append(txNodes, txNode)
+ }
+ txTrie, err := mkMerkleTree(txNodes)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ headerNode, err := NewBtcHeader(header)
+ return headerNode, txNodes, txTrie, err
+}
+
+func mkMerkleTree(txs []*BtcTx) ([]*BtcTxTrie, error) {
+ layer := make([]node.Node, len(txs))
+ for i, tx := range txs {
+ layer[i] = tx
+ }
+ var out []*BtcTxTrie
+ var next []node.Node
+ for len(layer) > 1 {
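+ // as in Bitcoin's merkle tree construction, an odd-length layer duplicates its last node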
+ if len(layer)%2 != 0 {
+ layer = append(layer, layer[len(layer)-1])
+ }
+ for i := 0; i < len(layer)/2; i++ {
+ var left, right node.Node
+ left = layer[i*2]
+ right = layer[(i*2)+1]
+
+ t := &BtcTxTrie{
+ Left: &node.Link{Cid: left.Cid()},
+ Right: &node.Link{Cid: right.Cid()},
+ }
+
+ out = append(out, t)
+ next = append(next, t)
+ }
+
+ layer = next
+ next = nil
+ }
+
+ return out, nil
+}
diff --git a/pkg/ipfs/ipld/btc_tx.go b/pkg/ipfs/ipld/btc_tx.go
index b535a233..f37332d3 100644
--- a/pkg/ipfs/ipld/btc_tx.go
+++ b/pkg/ipfs/ipld/btc_tx.go
@@ -1,3 +1,19 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
package ipld
import (
@@ -7,7 +23,7 @@ import (
"strconv"
"github.com/btcsuite/btcd/wire"
- cid "github.com/ipfs/go-cid"
+ "github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
mh "github.com/multiformats/go-multihash"
)
@@ -33,7 +49,7 @@ func NewBtcTx(tx *wire.MsgTx) (*BtcTx, error) {
return nil, err
}
rawdata := w.Bytes()
- c, err := rawdataToCid(MBitcoinTx, rawdata, mh.DBL_SHA2_256)
+ c, err := RawdataToCid(MBitcoinTx, rawdata, mh.DBL_SHA2_256)
if err != nil {
return nil, err
}
diff --git a/pkg/ipfs/ipld/btc_tx_trie.go b/pkg/ipfs/ipld/btc_tx_trie.go
new file mode 100644
index 00000000..b88194a8
--- /dev/null
+++ b/pkg/ipfs/ipld/btc_tx_trie.go
@@ -0,0 +1,110 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package ipld
+
+import (
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+ node "github.com/ipfs/go-ipld-format"
+ mh "github.com/multiformats/go-multihash"
+)
+
+type BtcTxTrie struct {
+ Left *node.Link
+ Right *node.Link
+}
+
+func (t *BtcTxTrie) BTCSha() []byte {
+ return cidToHash(t.Cid())
+}
+
+func (t *BtcTxTrie) Cid() cid.Cid {
+ h, _ := mh.Sum(t.RawData(), mh.DBL_SHA2_256, -1)
+ return cid.NewCidV1(cid.BitcoinTx, h)
+}
+
+func (t *BtcTxTrie) Links() []*node.Link {
+ return []*node.Link{t.Left, t.Right}
+}
+
+func (t *BtcTxTrie) RawData() []byte {
+ out := make([]byte, 64)
+ lbytes := cidToHash(t.Left.Cid)
+ copy(out[:32], lbytes)
+
+ rbytes := cidToHash(t.Right.Cid)
+ copy(out[32:], rbytes)
+
+ return out
+}
+
+func (t *BtcTxTrie) Loggable() map[string]interface{} {
+ return map[string]interface{}{
+ "type": "bitcoin_tx_tree",
+ }
+}
+
+func (t *BtcTxTrie) Resolve(path []string) (interface{}, []string, error) {
+ if len(path) == 0 {
+ return nil, nil, fmt.Errorf("zero length path")
+ }
+
+ switch path[0] {
+ case "0":
+ return t.Left, path[1:], nil
+ case "1":
+ return t.Right, path[1:], nil
+ default:
+ return nil, nil, fmt.Errorf("no such link")
+ }
+}
+
+func (t *BtcTxTrie) Copy() node.Node {
+ nt := *t
+ return &nt
+}
+
+func (t *BtcTxTrie) ResolveLink(path []string) (*node.Link, []string, error) {
+ out, rest, err := t.Resolve(path)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ lnk, ok := out.(*node.Link)
+ if ok {
+ return lnk, rest, nil
+ }
+
+ return nil, nil, fmt.Errorf("path did not lead to link")
+}
+
+func (t *BtcTxTrie) Size() (uint64, error) {
+ return uint64(len(t.RawData())), nil
+}
+
+func (t *BtcTxTrie) Stat() (*node.NodeStat, error) {
+ return &node.NodeStat{}, nil
+}
+
+func (t *BtcTxTrie) String() string {
+ return fmt.Sprintf("[bitcoin transaction tree]")
+}
+
+func (t *BtcTxTrie) Tree(p string, depth int) []string {
+ return []string{"0", "1"}
+}
diff --git a/pkg/ipfs/ipld/eth_header.go b/pkg/ipfs/ipld/eth_header.go
index 3f0ae730..c33931d4 100644
--- a/pkg/ipfs/ipld/eth_header.go
+++ b/pkg/ipfs/ipld/eth_header.go
@@ -20,7 +20,6 @@ import (
"encoding/json"
"fmt"
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ipfs/go-cid"
@@ -49,7 +48,7 @@ func NewEthHeader(header *types.Header) (*EthHeader, error) {
if err != nil {
return nil, err
}
- c, err := rawdataToCid(MEthHeader, headerRLP, mh.KECCAK_256)
+ c, err := RawdataToCid(MEthHeader, headerRLP, mh.KECCAK_256)
if err != nil {
return nil, err
}
@@ -60,6 +59,24 @@ func NewEthHeader(header *types.Header) (*EthHeader, error) {
}, nil
}
+/*
+ OUTPUT
+*/
+
+// DecodeEthHeader takes a cid and its raw binary data
+// from IPFS and returns an EthHeader object for further processing.
+func DecodeEthHeader(c cid.Cid, b []byte) (*EthHeader, error) {
+ var h types.Header
+ if err := rlp.DecodeBytes(b, &h); err != nil {
+ return nil, err
+ }
+ return &EthHeader{
+ Header: &h,
+ cid: c,
+ rawdata: b,
+ }, nil
+}
+
/*
Block INTERFACE
*/
@@ -237,38 +254,3 @@ func (b *EthHeader) MarshalJSON() ([]byte, error) {
}
return json.Marshal(out)
}
-
-// objJSONBlock defines the output of the JSON RPC API for either
-// "eth_BlockByHash" or "eth_BlockByHeader".
-type objJSONBlock struct {
- Result objJSONBlockResult `json:"result"`
-}
-
-// objJSONBLockResult is the nested struct that takes
-// the contents of the JSON field "result".
-type objJSONBlockResult struct {
- types.Header // Use its fields and unmarshaler
- *objJSONBlockResultExt // Add these fields to the parsing
-}
-
-// objJSONBLockResultExt facilitates the composition
-// of the field "result", adding to the
-// `types.Header` fields, both ommers (their hashes) and transactions.
-type objJSONBlockResultExt struct {
- OmmerHashes []common.Hash `json:"uncles"`
- Transactions []*types.Transaction `json:"transactions"`
-}
-
-// UnmarshalJSON overrides the function types.Header.UnmarshalJSON, allowing us
-// to parse the fields of Header, plus ommer hashes and transactions.
-// (yes, ommer hashes. You will need to "eth_getUncleCountByBlockHash" per each ommer)
-func (o *objJSONBlockResult) UnmarshalJSON(input []byte) error {
- err := o.Header.UnmarshalJSON(input)
- if err != nil {
- return err
- }
-
- o.objJSONBlockResultExt = &objJSONBlockResultExt{}
- err = json.Unmarshal(input, o.objJSONBlockResultExt)
- return err
-}
diff --git a/pkg/ipfs/ipld/eth_parser.go b/pkg/ipfs/ipld/eth_parser.go
new file mode 100644
index 00000000..f02d7d40
--- /dev/null
+++ b/pkg/ipfs/ipld/eth_parser.go
@@ -0,0 +1,97 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package ipld
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+// FromBlockAndReceipts takes a block and processes it
+// to return a set of IPLD nodes for further processing.
+func FromBlockAndReceipts(block *types.Block, receipts []*types.Receipt) (*EthHeader, []*EthHeader, []*EthTx, []*EthTxTrie, []*EthReceipt, []*EthRctTrie, error) {
+ // Process the header
+ headerNode, err := NewEthHeader(block.Header())
+ if err != nil {
+ return nil, nil, nil, nil, nil, nil, err
+ }
+ // Process the uncles
+ uncleNodes := make([]*EthHeader, len(block.Uncles()))
+ for i, uncle := range block.Uncles() {
+ uncleNode, err := NewEthHeader(uncle)
+ if err != nil {
+ return nil, nil, nil, nil, nil, nil, err
+ }
+ uncleNodes[i] = uncleNode
+ }
+ // Process the txs
+ ethTxNodes, ethTxTrieNodes, err := processTransactions(block.Transactions(),
+ block.Header().TxHash[:])
+ if err != nil {
+ return nil, nil, nil, nil, nil, nil, err
+ }
+ // Process the receipts
+ ethRctNodes, ethRctTrieNodes, err := processReceipts(receipts,
+ block.Header().ReceiptHash[:])
+ return headerNode, uncleNodes, ethTxNodes, ethTxTrieNodes, ethRctNodes, ethRctTrieNodes, err
+}
+
+// processTransactions takes the transactions found in a parsed block body
+// and returns IPLD node slices for eth-tx and eth-tx-trie
+func processTransactions(txs []*types.Transaction, expectedTxRoot []byte) ([]*EthTx, []*EthTxTrie, error) {
+ var ethTxNodes []*EthTx
+ transactionTrie := newTxTrie()
+
+ for idx, tx := range txs {
+ ethTx, err := NewEthTx(tx)
+ if err != nil {
+ return nil, nil, err
+ }
+ ethTxNodes = append(ethTxNodes, ethTx)
+ transactionTrie.add(idx, ethTx.RawData())
+ }
+
+ if !bytes.Equal(transactionTrie.rootHash(), expectedTxRoot) {
+ return nil, nil, fmt.Errorf("wrong transaction hash computed")
+ }
+
+ return ethTxNodes, transactionTrie.getNodes(), nil
+}
+
+// processReceipts takes in receipts
+// and returns IPLD node slices for eth-rct and eth-rct-trie
+func processReceipts(rcts []*types.Receipt, expectedRctRoot []byte) ([]*EthReceipt, []*EthRctTrie, error) {
+ var ethRctNodes []*EthReceipt
+ receiptTrie := newRctTrie()
+
+ for idx, rct := range rcts {
+ ethRct, err := NewReceipt(rct)
+ if err != nil {
+ return nil, nil, err
+ }
+ ethRctNodes = append(ethRctNodes, ethRct)
+ receiptTrie.add(idx, ethRct.RawData())
+ }
+
+ if !bytes.Equal(receiptTrie.rootHash(), expectedRctRoot) {
+ return nil, nil, fmt.Errorf("wrong receipt hash computed")
+ }
+
+ return ethRctNodes, receiptTrie.getNodes(), nil
+}
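+
+// A usage sketch (hypothetical variable names): decompose a block and its
+// receipts into IPLD nodes; the tx and receipt roots are validated internally:
+//
+// headerNode, uncleNodes, txNodes, txTrie, rctNodes, rctTrie, err :=
+// FromBlockAndReceipts(block, receipts)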
diff --git a/pkg/ipfs/ipld/eth_receipt.go b/pkg/ipfs/ipld/eth_receipt.go
index 99915f1f..cfa46b36 100644
--- a/pkg/ipfs/ipld/eth_receipt.go
+++ b/pkg/ipfs/ipld/eth_receipt.go
@@ -29,7 +29,7 @@ import (
)
type EthReceipt struct {
- *types.ReceiptForStorage
+ *types.Receipt
rawdata []byte
cid cid.Cid
@@ -43,19 +43,37 @@ var _ node.Node = (*EthReceipt)(nil)
*/
-// NewReceipt converts a types.ReceiptForStorage to an EthReceipt IPLD node
+// NewReceipt converts a types.Receipt to an EthReceipt IPLD node
-func NewReceipt(receipt *types.ReceiptForStorage) (*EthReceipt, error) {
+func NewReceipt(receipt *types.Receipt) (*EthReceipt, error) {
receiptRLP, err := rlp.EncodeToBytes(receipt)
if err != nil {
return nil, err
}
- c, err := rawdataToCid(MEthTxReceipt, receiptRLP, mh.KECCAK_256)
+ c, err := RawdataToCid(MEthTxReceipt, receiptRLP, mh.KECCAK_256)
if err != nil {
return nil, err
}
return &EthReceipt{
- ReceiptForStorage: receipt,
- cid: c,
- rawdata: receiptRLP,
+ Receipt: receipt,
+ cid: c,
+ rawdata: receiptRLP,
+ }, nil
+}
+
+/*
+ OUTPUT
+*/
+
+// DecodeEthReceipt takes a cid and its raw binary data
+// from IPFS and returns an EthReceipt object for further processing.
+func DecodeEthReceipt(c cid.Cid, b []byte) (*EthReceipt, error) {
+ var r types.Receipt
+ if err := rlp.DecodeBytes(b, &r); err != nil {
+ return nil, err
+ }
+ return &EthReceipt{
+ Receipt: &r,
+ cid: c,
+ rawdata: b,
}, nil
}
@@ -158,7 +176,7 @@ func (r *EthReceipt) Stat() (*node.NodeStat, error) {
// Size will go away. It is here to comply with the interface.
func (r *EthReceipt) Size() (uint64, error) {
- return strconv.ParseUint((*types.Receipt)(r.ReceiptForStorage).Size().String(), 10, 64)
+ return strconv.ParseUint(r.Receipt.Size().String(), 10, 64)
}
/*
diff --git a/pkg/ipfs/ipld/eth_receipt_trie.go b/pkg/ipfs/ipld/eth_receipt_trie.go
new file mode 100644
index 00000000..6a1b7e40
--- /dev/null
+++ b/pkg/ipfs/ipld/eth_receipt_trie.go
@@ -0,0 +1,152 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package ipld
+
+import (
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+ node "github.com/ipfs/go-ipld-format"
+ "github.com/multiformats/go-multihash"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+// EthRctTrie (eth-tx-receipt-trie codec 0x94) represents
+// a node from the receipt trie in ethereum.
+type EthRctTrie struct {
+ *TrieNode
+}
+
+// Static (compile time) check that EthRctTrie satisfies the node.Node interface.
+var _ node.Node = (*EthRctTrie)(nil)
+
+/*
+ INPUT
+*/
+
+// To create a proper trie of the eth-rct-trie objects, it is required
+// to input all receipts belonging to a block in a single step.
+// The receipts are added and their trie is built at
+// block body parsing time.
+
+/*
+ OUTPUT
+*/
+
+// DecodeEthRctTrie returns an EthRctTrie object from its cid and rawdata.
+func DecodeEthRctTrie(c cid.Cid, b []byte) (*EthRctTrie, error) {
+ tn, err := decodeTrieNode(c, b, decodeEthRctTrieLeaf)
+ if err != nil {
+ return nil, err
+ }
+ return &EthRctTrie{TrieNode: tn}, nil
+}
+
+// decodeEthRctTrieLeaf parses an eth-rct-trie leaf
+// from decoded RLP elements
+func decodeEthRctTrieLeaf(i []interface{}) ([]interface{}, error) {
+ var r types.Receipt
+ err := rlp.DecodeBytes(i[1].([]byte), &r)
+ if err != nil {
+ return nil, err
+ }
+ c, err := RawdataToCid(MEthTxReceipt, i[1].([]byte), multihash.KECCAK_256)
+ if err != nil {
+ return nil, err
+ }
+ return []interface{}{
+ i[0].([]byte),
+ &EthReceipt{
+ Receipt: &r,
+ cid: c,
+ rawdata: i[1].([]byte),
+ },
+ }, nil
+}
+
+/*
+ Block INTERFACE
+*/
+
+// RawData returns the RLP-encoded binary of this trie node.
+func (t *EthRctTrie) RawData() []byte {
+ return t.rawdata
+}
+
+// Cid returns the cid of this trie node.
+func (t *EthRctTrie) Cid() cid.Cid {
+ return t.cid
+}
+
+// String is a helper for output
+func (t *EthRctTrie) String() string {
+ return fmt.Sprintf("", t.cid)
+}
+
+// Loggable returns in a map the type of IPLD Link.
+func (t *EthRctTrie) Loggable() map[string]interface{} {
+ return map[string]interface{}{
+ "type": "eth-rct-trie",
+ }
+}
+
+/*
+ EthRctTrie functions
+*/
+
+// rctTrie wraps a localTrie for use on the receipt trie.
+type rctTrie struct {
+ *localTrie
+}
+
+// newRctTrie initializes and returns a rctTrie.
+func newRctTrie() *rctTrie {
+ return &rctTrie{
+ localTrie: newLocalTrie(),
+ }
+}
+
+// getNodes invokes the localTrie, which computes the root hash of the
+// receipt trie and returns its database keys, to return a slice
+// of EthRctTrie nodes.
+func (rt *rctTrie) getNodes() []*EthRctTrie {
+ keys := rt.getKeys()
+ var out []*EthRctTrie
+ it := rt.trie.NodeIterator([]byte{})
+ // walk the full trie so that every node is resolved before the stored
+ // values are read back out of the underlying db
+ for it.Next(true) {
+ }
+ for _, k := range keys {
+ rawdata, err := rt.db.Get(k)
+ if err != nil {
+ panic(err)
+ }
+ c, err := RawdataToCid(MEthTxReceiptTrie, rawdata, multihash.KECCAK_256)
+ if err != nil {
+ return nil
+ }
+ tn := &TrieNode{
+ cid: c,
+ rawdata: rawdata,
+ }
+ out = append(out, &EthRctTrie{TrieNode: tn})
+ }
+
+ return out
+}
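An in-package sketch (function name hypothetical) of the rctTrie lifecycle, mirroring what processReceipts does internally: receipts are added by index and the trie nodes are then materialized for publishing.

```go
package ipld

import "github.com/ethereum/go-ethereum/core/types"

// buildRctTrie adds each receipt to a fresh receipt trie and returns the
// resulting eth-rct-trie nodes.
func buildRctTrie(rcts []*types.Receipt) ([]*EthRctTrie, error) {
	t := newRctTrie()
	for i, rct := range rcts {
		ethRct, err := NewReceipt(rct)
		if err != nil {
			return nil, err
		}
		t.add(i, ethRct.RawData())
	}
	// t.rootHash() can be compared against header.ReceiptHash as a sanity check
	return t.getNodes(), nil
}
```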
diff --git a/pkg/ipfs/ipld/eth_state.go b/pkg/ipfs/ipld/eth_state.go
index c01765bb..a127f956 100644
--- a/pkg/ipfs/ipld/eth_state.go
+++ b/pkg/ipfs/ipld/eth_state.go
@@ -21,14 +21,15 @@ import (
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
- mh "github.com/multiformats/go-multihash"
+ "github.com/multiformats/go-multihash"
+
+ "github.com/ethereum/go-ethereum/rlp"
)
// EthStateTrie (eth-state-trie, codec 0x96), represents
-// a node from the state trie in ethereum.
+// a node from the satte trie in ethereum.
type EthStateTrie struct {
- cid cid.Cid
- rawdata []byte
+ *TrieNode
}
// Static (compile time) check that EthStateTrie satisfies the node.Node interface.
@@ -38,16 +39,51 @@ var _ node.Node = (*EthStateTrie)(nil)
INPUT
*/
-// FromStateTrieRLP takes the RLP bytes of an ethereum
+// FromStateTrieRLP takes the RLP representation of an ethereum
// state trie node to return it as an IPLD node for further processing.
-func FromStateTrieRLP(stateNodeRLP []byte) (*EthStateTrie, error) {
- c, err := rawdataToCid(MEthStateTrie, stateNodeRLP, mh.KECCAK_256)
+func FromStateTrieRLP(raw []byte) (*EthStateTrie, error) {
+ c, err := RawdataToCid(MEthStateTrie, raw, multihash.KECCAK_256)
if err != nil {
return nil, err
}
- return &EthStateTrie{
- cid: c,
- rawdata: stateNodeRLP,
+ // Let's go the extra mile and process the nodeKind and
+ // its elements, in case somebody needs this function
+ // to parse an RLP element from the filesystem
+ return DecodeEthStateTrie(c, raw)
+}
+
+/*
+ OUTPUT
+*/
+
+// DecodeEthStateTrie returns an EthStateTrie object from its cid and rawdata.
+func DecodeEthStateTrie(c cid.Cid, b []byte) (*EthStateTrie, error) {
+ tn, err := decodeTrieNode(c, b, decodeEthStateTrieLeaf)
+ if err != nil {
+ return nil, err
+ }
+ return &EthStateTrie{TrieNode: tn}, nil
+}
+
+// decodeEthStateTrieLeaf parses an eth-state-trie leaf
+// from decoded RLP elements
+func decodeEthStateTrieLeaf(i []interface{}) ([]interface{}, error) {
+ var account EthAccount
+ err := rlp.DecodeBytes(i[1].([]byte), &account)
+ if err != nil {
+ return nil, err
+ }
+ c, err := RawdataToCid(MEthAccountSnapshot, i[1].([]byte), multihash.KECCAK_256)
+ if err != nil {
+ return nil, err
+ }
+ return []interface{}{
+ i[0].([]byte),
+ &EthAccountSnapshot{
+ EthAccount: &account,
+ cid: c,
+ rawdata: i[1].([]byte),
+ },
}, nil
}
@@ -70,35 +106,6 @@ func (st *EthStateTrie) String() string {
return fmt.Sprintf("", st.cid)
}
-// Copy will go away. It is here to comply with the Node interface.
-func (*EthStateTrie) Copy() node.Node {
- panic("implement me")
-}
-
-func (*EthStateTrie) Links() []*node.Link {
- panic("implement me")
-}
-
-func (*EthStateTrie) Resolve(path []string) (interface{}, []string, error) {
- panic("implement me")
-}
-
-func (*EthStateTrie) ResolveLink(path []string) (*node.Link, []string, error) {
- panic("implement me")
-}
-
-func (*EthStateTrie) Size() (uint64, error) {
- panic("implement me")
-}
-
-func (*EthStateTrie) Stat() (*node.NodeStat, error) {
- panic("implement me")
-}
-
-func (*EthStateTrie) Tree(path string, depth int) []string {
- panic("implement me")
-}
-
// Loggable returns in a map the type of IPLD Link.
func (st *EthStateTrie) Loggable() map[string]interface{} {
return map[string]interface{}{
diff --git a/pkg/ipfs/ipld/eth_storage.go b/pkg/ipfs/ipld/eth_storage.go
index 6d9e1cbe..779cad4d 100644
--- a/pkg/ipfs/ipld/eth_storage.go
+++ b/pkg/ipfs/ipld/eth_storage.go
@@ -21,14 +21,13 @@ import (
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
- mh "github.com/multiformats/go-multihash"
+ "github.com/multiformats/go-multihash"
)
// EthStorageTrie (eth-storage-trie, codec 0x98), represents
// a node from the storage trie in ethereum.
type EthStorageTrie struct {
- cid cid.Cid
- rawdata []byte
+ *TrieNode
}
// Static (compile time) check that EthStorageTrie satisfies the node.Node interface.
@@ -38,16 +37,39 @@ var _ node.Node = (*EthStorageTrie)(nil)
INPUT
*/
-// FromStorageTrieRLP takes the RLP bytes of an ethereum
+// FromStorageTrieRLP takes the RLP representation of an ethereum
// storage trie node to return it as an IPLD node for further processing.
-func FromStorageTrieRLP(storageNodeRLP []byte) (*EthStorageTrie, error) {
- c, err := rawdataToCid(MEthStorageTrie, storageNodeRLP, mh.KECCAK_256)
+func FromStorageTrieRLP(raw []byte) (*EthStorageTrie, error) {
+ c, err := RawdataToCid(MEthStorageTrie, raw, multihash.KECCAK_256)
if err != nil {
return nil, err
}
- return &EthStorageTrie{
- cid: c,
- rawdata: storageNodeRLP,
+
+ // Let's go the extra mile and process the nodeKind and
+ // its elements, in case somebody needs this function
+ // to parse an RLP element from the filesystem
+ return DecodeEthStorageTrie(c, raw)
+}
+
+/*
+ OUTPUT
+*/
+
+// DecodeEthStorageTrie returns an EthStorageTrie object from its cid and rawdata.
+func DecodeEthStorageTrie(c cid.Cid, b []byte) (*EthStorageTrie, error) {
+ tn, err := decodeTrieNode(c, b, decodeEthStorageTrieLeaf)
+ if err != nil {
+ return nil, err
+ }
+ return &EthStorageTrie{TrieNode: tn}, nil
+}
+
+// decodeEthStorageTrieLeaf parses an eth-storage-trie leaf
+// from decoded RLP elements
+func decodeEthStorageTrieLeaf(i []interface{}) ([]interface{}, error) {
+ return []interface{}{
+ i[0].([]byte),
+ i[1].([]byte),
}, nil
}
@@ -70,35 +92,6 @@ func (st *EthStorageTrie) String() string {
return fmt.Sprintf("", st.cid)
}
-// Copy will go away. It is here to comply with the Node interface.
-func (*EthStorageTrie) Copy() node.Node {
- panic("implement me")
-}
-
-func (*EthStorageTrie) Links() []*node.Link {
- panic("implement me")
-}
-
-func (*EthStorageTrie) Resolve(path []string) (interface{}, []string, error) {
- panic("implement me")
-}
-
-func (*EthStorageTrie) ResolveLink(path []string) (*node.Link, []string, error) {
- panic("implement me")
-}
-
-func (*EthStorageTrie) Size() (uint64, error) {
- panic("implement me")
-}
-
-func (*EthStorageTrie) Stat() (*node.NodeStat, error) {
- panic("implement me")
-}
-
-func (*EthStorageTrie) Tree(path string, depth int) []string {
- panic("implement me")
-}
-
// Loggable returns in a map the type of IPLD Link.
func (st *EthStorageTrie) Loggable() map[string]interface{} {
return map[string]interface{}{
diff --git a/pkg/ipfs/ipld/eth_tx.go b/pkg/ipfs/ipld/eth_tx.go
index a18bfa39..4fc4d20a 100644
--- a/pkg/ipfs/ipld/eth_tx.go
+++ b/pkg/ipfs/ipld/eth_tx.go
@@ -50,7 +50,7 @@ func NewEthTx(tx *types.Transaction) (*EthTx, error) {
if err != nil {
return nil, err
}
- c, err := rawdataToCid(MEthTx, txRLP, mh.KECCAK_256)
+ c, err := RawdataToCid(MEthTx, txRLP, mh.KECCAK_256)
if err != nil {
return nil, err
}
diff --git a/pkg/ipfs/ipld/eth_tx_trie.go b/pkg/ipfs/ipld/eth_tx_trie.go
new file mode 100644
index 00000000..6f106f6d
--- /dev/null
+++ b/pkg/ipfs/ipld/eth_tx_trie.go
@@ -0,0 +1,152 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package ipld
+
+import (
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+ node "github.com/ipfs/go-ipld-format"
+ "github.com/multiformats/go-multihash"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+// EthTxTrie (eth-tx-trie codec 0x92) represents
+// a node from the transaction trie in ethereum.
+type EthTxTrie struct {
+ *TrieNode
+}
+
+// Static (compile time) check that EthTxTrie satisfies the node.Node interface.
+var _ node.Node = (*EthTxTrie)(nil)
+
+/*
+ INPUT
+*/
+
+// To create a proper trie of the eth-tx-trie objects, it is required
+// to input all transactions belonging to a block in a single step.
+// The transactions are added and their trie is built at
+// block body parsing time.
+
+/*
+ OUTPUT
+*/
+
+// DecodeEthTxTrie returns an EthTxTrie object from its cid and rawdata.
+func DecodeEthTxTrie(c cid.Cid, b []byte) (*EthTxTrie, error) {
+ tn, err := decodeTrieNode(c, b, decodeEthTxTrieLeaf)
+ if err != nil {
+ return nil, err
+ }
+ return &EthTxTrie{TrieNode: tn}, nil
+}
+
+// decodeEthTxTrieLeaf parses an eth-tx-trie leaf
+// from decoded RLP elements
+func decodeEthTxTrieLeaf(i []interface{}) ([]interface{}, error) {
+ var t types.Transaction
+ err := rlp.DecodeBytes(i[1].([]byte), &t)
+ if err != nil {
+ return nil, err
+ }
+ c, err := RawdataToCid(MEthTx, i[1].([]byte), multihash.KECCAK_256)
+ if err != nil {
+ return nil, err
+ }
+ return []interface{}{
+ i[0].([]byte),
+ &EthTx{
+ Transaction: &t,
+ cid: c,
+ rawdata: i[1].([]byte),
+ },
+ }, nil
+}
+
+/*
+ Block INTERFACE
+*/
+
+// RawData returns the RLP-encoded binary of this trie node.
+func (t *EthTxTrie) RawData() []byte {
+ return t.rawdata
+}
+
+// Cid returns the cid of this trie node.
+func (t *EthTxTrie) Cid() cid.Cid {
+ return t.cid
+}
+
+// String is a helper for output
+func (t *EthTxTrie) String() string {
+ return fmt.Sprintf("", t.cid)
+}
+
+// Loggable returns in a map the type of IPLD Link.
+func (t *EthTxTrie) Loggable() map[string]interface{} {
+ return map[string]interface{}{
+ "type": "eth-tx-trie",
+ }
+}
+
+/*
+ EthTxTrie functions
+*/
+
+// txTrie wraps a localTrie for use on the transaction trie.
+type txTrie struct {
+ *localTrie
+}
+
+// newTxTrie initializes and returns a txTrie.
+func newTxTrie() *txTrie {
+ return &txTrie{
+ localTrie: newLocalTrie(),
+ }
+}
+
+// getNodes invokes the localTrie, which computes the root hash of the
+// transaction trie and returns its database keys, to return a slice
+// of EthTxTrie nodes.
+func (tt *txTrie) getNodes() []*EthTxTrie {
+ keys := tt.getKeys()
+ var out []*EthTxTrie
+ it := tt.trie.NodeIterator([]byte{})
+ // walk the full trie so that every node is resolved before the stored
+ // values are read back out of the underlying db
+ for it.Next(true) {
+ }
+ for _, k := range keys {
+ rawdata, err := tt.db.Get(k)
+ if err != nil {
+ panic(err)
+ }
+ c, err := RawdataToCid(MEthTxTrie, rawdata, multihash.KECCAK_256)
+ if err != nil {
+ return nil
+ }
+ tn := &TrieNode{
+ cid: c,
+ rawdata: rawdata,
+ }
+ out = append(out, &EthTxTrie{TrieNode: tn})
+ }
+
+ return out
+}
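A hedged sketch (function name hypothetical) of consuming the decode path above: given an eth-tx-trie block's cid and raw bytes, however they were retrieved from IPFS, decode the node and list the children that a branch node exposes as links.

```go
package ipld

import (
	"fmt"

	"github.com/ipfs/go-cid"
)

// inspectTxTrieNode decodes a fetched eth-tx-trie block and prints the
// cids of its child links (empty for leaf nodes).
func inspectTxTrieNode(c cid.Cid, raw []byte) error {
	n, err := DecodeEthTxTrie(c, raw)
	if err != nil {
		return err
	}
	for _, l := range n.Links() {
		fmt.Println("child:", l.Cid)
	}
	return nil
}
```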
diff --git a/pkg/ipfs/ipld/shared.go b/pkg/ipfs/ipld/shared.go
index a47debe7..e8358f7b 100644
--- a/pkg/ipfs/ipld/shared.go
+++ b/pkg/ipfs/ipld/shared.go
@@ -17,7 +17,13 @@
package ipld
import (
+ "bytes"
+
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
"github.com/ipfs/go-cid"
mh "github.com/multiformats/go-multihash"
)
@@ -40,9 +46,9 @@ const (
MBitcoinTx = 0xb1
)
-// rawdataToCid takes the desired codec and a slice of bytes
+// RawdataToCid takes the desired codec and a slice of bytes
// and returns the proper cid of the object.
-func rawdataToCid(codec uint64, rawdata []byte, multiHash uint64) (cid.Cid, error) {
+func RawdataToCid(codec uint64, rawdata []byte, multiHash uint64) (cid.Cid, error) {
c, err := cid.Prefix{
Codec: codec,
Version: 1,
@@ -87,3 +93,59 @@ func sha256ToCid(codec uint64, h []byte) cid.Cid {
return cid.NewCidV1(codec, hash)
}
+
+// getRLP encodes the given object to RLP returning its bytes.
+func getRLP(object interface{}) []byte {
+ buf := new(bytes.Buffer)
+ if err := rlp.Encode(buf, object); err != nil {
+ panic(err)
+ }
+
+ return buf.Bytes()
+}
+
+// localTrie wraps a go-ethereum trie and its underlying memory db.
+// It contributes to the creation of the trie node objects.
+type localTrie struct {
+ keys [][]byte
+ db ethdb.Database
+ trie *trie.Trie
+}
+
+// newLocalTrie initializes and returns a localTrie object
+func newLocalTrie() *localTrie {
+ var err error
+ lt := &localTrie{}
+ lt.db = rawdb.NewMemoryDatabase()
+ lt.trie, err = trie.New(common.Hash{}, trie.NewDatabase(lt.db))
+ if err != nil {
+ panic(err)
+ }
+ return lt
+}
+
+// add receives the index of an object and its rawdata value
+// and inserts it into the localTrie
+func (lt *localTrie) add(idx int, rawdata []byte) {
+ key, err := rlp.EncodeToBytes(uint(idx))
+ if err != nil {
+ panic(err)
+ }
+ lt.keys = append(lt.keys, key)
+ if err := lt.db.Put(key, rawdata); err != nil {
+ panic(err)
+ }
+ lt.trie.Update(key, rawdata)
+}
+
+// rootHash returns the computed trie root.
+// Useful for sanity checks on parsed data.
+func (lt *localTrie) rootHash() []byte {
+ return lt.trie.Hash().Bytes()
+}
+
+// getKeys returns the stored keys of the memory database
+// of the localTrie for further processing.
+func (lt *localTrie) getKeys() [][]byte {
+ return lt.keys
+}
diff --git a/pkg/ipfs/ipld/trie_node.go b/pkg/ipfs/ipld/trie_node.go
new file mode 100644
index 00000000..0a35ad77
--- /dev/null
+++ b/pkg/ipfs/ipld/trie_node.go
@@ -0,0 +1,444 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package ipld
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ipfs/go-cid"
+ node "github.com/ipfs/go-ipld-format"
+)
+
+// TrieNode is the general abstraction for
+// ethereum IPLD trie nodes.
+type TrieNode struct {
+ // leaf, extension or branch
+ nodeKind string
+
+ // If leaf or extension: [0] is key, [1] is val.
+ // If branch: [0] - [16] are children.
+ elements []interface{}
+
+ // IPLD block information
+ cid cid.Cid
+ rawdata []byte
+}
+
+/*
+ OUTPUT
+*/
+
+type trieNodeLeafDecoder func([]interface{}) ([]interface{}, error)
+
+// decodeTrieNode returns a TrieNode object from an IPLD block's
+// cid and rawdata.
+func decodeTrieNode(c cid.Cid, b []byte,
+ leafDecoder trieNodeLeafDecoder) (*TrieNode, error) {
+ var (
+ i, decoded, elements []interface{}
+ nodeKind string
+ err error
+ )
+
+ if err = rlp.DecodeBytes(b, &i); err != nil {
+ return nil, err
+ }
+
+ codec := c.Type()
+ switch len(i) {
+ case 2:
+ nodeKind, decoded, err = decodeCompactKey(i)
+ if err != nil {
+ return nil, err
+ }
+
+ switch nodeKind {
+ case "extension":
+ elements, err = parseTrieNodeExtension(decoded, codec)
+ case "leaf":
+ elements, err = leafDecoder(decoded)
+ default:
+ return nil, fmt.Errorf("unexpected nodeKind returned from decoder")
+ }
+ // surface any error from the extension/leaf parsing above,
+ // which was previously left unchecked
+ if err != nil {
+ return nil, err
+ }
+ case 17:
+ nodeKind = "branch"
+ elements, err = parseTrieNodeBranch(i, codec)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("unknown trie node type")
+ }
+
+ return &TrieNode{
+ nodeKind: nodeKind,
+ elements: elements,
+ rawdata: b,
+ cid: c,
+ }, nil
+}
+
+// decodeCompactKey takes a compact key, and returns its nodeKind and value.
+func decodeCompactKey(i []interface{}) (string, []interface{}, error) {
+ first := i[0].([]byte)
+ last := i[1].([]byte)
+
+ switch first[0] / 16 {
+ case '\x00':
+ return "extension", []interface{}{
+ nibbleToByte(first)[2:],
+ last,
+ }, nil
+ case '\x01':
+ return "extension", []interface{}{
+ nibbleToByte(first)[1:],
+ last,
+ }, nil
+ case '\x02':
+ return "leaf", []interface{}{
+ nibbleToByte(first)[2:],
+ last,
+ }, nil
+ case '\x03':
+ return "leaf", []interface{}{
+ nibbleToByte(first)[1:],
+ last,
+ }, nil
+ default:
+ return "", nil, fmt.Errorf("unknown hex prefix")
+ }
+}
+
+// parseTrieNodeExtension helper improves readability
+func parseTrieNodeExtension(i []interface{}, codec uint64) ([]interface{}, error) {
+ return []interface{}{
+ i[0].([]byte),
+ keccak256ToCid(codec, i[1].([]byte)),
+ }, nil
+}
+
+// parseTrieNodeBranch helper improves readability
+func parseTrieNodeBranch(i []interface{}, codec uint64) ([]interface{}, error) {
+ var out []interface{}
+
+ for _, vi := range i {
+ v, ok := vi.([]byte)
+ // Sometimes this throws "panic: interface conversion: interface {} is []interface {}, not []uint8"
+ // Figure out why, and if it is okay to continue
+ if !ok {
+ continue
+ }
+
+ switch len(v) {
+ case 0:
+ out = append(out, nil)
+ case 32:
+ out = append(out, keccak256ToCid(codec, v))
+ default:
+ return nil, fmt.Errorf("unrecognized object: %v", v)
+ }
+ }
+
+ return out, nil
+}
+
+/*
+ Node INTERFACE
+*/
+
+// Resolve resolves a path through this node, stopping at any link boundary
+// and returning the object found as well as the remaining path to traverse
+func (t *TrieNode) Resolve(p []string) (interface{}, []string, error) {
+ switch t.nodeKind {
+ case "extension":
+ return t.resolveTrieNodeExtension(p)
+ case "leaf":
+ return t.resolveTrieNodeLeaf(p)
+ case "branch":
+ return t.resolveTrieNodeBranch(p)
+ default:
+ return nil, nil, fmt.Errorf("nodeKind case not implemented")
+ }
+}
+
+// Tree lists all paths within the object under 'path', and up to the given depth.
+// To list the entire object (similar to `find .`) pass "" and -1
+func (t *TrieNode) Tree(p string, depth int) []string {
+ if p != "" || depth == 0 {
+ return nil
+ }
+
+ var out []string
+
+ switch t.nodeKind {
+ case "extension":
+ var val string
+ for _, e := range t.elements[0].([]byte) {
+ val += fmt.Sprintf("%x", e)
+ }
+ return []string{val}
+ case "branch":
+ for i, elem := range t.elements {
+ // elements hold cid.Cid values (see parseTrieNodeBranch)
+ if _, ok := elem.(cid.Cid); ok {
+ out = append(out, fmt.Sprintf("%x", i))
+ }
+ }
+ return out
+
+ default:
+ return nil
+ }
+}
+
+// ResolveLink is a helper function that calls resolve and asserts the
+// output is a link
+func (t *TrieNode) ResolveLink(p []string) (*node.Link, []string, error) {
+ obj, rest, err := t.Resolve(p)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ lnk, ok := obj.(*node.Link)
+ if !ok {
+ return nil, nil, fmt.Errorf("was not a link")
+ }
+
+ return lnk, rest, nil
+}
+
+// Copy will go away. It is here to comply with the interface.
+func (t *TrieNode) Copy() node.Node {
+ panic("dont use this yet")
+}
+
+// Links is a helper function that returns all links within this object
+func (t *TrieNode) Links() []*node.Link {
+ var out []*node.Link
+
+ for _, i := range t.elements {
+ c, ok := i.(cid.Cid)
+ if ok {
+ out = append(out, &node.Link{Cid: c})
+ }
+ }
+
+ return out
+}
+
+// Stat will go away. It is here to comply with the interface.
+func (t *TrieNode) Stat() (*node.NodeStat, error) {
+ return &node.NodeStat{}, nil
+}
+
+// Size will go away. It is here to comply with the interface.
+func (t *TrieNode) Size() (uint64, error) {
+ return 0, nil
+}
+
+/*
+ TrieNode functions
+*/
+
+// MarshalJSON processes the transaction trie into readable JSON format.
+func (t *TrieNode) MarshalJSON() ([]byte, error) {
+ var out map[string]interface{}
+
+ switch t.nodeKind {
+ case "extension":
+ fallthrough
+ case "leaf":
+ var hexPrefix string
+ for _, e := range t.elements[0].([]byte) {
+ hexPrefix += fmt.Sprintf("%x", e)
+ }
+
+ // if we got a byte slice we need this cast, otherwise
+ // it will be marshaled as a base64-encoded value
+ if _, ok := t.elements[1].([]byte); ok {
+ var hexVal string
+ for _, e := range t.elements[1].([]byte) {
+ hexVal += fmt.Sprintf("%x", e)
+ }
+
+ t.elements[1] = hexVal
+ }
+
+ out = map[string]interface{}{
+ "type": t.nodeKind,
+ hexPrefix: t.elements[1],
+ }
+
+ case "branch":
+ out = map[string]interface{}{
+ "type": "branch",
+ "0": t.elements[0],
+ "1": t.elements[1],
+ "2": t.elements[2],
+ "3": t.elements[3],
+ "4": t.elements[4],
+ "5": t.elements[5],
+ "6": t.elements[6],
+ "7": t.elements[7],
+ "8": t.elements[8],
+ "9": t.elements[9],
+ "a": t.elements[10],
+ "b": t.elements[11],
+ "c": t.elements[12],
+ "d": t.elements[13],
+ "e": t.elements[14],
+ "f": t.elements[15],
+ }
+ default:
+ return nil, fmt.Errorf("nodeKind %s not supported", t.nodeKind)
+ }
+
+ return json.Marshal(out)
+}
+
+// nibbleToByte expands the nibbles of a byte slice into their own bytes.
+func nibbleToByte(k []byte) []byte {
+ var out []byte
+
+ for _, b := range k {
+ out = append(out, b/16)
+ out = append(out, b%16)
+ }
+
+ return out
+}
+
+// Resolve reading conveniences
+func (t *TrieNode) resolveTrieNodeExtension(p []string) (interface{}, []string, error) {
+ nibbles := t.elements[0].([]byte)
+ idx, rest := shiftFromPath(p, len(nibbles))
+ if len(idx) < len(nibbles) {
+ return nil, nil, fmt.Errorf("not enough nibbles to traverse this extension")
+ }
+
+ for _, i := range idx {
+ if getHexIndex(string(i)) == -1 {
+ return nil, nil, fmt.Errorf("invalid path element")
+ }
+ }
+
+ for i, n := range nibbles {
+ if string(idx[i]) != fmt.Sprintf("%x", n) {
+ return nil, nil, fmt.Errorf("no such link in this extension")
+ }
+ }
+
+ return &node.Link{Cid: t.elements[1].(cid.Cid)}, rest, nil
+}
+
+func (t *TrieNode) resolveTrieNodeLeaf(p []string) (interface{}, []string, error) {
+ nibbles := t.elements[0].([]byte)
+
+ if len(nibbles) != 0 {
+ idx, rest := shiftFromPath(p, len(nibbles))
+ if len(idx) < len(nibbles) {
+ return nil, nil, fmt.Errorf("not enough nibbles to traverse this leaf")
+ }
+
+ for _, i := range idx {
+ if getHexIndex(string(i)) == -1 {
+ return nil, nil, fmt.Errorf("invalid path element")
+ }
+ }
+
+ for i, n := range nibbles {
+ if string(idx[i]) != fmt.Sprintf("%x", n) {
+ return nil, nil, fmt.Errorf("no such link in this extension")
+ }
+ }
+
+ p = rest
+ }
+
+ link, ok := t.elements[1].(node.Node)
+ if !ok {
+ return nil, nil, fmt.Errorf("leaf children is not an IPLD node")
+ }
+
+ return link.Resolve(p)
+}
+
+func (t *TrieNode) resolveTrieNodeBranch(p []string) (interface{}, []string, error) {
+ idx, rest := shiftFromPath(p, 1)
+ hidx := getHexIndex(idx)
+ if hidx == -1 {
+ return nil, nil, fmt.Errorf("incorrect path")
+ }
+
+ child := t.elements[hidx]
+ if child != nil {
+ return &node.Link{Cid: child.(cid.Cid)}, rest, nil
+ }
+ return nil, nil, fmt.Errorf("no such link in this branch")
+}
+
+// shiftFromPath extracts from a given path (as a slice of strings)
+// the given number of elements as a single string, returning whatever
+// it has not taken.
+//
+// Examples:
+// ["0", "a", "something"] and 1 -> "0" and ["a", "something"]
+// ["ab", "c", "d", "1"] and 2 -> "ab" and ["c", "d", "1"]
+// ["abc", "d", "1"] and 2 -> "ab" and ["c", "d", "1"]
+func shiftFromPath(p []string, i int) (string, []string) {
+ var (
+ out string
+ rest []string
+ )
+
+ for _, pe := range p {
+ re := ""
+ for _, c := range pe {
+ if len(out) < i {
+ out += string(c)
+ } else {
+ re += string(c)
+ }
+ }
+
+ if len(out) == i && re != "" {
+ rest = append(rest, re)
+ }
+ }
+
+ return out, rest
+}
+
+// getHexIndex returns the integer 0-15 corresponding to the given
+// hex character, or -1 if the input is not a single hex character.
+func getHexIndex(s string) int {
+ if len(s) != 1 {
+ return -1
+ }
+
+ c := byte(s[0])
+ switch {
+ case '0' <= c && c <= '9':
+ return int(c - '0')
+ case 'a' <= c && c <= 'f':
+ return int(c - 'a' + 10)
+ }
+
+ return -1
+}
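A worked example (function name hypothetical) exercising the two path helpers with the inputs documented on shiftFromPath above; the printed results follow directly from the code.

```go
package ipld

import "fmt"

// examplePathHelpers demonstrates path splitting and hex index lookup.
func examplePathHelpers() {
	head, rest := shiftFromPath([]string{"abc", "d", "1"}, 2)
	fmt.Println(head, rest) // "ab" ["c" "d" "1"]

	fmt.Println(getHexIndex("a"))  // 10
	fmt.Println(getHexIndex("z"))  // -1
	fmt.Println(getHexIndex("ab")) // -1 (more than one character)
}
```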
diff --git a/pkg/ipfs/mocks/dag_putters.go b/pkg/ipfs/mocks/dag_putters.go
index 2c2cb8bc..dd7a9380 100644
--- a/pkg/ipfs/mocks/dag_putters.go
+++ b/pkg/ipfs/mocks/dag_putters.go
@@ -19,39 +19,35 @@ package mocks
import (
"errors"
+ node "github.com/ipfs/go-ipld-format"
+
"github.com/ethereum/go-ethereum/common"
)
// DagPutter is a mock for testing the ipfs publisher
type DagPutter struct {
- CIDsToReturn []string
- PassedRaw interface{}
- ErrToReturn error
+ PassedNode node.Node
+ ErrToReturn error
}
-// DagPut returns the pre-loaded CIDs or error
+// DagPut records the passed node and returns its cid string or the pre-loaded error
-func (dp *DagPutter) DagPut(raw interface{}) ([]string, error) {
- dp.PassedRaw = raw
- return dp.CIDsToReturn, dp.ErrToReturn
+func (dp *DagPutter) DagPut(n node.Node) (string, error) {
+ dp.PassedNode = n
+ return n.Cid().String(), dp.ErrToReturn
}
// MappedDagPutter is a mock for testing the ipfs publisher
type MappedDagPutter struct {
- CIDsToReturn map[common.Hash][]string
- PassedRaw interface{}
+ CIDsToReturn map[common.Hash]string
+ PassedNode node.Node
ErrToReturn error
}
// DagPut returns the pre-loaded CIDs or error
-func (mdp *MappedDagPutter) DagPut(raw interface{}) ([]string, error) {
- mdp.PassedRaw = raw
+func (mdp *MappedDagPutter) DagPut(n node.Node) (string, error) {
if mdp.CIDsToReturn == nil {
- return nil, errors.New("mapped dag putter needs to be initialized with a map of cids to return")
+ return "", errors.New("mapped dag putter needs to be initialized with a map of cids to return")
}
- by, ok := raw.([]byte)
- if !ok {
- return nil, errors.New("mapped dag putters can only dag put []byte values")
- }
- hash := common.BytesToHash(by)
+ hash := common.BytesToHash(n.RawData())
return mdp.CIDsToReturn[hash], nil
}
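A hedged sketch (function name and "mockCID" value hypothetical) of wiring the reworked MappedDagPutter into a test: canned CIDs are keyed by common.BytesToHash of the node's raw data.

```go
package mocks_test

import (
	"github.com/ethereum/go-ethereum/common"
	node "github.com/ipfs/go-ipld-format"

	"github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
)

// exampleMappedDagPut preloads a cid for the given node's raw data and
// returns whatever DagPut looks up for it.
func exampleMappedDagPut(n node.Node) (string, error) {
	mdp := &mocks.MappedDagPutter{
		CIDsToReturn: map[common.Hash]string{
			common.BytesToHash(n.RawData()): "mockCID",
		},
	}
	return mdp.DagPut(n)
}
```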
diff --git a/pkg/super_node/api.go b/pkg/super_node/api.go
index 32dcf2c2..8d33eadd 100644
--- a/pkg/super_node/api.go
+++ b/pkg/super_node/api.go
@@ -19,12 +19,15 @@ package super_node
import (
"context"
- "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
-
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
log "github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
// APIName is the namespace used for the state diffing service API
@@ -46,7 +49,24 @@ func NewPublicSuperNodeAPI(superNodeInterface SuperNode) *PublicSuperNodeAPI {
}
// Stream is the public method to setup a subscription that fires off super node payloads as they are processed
-func (api *PublicSuperNodeAPI) Stream(ctx context.Context, params shared.SubscriptionSettings) (*rpc.Subscription, error) {
+func (api *PublicSuperNodeAPI) Stream(ctx context.Context, rlpParams []byte) (*rpc.Subscription, error) {
+ var params shared.SubscriptionSettings
+ switch api.sn.Chain() {
+ case shared.Ethereum:
+ var ethParams eth.SubscriptionSettings
+ if err := rlp.DecodeBytes(rlpParams, &ethParams); err != nil {
+ return nil, err
+ }
+ params = &ethParams
+ case shared.Bitcoin:
+ var btcParams btc.SubscriptionSettings
+ if err := rlp.DecodeBytes(rlpParams, &btcParams); err != nil {
+ return nil, err
+ }
+ params = &btcParams
+ default:
+ panic("SuperNode is not configured for a specific chain type")
+ }
// ensure that the RPC connection supports subscriptions
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
@@ -85,6 +105,41 @@ func (api *PublicSuperNodeAPI) Stream(ctx context.Context, params shared.Subscri
}
// Node is a public rpc method to allow transformers to fetch the node info for the super node
-func (api *PublicSuperNodeAPI) Node() core.Node {
+// NOTE: this is the node info for the node that the super node is syncing from, not the node info for the super node itself
+func (api *PublicSuperNodeAPI) Node() *core.Node {
return api.sn.Node()
}
+
+// Chain returns the chain type that this super node instance supports
+func (api *PublicSuperNodeAPI) Chain() shared.ChainType {
+ return api.sn.Chain()
+}
+
+// InfoAPI is a struct for holding super node metadata
+type InfoAPI struct{}
+
+// NewInfoAPI returns a new InfoAPI
+func NewInfoAPI() *InfoAPI {
+ return &InfoAPI{}
+}
+
+// Modules returns modules supported by this api
+func (iapi *InfoAPI) Modules() map[string]string {
+ return map[string]string{
+ "vdb": "Stream",
+ }
+}
+
+// NodeInfo gathers and returns a collection of metadata for the super node
+func (iapi *InfoAPI) NodeInfo() *p2p.NodeInfo {
+ return &p2p.NodeInfo{
+ // TODO: formalize this
+ ID: "vulcanizeDB",
+ Name: "superNode",
+ }
+}
+
+// Version returns the version of the super node
+func (iapi *InfoAPI) Version() string {
+ return VersionWithMeta
+}
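A hedged client-side sketch of the new Stream signature: parameters are RLP-encoded before the call, matching the chain-specific decode the server now performs. The "vdb"/"stream" namespace and method names are assumed from the Modules map above, and the payload channel element type is left generic; adjust both to the actual API surface.

```go
package main

import (
	"context"

	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/rpc"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)

// subscribe RLP-encodes the subscription settings and opens a vdb_stream
// subscription over the given rpc client.
func subscribe(ctx context.Context, client *rpc.Client, params *eth.SubscriptionSettings) (*rpc.ClientSubscription, error) {
	rlpParams, err := rlp.EncodeToBytes(params)
	if err != nil {
		return nil, err
	}
	payloadChan := make(chan interface{})
	return client.Subscribe(ctx, "vdb", payloadChan, "stream", rlpParams)
}
```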
diff --git a/pkg/super_node/backfiller.go b/pkg/super_node/backfiller.go
index 4116e09b..247a30f4 100644
--- a/pkg/super_node/backfiller.go
+++ b/pkg/super_node/backfiller.go
@@ -17,7 +17,7 @@
package super_node
import (
- "errors"
+ "fmt"
"sync"
"sync/atomic"
"time"
@@ -30,13 +30,13 @@ import (
const (
DefaultMaxBatchSize uint64 = 100
- defaultMaxBatchNumber int64 = 10
+ DefaultMaxBatchNumber int64 = 50
)
// BackFillInterface for filling in gaps in the super node
type BackFillInterface interface {
// Method for the super node to periodically check for and fill in gaps in its data using an archival node
- FillGaps(wg *sync.WaitGroup)
+ FillGapsInSuperNode(wg *sync.WaitGroup)
}
// BackFillService for filling in gaps in the super node
@@ -52,17 +52,21 @@ type BackFillService struct {
// Interface for fetching payloads over at historical blocks; over http
Fetcher shared.PayloadFetcher
// Channel for forwarding backfill payloads to the ScreenAndServe process
- ScreenAndServeChan chan shared.StreamedIPLDs
+ ScreenAndServeChan chan shared.ConvertedData
// Check frequency
GapCheckFrequency time.Duration
// Size of batch fetches
BatchSize uint64
+ // Number of goroutines
+ BatchNumber int64
// Channel for receiving quit signal
QuitChan chan bool
+ // Chain type
+ chain shared.ChainType
}
// NewBackFillService returns a new BackFillInterface
-func NewBackFillService(settings *shared.SuperNodeConfig, screenAndServeChan chan shared.StreamedIPLDs) (BackFillInterface, error) {
+func NewBackFillService(settings *Config, screenAndServeChan chan shared.ConvertedData) (BackFillInterface, error) {
publisher, err := NewIPLDPublisher(settings.Chain, settings.IPFSPath)
if err != nil {
return nil, err
@@ -87,6 +91,10 @@ func NewBackFillService(settings *shared.SuperNodeConfig, screenAndServeChan cha
if batchSize == 0 {
batchSize = DefaultMaxBatchSize
}
+ batchNumber := int64(settings.BatchNumber)
+ if batchNumber == 0 {
+ batchNumber = DefaultMaxBatchNumber
+ }
return &BackFillService{
Indexer: indexer,
Converter: converter,
@@ -95,14 +103,15 @@ func NewBackFillService(settings *shared.SuperNodeConfig, screenAndServeChan cha
Fetcher: fetcher,
GapCheckFrequency: settings.Frequency,
BatchSize: batchSize,
+ BatchNumber: batchNumber,
ScreenAndServeChan: screenAndServeChan,
QuitChan: settings.Quit,
+ chain: settings.Chain,
}, nil
}
-// FillGaps periodically checks for and fills in gaps in the super node db
-// this requires a core.RpcClient that is pointed at an archival node with the StateDiffAt method exposed
-func (bfs *BackFillService) FillGaps(wg *sync.WaitGroup) {
+// FillGapsInSuperNode periodically checks for and fills in gaps in the super node db
+func (bfs *BackFillService) FillGapsInSuperNode(wg *sync.WaitGroup) {
ticker := time.NewTicker(bfs.GapCheckFrequency)
wg.Add(1)
@@ -110,60 +119,44 @@ func (bfs *BackFillService) FillGaps(wg *sync.WaitGroup) {
for {
select {
case <-bfs.QuitChan:
- log.Info("quiting FillGaps process")
+ log.Infof("quiting %s FillGapsInSuperNode process", bfs.chain.String())
wg.Done()
return
case <-ticker.C:
- log.Info("searching for gaps in the super node database")
+ log.Infof("searching for gaps in the %s super node database", bfs.chain.String())
startingBlock, err := bfs.Retriever.RetrieveFirstBlockNumber()
if err != nil {
- log.Error(err)
+ log.Errorf("super node db backfill RetrieveFirstBlockNumber error for chain %s: %v", bfs.chain.String(), err)
continue
}
if startingBlock != 0 {
- log.Info("found gap at the beginning of the sync")
- bfs.fillGaps(0, uint64(startingBlock-1))
+ log.Infof("found gap at the beginning of the %s sync", bfs.chain.String())
+ if err := bfs.backFill(0, uint64(startingBlock-1)); err != nil {
+ log.Error(err)
+ }
}
gaps, err := bfs.Retriever.RetrieveGapsInData()
if err != nil {
- log.Error(err)
+ log.Errorf("super node db backfill RetrieveGapsInData error for chain %s: %v", bfs.chain.String(), err)
continue
}
for _, gap := range gaps {
- if err := bfs.fillGaps(gap.Start, gap.Stop); err != nil {
+ if err := bfs.backFill(gap.Start, gap.Stop); err != nil {
log.Error(err)
}
}
}
}
}()
- log.Info("fillGaps goroutine successfully spun up")
-}
-
-func (bfs *BackFillService) fillGaps(startingBlock, endingBlock uint64) error {
- log.Infof("going to fill in gap from %d to %d", startingBlock, endingBlock)
- errChan := make(chan error)
- done := make(chan bool)
- err := bfs.backFill(startingBlock, endingBlock, errChan, done)
- if err != nil {
- return err
- }
- for {
- select {
- case err := <-errChan:
- log.Error(err)
- case <-done:
- log.Infof("finished filling in gap from %d to %d", startingBlock, endingBlock)
- return nil
- }
- }
+ log.Infof("%s fillGaps goroutine successfully spun up", bfs.chain.String())
}
-// backFill fetches, processes, and returns utils.StorageDiffs over a range of blocks
+// backFill fetches, converts, publishes, and indexes chain data over a range of blocks
// It splits a large range up into smaller chunks, batch fetching and processing those chunks concurrently
-func (bfs *BackFillService) backFill(startingBlock, endingBlock uint64, errChan chan error, done chan bool) error {
+func (bfs *BackFillService) backFill(startingBlock, endingBlock uint64) error {
+ log.Infof("filling in %s gap from %d to %d", bfs.chain.String(), startingBlock, endingBlock)
if endingBlock < startingBlock {
- return errors.New("backfill: ending block number needs to be greater than starting block number")
+ return fmt.Errorf("super node %s db backfill: ending block number needs to be greater than starting block number", bfs.chain.String())
}
//
// break the range up into bins of smaller ranges
@@ -174,28 +167,27 @@ func (bfs *BackFillService) backFill(startingBlock, endingBlock uint64, errChan
// int64 for atomic incrementing and decrementing to track the number of active processing goroutines we have
var activeCount int64
// channel for processing goroutines to signal when they are done
- processingDone := make(chan [2]uint64)
+ processingDone := make(chan bool)
forwardDone := make(chan bool)
- // for each block range bin spin up a goroutine to batch fetch and process state diffs for that range
+ // for each block range bin spin up a goroutine to batch fetch and process data for that range
go func() {
for _, blockHeights := range blockRangeBins {
// if we have reached our limit of active goroutines
// wait for one to finish before starting the next
- if atomic.AddInt64(&activeCount, 1) > defaultMaxBatchNumber {
+ if atomic.AddInt64(&activeCount, 1) > bfs.BatchNumber {
// this blocks until a process signals it has finished
<-forwardDone
}
go func(blockHeights []uint64) {
payloads, err := bfs.Fetcher.FetchAt(blockHeights)
if err != nil {
- errChan <- err
+ log.Errorf("%s super node historical data fetcher error: %s", bfs.chain.String(), err.Error())
}
for _, payload := range payloads {
ipldPayload, err := bfs.Converter.Convert(payload)
if err != nil {
- errChan <- err
- continue
+ log.Errorf("%s super node historical data converter error: %s", bfs.chain.String(), err.Error())
}
// If there is a ScreenAndServe process listening, forward payload to it
select {
@@ -204,42 +196,36 @@ func (bfs *BackFillService) backFill(startingBlock, endingBlock uint64, errChan
}
cidPayload, err := bfs.Publisher.Publish(ipldPayload)
if err != nil {
- errChan <- err
- continue
+ log.Errorf("%s super node historical data publisher error: %s", bfs.chain.String(), err.Error())
}
if err := bfs.Indexer.Index(cidPayload); err != nil {
- errChan <- err
+ log.Errorf("%s super node historical data indexer error: %s", bfs.chain.String(), err.Error())
}
}
// when this goroutine is done, send out a signal
- processingDone <- [2]uint64{blockHeights[0], blockHeights[len(blockHeights)-1]}
+ log.Infof("finished filling in %s gap from %d to %d", bfs.chain.String(), blockHeights[0], blockHeights[len(blockHeights)-1])
+ processingDone <- true
}(blockHeights)
}
}()
- // goroutine that listens on the processingDone chan
+ // listen on the processingDone chan
// keeps track of the number of processing goroutines that have finished
- // when they have all finished, sends the final signal out
- go func() {
- goroutinesFinished := 0
- for {
+ // when they have all finished, return
+ goroutinesFinished := 0
+ for {
+ select {
+ case <-processingDone:
+ atomic.AddInt64(&activeCount, -1)
select {
- case doneWithHeights := <-processingDone:
- atomic.AddInt64(&activeCount, -1)
- select {
- // if we are waiting for a process to finish, signal that one has
- case forwardDone <- true:
- default:
- }
- log.Infof("finished filling in gap sub-bin from %d to %d", doneWithHeights[0], doneWithHeights[1])
- goroutinesFinished++
- if goroutinesFinished >= len(blockRangeBins) {
- done <- true
- return
- }
+ // if we are waiting for a process to finish, signal that one has
+ case forwardDone <- true:
+ default:
+ }
+ goroutinesFinished++
+ if goroutinesFinished >= len(blockRangeBins) {
+ return nil
}
}
- }()
-
- return nil
+ }
}
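A standalone sketch (names hypothetical) of the throttling pattern the restructured backFill uses: an atomic counter caps in-flight workers, the dispatch loop blocks whenever the cap is exceeded, and the main loop reclaims a slot each time a worker signals completion.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// processBins dispatches one goroutine per bin, never allowing more than
// maxWorkers to run at once, and returns when all bins are processed.
func processBins(bins [][]uint64, maxWorkers int64) {
	var active int64
	processingDone := make(chan bool)
	forwardDone := make(chan bool)

	go func() {
		for _, bin := range bins {
			if atomic.AddInt64(&active, 1) > maxWorkers {
				<-forwardDone // wait for a worker to finish
			}
			go func(bin []uint64) {
				fmt.Printf("processing %d heights\n", len(bin))
				processingDone <- true
			}(bin)
		}
	}()

	for finished := 0; finished < len(bins); finished++ {
		<-processingDone
		atomic.AddInt64(&active, -1)
		select {
		case forwardDone <- true: // unblock the dispatcher if it is waiting
		default:
		}
	}
}
```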
diff --git a/pkg/super_node/backfiller_test.go b/pkg/super_node/backfiller_test.go
index 833adce8..236357e0 100644
--- a/pkg/super_node/backfiller_test.go
+++ b/pkg/super_node/backfiller_test.go
@@ -41,10 +41,10 @@ var _ = Describe("BackFiller", func() {
ReturnErr: nil,
}
mockConverter := &mocks.IterativePayloadConverter{
- ReturnIPLDPayload: []eth.IPLDPayload{mocks.MockIPLDPayload, mocks.MockIPLDPayload},
+ ReturnIPLDPayload: []eth.ConvertedPayload{mocks.MockConvertedPayload, mocks.MockConvertedPayload},
ReturnErr: nil,
}
- mockRetriever := &mocks2.MockCIDRetriever{
+ mockRetriever := &mocks2.CIDRetriever{
FirstBlockNumberToReturn: 0,
GapsToRetrieve: []shared.Gap{
{
@@ -52,7 +52,7 @@ var _ = Describe("BackFiller", func() {
},
},
}
- mockFetcher := &mocks2.IPLDFetcher{
+ mockFetcher := &mocks2.PayloadFetcher{
PayloadsToReturn: map[uint64]shared.RawChainData{
100: mocks.MockStateDiffPayload,
101: mocks.MockStateDiffPayload,
@@ -67,18 +67,19 @@ var _ = Describe("BackFiller", func() {
Retriever: mockRetriever,
GapCheckFrequency: time.Second * 2,
BatchSize: super_node.DefaultMaxBatchSize,
+ BatchNumber: super_node.DefaultMaxBatchNumber,
QuitChan: quitChan,
}
wg := &sync.WaitGroup{}
- backfiller.FillGaps(wg)
+ backfiller.FillGapsInSuperNode(wg)
time.Sleep(time.Second * 3)
quitChan <- true
Expect(len(mockCidRepo.PassedCIDPayload)).To(Equal(2))
Expect(mockCidRepo.PassedCIDPayload[0]).To(Equal(mocks.MockCIDPayload))
Expect(mockCidRepo.PassedCIDPayload[1]).To(Equal(mocks.MockCIDPayload))
Expect(len(mockPublisher.PassedIPLDPayload)).To(Equal(2))
- Expect(mockPublisher.PassedIPLDPayload[0]).To(Equal(mocks.MockIPLDPayload))
- Expect(mockPublisher.PassedIPLDPayload[1]).To(Equal(mocks.MockIPLDPayload))
+ Expect(mockPublisher.PassedIPLDPayload[0]).To(Equal(mocks.MockConvertedPayload))
+ Expect(mockPublisher.PassedIPLDPayload[1]).To(Equal(mocks.MockConvertedPayload))
Expect(len(mockConverter.PassedStatediffPayload)).To(Equal(2))
Expect(mockConverter.PassedStatediffPayload[0]).To(Equal(mocks.MockStateDiffPayload))
Expect(mockConverter.PassedStatediffPayload[1]).To(Equal(mocks.MockStateDiffPayload))
@@ -96,10 +97,10 @@ var _ = Describe("BackFiller", func() {
ReturnErr: nil,
}
mockConverter := &mocks.IterativePayloadConverter{
- ReturnIPLDPayload: []eth.IPLDPayload{mocks.MockIPLDPayload},
+ ReturnIPLDPayload: []eth.ConvertedPayload{mocks.MockConvertedPayload},
ReturnErr: nil,
}
- mockRetriever := &mocks2.MockCIDRetriever{
+ mockRetriever := &mocks2.CIDRetriever{
FirstBlockNumberToReturn: 0,
GapsToRetrieve: []shared.Gap{
{
@@ -107,7 +108,7 @@ var _ = Describe("BackFiller", func() {
},
},
}
- mockFetcher := &mocks2.IPLDFetcher{
+ mockFetcher := &mocks2.PayloadFetcher{
PayloadsToReturn: map[uint64]shared.RawChainData{
100: mocks.MockStateDiffPayload,
},
@@ -121,16 +122,17 @@ var _ = Describe("BackFiller", func() {
Retriever: mockRetriever,
GapCheckFrequency: time.Second * 2,
BatchSize: super_node.DefaultMaxBatchSize,
+ BatchNumber: super_node.DefaultMaxBatchNumber,
QuitChan: quitChan,
}
wg := &sync.WaitGroup{}
- backfiller.FillGaps(wg)
+ backfiller.FillGapsInSuperNode(wg)
time.Sleep(time.Second * 3)
quitChan <- true
Expect(len(mockCidRepo.PassedCIDPayload)).To(Equal(1))
Expect(mockCidRepo.PassedCIDPayload[0]).To(Equal(mocks.MockCIDPayload))
Expect(len(mockPublisher.PassedIPLDPayload)).To(Equal(1))
- Expect(mockPublisher.PassedIPLDPayload[0]).To(Equal(mocks.MockIPLDPayload))
+ Expect(mockPublisher.PassedIPLDPayload[0]).To(Equal(mocks.MockConvertedPayload))
Expect(len(mockConverter.PassedStatediffPayload)).To(Equal(1))
Expect(mockConverter.PassedStatediffPayload[0]).To(Equal(mocks.MockStateDiffPayload))
Expect(mockRetriever.CalledTimes).To(Equal(1))
@@ -147,14 +149,14 @@ var _ = Describe("BackFiller", func() {
ReturnErr: nil,
}
mockConverter := &mocks.IterativePayloadConverter{
- ReturnIPLDPayload: []eth.IPLDPayload{mocks.MockIPLDPayload, mocks.MockIPLDPayload},
+ ReturnIPLDPayload: []eth.ConvertedPayload{mocks.MockConvertedPayload, mocks.MockConvertedPayload},
ReturnErr: nil,
}
- mockRetriever := &mocks2.MockCIDRetriever{
+ mockRetriever := &mocks2.CIDRetriever{
FirstBlockNumberToReturn: 3,
GapsToRetrieve: []shared.Gap{},
}
- mockFetcher := &mocks2.IPLDFetcher{
+ mockFetcher := &mocks2.PayloadFetcher{
PayloadsToReturn: map[uint64]shared.RawChainData{
1: mocks.MockStateDiffPayload,
2: mocks.MockStateDiffPayload,
@@ -169,18 +171,19 @@ var _ = Describe("BackFiller", func() {
Retriever: mockRetriever,
GapCheckFrequency: time.Second * 2,
BatchSize: super_node.DefaultMaxBatchSize,
+ BatchNumber: super_node.DefaultMaxBatchNumber,
QuitChan: quitChan,
}
wg := &sync.WaitGroup{}
- backfiller.FillGaps(wg)
+ backfiller.FillGapsInSuperNode(wg)
time.Sleep(time.Second * 3)
quitChan <- true
Expect(len(mockCidRepo.PassedCIDPayload)).To(Equal(2))
Expect(mockCidRepo.PassedCIDPayload[0]).To(Equal(mocks.MockCIDPayload))
Expect(mockCidRepo.PassedCIDPayload[1]).To(Equal(mocks.MockCIDPayload))
Expect(len(mockPublisher.PassedIPLDPayload)).To(Equal(2))
- Expect(mockPublisher.PassedIPLDPayload[0]).To(Equal(mocks.MockIPLDPayload))
- Expect(mockPublisher.PassedIPLDPayload[1]).To(Equal(mocks.MockIPLDPayload))
+ Expect(mockPublisher.PassedIPLDPayload[0]).To(Equal(mocks.MockConvertedPayload))
+ Expect(mockPublisher.PassedIPLDPayload[1]).To(Equal(mocks.MockConvertedPayload))
Expect(len(mockConverter.PassedStatediffPayload)).To(Equal(2))
Expect(mockConverter.PassedStatediffPayload[0]).To(Equal(mocks.MockStateDiffPayload))
Expect(mockConverter.PassedStatediffPayload[1]).To(Equal(mocks.MockStateDiffPayload))
diff --git a/pkg/super_node/btc/cleaner.go b/pkg/super_node/btc/cleaner.go
new file mode 100644
index 00000000..cf39cd5b
--- /dev/null
+++ b/pkg/super_node/btc/cleaner.go
@@ -0,0 +1,176 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package btc
+
+import (
+ "fmt"
+
+ "github.com/jmoiron/sqlx"
+ "github.com/sirupsen/logrus"
+
+ "github.com/vulcanize/vulcanizedb/pkg/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+)
+
+// Cleaner satisfies the shared.Cleaner interface for bitcoin
+type Cleaner struct {
+ db *postgres.DB
+}
+
+// NewCleaner returns a new Cleaner struct that satisfies the shared.Cleaner interface
+func NewCleaner(db *postgres.DB) *Cleaner {
+ return &Cleaner{
+ db: db,
+ }
+}
+
+// Clean removes the specified data from the db within the provided block range
+func (c *Cleaner) Clean(rngs [][2]uint64, t shared.DataType) error {
+ tx, err := c.db.Beginx()
+ if err != nil {
+ return err
+ }
+ for _, rng := range rngs {
+ logrus.Infof("btc db cleaner cleaning up block range %d to %d", rng[0], rng[1])
+ if err := c.clean(tx, rng, t); err != nil {
+ if err := tx.Rollback(); err != nil {
+ logrus.Error(err)
+ }
+ return err
+ }
+ }
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+ logrus.Infof("btc db cleaner vacuum analyzing cleaned tables to free up space from deleted rows")
+ return c.vacuumAnalyze(t)
+}
+
+func (c *Cleaner) clean(tx *sqlx.Tx, rng [2]uint64, t shared.DataType) error {
+ switch t {
+ case shared.Full, shared.Headers:
+ return c.cleanFull(tx, rng)
+ case shared.Transactions:
+ if err := c.cleanTransactionIPLDs(tx, rng); err != nil {
+ return err
+ }
+ return c.cleanTransactionMetaData(tx, rng)
+ default:
+ return fmt.Errorf("btc cleaner unrecognized type: %s", t.String())
+ }
+}
+
+func (c *Cleaner) vacuumAnalyze(t shared.DataType) error {
+ switch t {
+ case shared.Full, shared.Headers:
+ if err := c.vacuumHeaders(); err != nil {
+ return err
+ }
+ if err := c.vacuumTxs(); err != nil {
+ return err
+ }
+ if err := c.vacuumTxInputs(); err != nil {
+ return err
+ }
+ if err := c.vacuumTxOutputs(); err != nil {
+ return err
+ }
+ case shared.Transactions:
+ if err := c.vacuumTxs(); err != nil {
+ return err
+ }
+ if err := c.vacuumTxInputs(); err != nil {
+ return err
+ }
+ if err := c.vacuumTxOutputs(); err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("btc cleaner unrecognized type: %s", t.String())
+ }
+ return c.vacuumIPLDs()
+}
+
+func (c *Cleaner) vacuumHeaders() error {
+ _, err := c.db.Exec(`VACUUM ANALYZE btc.header_cids`)
+ return err
+}
+
+func (c *Cleaner) vacuumTxs() error {
+ _, err := c.db.Exec(`VACUUM ANALYZE btc.transaction_cids`)
+ return err
+}
+
+func (c *Cleaner) vacuumTxInputs() error {
+ _, err := c.db.Exec(`VACUUM ANALYZE btc.tx_inputs`)
+ return err
+}
+
+func (c *Cleaner) vacuumTxOutputs() error {
+ _, err := c.db.Exec(`VACUUM ANALYZE btc.tx_outputs`)
+ return err
+}
+
+func (c *Cleaner) vacuumIPLDs() error {
+ _, err := c.db.Exec(`VACUUM ANALYZE public.blocks`)
+ return err
+}
+
+func (c *Cleaner) cleanFull(tx *sqlx.Tx, rng [2]uint64) error {
+ if err := c.cleanTransactionIPLDs(tx, rng); err != nil {
+ return err
+ }
+ if err := c.cleanHeaderIPLDs(tx, rng); err != nil {
+ return err
+ }
+ return c.cleanHeaderMetaData(tx, rng)
+}
+
+func (c *Cleaner) cleanTransactionIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
+ pgStr := `DELETE FROM public.blocks A
+ USING btc.transaction_cids B, btc.header_cids C
+ WHERE A.key = B.cid
+ AND B.header_id = C.id
+ AND C.block_number BETWEEN $1 AND $2`
+ _, err := tx.Exec(pgStr, rng[0], rng[1])
+ return err
+}
+
+func (c *Cleaner) cleanTransactionMetaData(tx *sqlx.Tx, rng [2]uint64) error {
+ pgStr := `DELETE FROM btc.transaction_cids A
+ USING btc.header_cids B
+ WHERE A.header_id = B.id
+ AND B.block_number BETWEEN $1 AND $2`
+ _, err := tx.Exec(pgStr, rng[0], rng[1])
+ return err
+}
+
+func (c *Cleaner) cleanHeaderIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
+ pgStr := `DELETE FROM public.blocks A
+ USING btc.header_cids B
+ WHERE A.key = B.cid
+ AND B.block_number BETWEEN $1 AND $2`
+ _, err := tx.Exec(pgStr, rng[0], rng[1])
+ return err
+}
+
+func (c *Cleaner) cleanHeaderMetaData(tx *sqlx.Tx, rng [2]uint64) error {
+ pgStr := `DELETE FROM btc.header_cids
+ WHERE block_number BETWEEN $1 AND $2`
+ _, err := tx.Exec(pgStr, rng[0], rng[1])
+ return err
+}
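A hedged usage sketch (function name and block range hypothetical): delete transaction IPLDs and metadata for a block range while leaving headers intact; Clean vacuum-analyzes the touched tables afterwards. Database setup is assumed.

```go
package main

import (
	"github.com/vulcanize/vulcanizedb/pkg/postgres"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

// cleanTxRange removes btc transaction data for blocks 0 through 1000.
func cleanTxRange(db *postgres.DB) error {
	cleaner := btc.NewCleaner(db)
	return cleaner.Clean([][2]uint64{{0, 1000}}, shared.Transactions)
}
```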
diff --git a/pkg/super_node/btc/cleaner_test.go b/pkg/super_node/btc/cleaner_test.go
new file mode 100644
index 00000000..a245a294
--- /dev/null
+++ b/pkg/super_node/btc/cleaner_test.go
@@ -0,0 +1,288 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package btc_test
+
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/crypto"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/vulcanize/vulcanizedb/pkg/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+)
+
+var (
+ // Block 0
+ // header variables
+ blockHash1 = crypto.Keccak256Hash([]byte{00, 02})
+ blockNumber1 = big.NewInt(0)
+ headerCid1 = "mockHeader1CID"
+ parentHash = crypto.Keccak256Hash([]byte{00, 01})
+ totalDifficulty = "50000000000000000000"
+ reward = "5000000000000000000"
+ headerModel1 = btc.HeaderModel{
+ BlockHash: blockHash1.String(),
+ BlockNumber: blockNumber1.String(),
+ ParentHash: parentHash.String(),
+ CID: headerCid1,
+ }
+
+ // tx variables
+ tx1CID = "mockTx1CID"
+ tx2CID = "mockTx2CID"
+ tx1Hash = crypto.Keccak256Hash([]byte{01, 01})
+ tx2Hash = crypto.Keccak256Hash([]byte{01, 02})
+ opHash = crypto.Keccak256Hash([]byte{02, 01})
+ txModels1 = []btc.TxModelWithInsAndOuts{
+ {
+ Index: 0,
+ CID: tx1CID,
+ TxHash: tx1Hash.String(),
+ SegWit: true,
+ TxInputs: []btc.TxInput{
+ {
+ Index: 0,
+ TxWitness: []string{"mockWitness"},
+ SignatureScript: []byte{01},
+ PreviousOutPointIndex: 0,
+ PreviousOutPointHash: opHash.String(),
+ },
+ },
+ TxOutputs: []btc.TxOutput{
+ {
+ Index: 0,
+ Value: 50000000,
+ PkScript: []byte{02},
+ ScriptClass: 0,
+ RequiredSigs: 1,
+ },
+ },
+ },
+ {
+ Index: 1,
+ CID: tx2CID,
+ TxHash: tx2Hash.String(),
+ SegWit: true,
+ },
+ }
+ mockCIDPayload1 = &btc.CIDPayload{
+ HeaderCID: headerModel1,
+ TransactionCIDs: txModels1,
+ }
+
+ // Block 1
+ // header variables
+ blockHash2 = crypto.Keccak256Hash([]byte{00, 03})
+ blockNumber2 = big.NewInt(1)
+ headerCid2 = "mockHeaderCID2"
+ headerModel2 = btc.HeaderModel{
+ BlockNumber: blockNumber2.String(),
+ BlockHash: blockHash2.String(),
+ ParentHash: blockHash1.String(),
+ CID: headerCid2,
+ }
+
+ // tx variables
+ tx3CID = "mockTx3CID"
+ tx3Hash = crypto.Keccak256Hash([]byte{01, 03})
+ txModels2 = []btc.TxModelWithInsAndOuts{
+ {
+ Index: 0,
+ CID: tx3CID,
+ TxHash: tx3Hash.String(),
+ SegWit: true,
+ },
+ }
+ mockCIDPayload2 = &btc.CIDPayload{
+ HeaderCID: headerModel2,
+ TransactionCIDs: txModels2,
+ }
+ rngs = [][2]uint64{{0, 1}}
+ cids = []string{
+ headerCid1,
+ headerCid2,
+ tx1CID,
+ tx2CID,
+ tx3CID,
+ }
+ mockData = []byte{'\x01'}
+)
+
+var _ = Describe("Cleaner", func() {
+ var (
+ db *postgres.DB
+ repo *btc.CIDIndexer
+ cleaner *btc.Cleaner
+ )
+ BeforeEach(func() {
+ var err error
+ db, err = shared.SetupDB()
+ Expect(err).ToNot(HaveOccurred())
+ repo = btc.NewCIDIndexer(db)
+ cleaner = btc.NewCleaner(db)
+ })
+
+ Describe("Clean", func() {
+ BeforeEach(func() {
+ err := repo.Index(mockCIDPayload1)
+ Expect(err).ToNot(HaveOccurred())
+ err = repo.Index(mockCIDPayload2)
+ Expect(err).ToNot(HaveOccurred())
+
+ for _, cid := range cids {
+ _, err = db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, cid, mockData)
+ Expect(err).ToNot(HaveOccurred())
+ }
+
+ tx, err := db.Beginx()
+ Expect(err).ToNot(HaveOccurred())
+ var startingIPFSBlocksCount int
+ pgStr := `SELECT COUNT(*) FROM public.blocks`
+ err = tx.Get(&startingIPFSBlocksCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var startingTxCount int
+ pgStr = `SELECT COUNT(*) FROM btc.transaction_cids`
+ err = tx.Get(&startingTxCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var startingHeaderCount int
+ pgStr = `SELECT COUNT(*) FROM btc.header_cids`
+ err = tx.Get(&startingHeaderCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = tx.Commit()
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(startingIPFSBlocksCount).To(Equal(5))
+ Expect(startingTxCount).To(Equal(3))
+ Expect(startingHeaderCount).To(Equal(2))
+ })
+ AfterEach(func() {
+ btc.TearDownDB(db)
+ })
+ It("Cleans everything", func() {
+ err := cleaner.Clean(rngs, shared.Full)
+ Expect(err).ToNot(HaveOccurred())
+
+ tx, err := db.Beginx()
+ Expect(err).ToNot(HaveOccurred())
+ var txCount int
+ pgStr := `SELECT COUNT(*) FROM btc.transaction_cids`
+ err = tx.Get(&txCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var txInCount int
+ pgStr = `SELECT COUNT(*) FROM btc.tx_inputs`
+ err = tx.Get(&txInCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var txOutCount int
+ pgStr = `SELECT COUNT(*) FROM btc.tx_outputs`
+ err = tx.Get(&txOutCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var headerCount int
+ pgStr = `SELECT COUNT(*) FROM btc.header_cids`
+ err = tx.Get(&headerCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var blocksCount int
+ pgStr = `SELECT COUNT(*) FROM public.blocks`
+ err = tx.Get(&blocksCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = tx.Commit()
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(blocksCount).To(Equal(0))
+ Expect(txCount).To(Equal(0))
+ Expect(txInCount).To(Equal(0))
+ Expect(txOutCount).To(Equal(0))
+ Expect(headerCount).To(Equal(0))
+ })
+ It("Cleans headers and all linked data", func() {
+ err := cleaner.Clean(rngs, shared.Headers)
+ Expect(err).ToNot(HaveOccurred())
+
+ tx, err := db.Beginx()
+ Expect(err).ToNot(HaveOccurred())
+ var txCount int
+ pgStr := `SELECT COUNT(*) FROM btc.transaction_cids`
+ err = tx.Get(&txCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var txInCount int
+ pgStr = `SELECT COUNT(*) FROM btc.tx_inputs`
+ err = tx.Get(&txInCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var txOutCount int
+ pgStr = `SELECT COUNT(*) FROM btc.tx_outputs`
+ err = tx.Get(&txOutCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var headerCount int
+ pgStr = `SELECT COUNT(*) FROM btc.header_cids`
+ err = tx.Get(&headerCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var blocksCount int
+ pgStr = `SELECT COUNT(*) FROM public.blocks`
+ err = tx.Get(&blocksCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = tx.Commit()
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(blocksCount).To(Equal(0))
+ Expect(txCount).To(Equal(0))
+ Expect(txInCount).To(Equal(0))
+ Expect(txOutCount).To(Equal(0))
+ Expect(headerCount).To(Equal(0))
+ })
+ It("Cleans transactions", func() {
+ err := cleaner.Clean(rngs, shared.Transactions)
+ Expect(err).ToNot(HaveOccurred())
+
+ tx, err := db.Beginx()
+ Expect(err).ToNot(HaveOccurred())
+ var txCount int
+ pgStr := `SELECT COUNT(*) FROM btc.transaction_cids`
+ err = tx.Get(&txCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var txInCount int
+ pgStr = `SELECT COUNT(*) FROM btc.tx_inputs`
+ err = tx.Get(&txInCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var txOutCount int
+ pgStr = `SELECT COUNT(*) FROM btc.tx_outputs`
+ err = tx.Get(&txOutCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var headerCount int
+ pgStr = `SELECT COUNT(*) FROM btc.header_cids`
+ err = tx.Get(&headerCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var blocksCount int
+ pgStr = `SELECT COUNT(*) FROM public.blocks`
+ err = tx.Get(&blocksCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = tx.Commit()
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(blocksCount).To(Equal(2))
+ Expect(txCount).To(Equal(0))
+ Expect(txInCount).To(Equal(0))
+ Expect(txOutCount).To(Equal(0))
+ Expect(headerCount).To(Equal(2))
+ })
+ })
+})
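
Note: the "Cleans everything" case above is presumably the same path the resync command's --resync-clear-old-cache option exercises. A minimal sketch of driving the Cleaner directly, using only the constructor and Clean signature these tests exercise (the range values and wiring are illustrative):

package example

import (
	"github.com/vulcanize/vulcanizedb/pkg/postgres"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

// cleanRange removes indexed btc data of every type in the inclusive
// block range [start, stop], mirroring the "Cleans everything" case above.
func cleanRange(db *postgres.DB, start, stop uint64) error {
	cleaner := btc.NewCleaner(db)
	return cleaner.Clean([][2]uint64{{start, stop}}, shared.Full)
}
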
diff --git a/pkg/super_node/btc/converter.go b/pkg/super_node/btc/converter.go
index cc10b305..7559fa17 100644
--- a/pkg/super_node/btc/converter.go
+++ b/pkg/super_node/btc/converter.go
@@ -40,7 +40,7 @@ func NewPayloadConverter(chainConfig *chaincfg.Params) *PayloadConverter {
-// Convert method is used to convert a bitcoin BlockPayload to an IPLDPayload
+// Convert method is used to convert a bitcoin BlockPayload to a ConvertedPayload
// Satisfies the shared.PayloadConverter interface
-func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.StreamedIPLDs, error) {
+func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.ConvertedData, error) {
btcBlockPayload, ok := payload.(BlockPayload)
if !ok {
return nil, fmt.Errorf("btc converter: expected payload type %T got %T", BlockPayload{}, payload)
@@ -87,7 +87,7 @@ func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.Streame
}
txMeta[i] = txModel
}
- return IPLDPayload{
+ return ConvertedPayload{
BlockPayload: btcBlockPayload,
TxMetaData: txMeta,
}, nil
diff --git a/pkg/super_node/btc/converter_test.go b/pkg/super_node/btc/converter_test.go
index c5ad628d..c76ad6c6 100644
--- a/pkg/super_node/btc/converter_test.go
+++ b/pkg/super_node/btc/converter_test.go
@@ -31,10 +31,10 @@ var _ = Describe("Converter", func() {
converter := btc.NewPayloadConverter(&chaincfg.MainNetParams)
payload, err := converter.Convert(mocks.MockBlockPayload)
Expect(err).ToNot(HaveOccurred())
- convertedPayload, ok := payload.(btc.IPLDPayload)
+ convertedPayload, ok := payload.(btc.ConvertedPayload)
Expect(ok).To(BeTrue())
- Expect(convertedPayload).To(Equal(mocks.MockIPLDPayload))
- Expect(convertedPayload.Height).To(Equal(mocks.MockBlockHeight))
+ Expect(convertedPayload).To(Equal(mocks.MockConvertedPayload))
+ Expect(convertedPayload.BlockHeight).To(Equal(mocks.MockBlockHeight))
Expect(convertedPayload.Header).To(Equal(&mocks.MockBlock.Header))
Expect(convertedPayload.Txs).To(Equal(mocks.MockTransactions))
Expect(convertedPayload.TxMetaData).To(Equal(mocks.MockTxsMetaData))
diff --git a/pkg/super_node/btc/filterer.go b/pkg/super_node/btc/filterer.go
index b9b6a78d..5fa55d68 100644
--- a/pkg/super_node/btc/filterer.go
+++ b/pkg/super_node/btc/filterer.go
@@ -21,6 +21,10 @@ import (
"fmt"
"math/big"
+ "github.com/multiformats/go-multihash"
+
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs"
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
@@ -33,37 +37,45 @@ func NewResponseFilterer() *ResponseFilterer {
}
-// Filter is used to filter through btc data to extract and package requested data into a Payload
+// Filter is used to filter through btc data to extract and package requested data into an IPLDs response
-func (s *ResponseFilterer) Filter(filter shared.SubscriptionSettings, payload shared.StreamedIPLDs) (shared.ServerResponse, error) {
+func (s *ResponseFilterer) Filter(filter shared.SubscriptionSettings, payload shared.ConvertedData) (shared.IPLDs, error) {
btcFilters, ok := filter.(*SubscriptionSettings)
if !ok {
- return StreamResponse{}, fmt.Errorf("btc filterer expected filter type %T got %T", &SubscriptionSettings{}, filter)
+ return IPLDs{}, fmt.Errorf("btc filterer expected filter type %T got %T", &SubscriptionSettings{}, filter)
}
- btcPayload, ok := payload.(IPLDPayload)
+ btcPayload, ok := payload.(ConvertedPayload)
if !ok {
- return StreamResponse{}, fmt.Errorf("btc filterer expected payload type %T got %T", IPLDPayload{}, payload)
+ return IPLDs{}, fmt.Errorf("btc filterer expected payload type %T got %T", ConvertedPayload{}, payload)
}
- height := int64(btcPayload.Height)
+ height := int64(btcPayload.BlockPayload.BlockHeight)
if checkRange(btcFilters.Start.Int64(), btcFilters.End.Int64(), height) {
- response := new(StreamResponse)
+ response := new(IPLDs)
if err := s.filterHeaders(btcFilters.HeaderFilter, response, btcPayload); err != nil {
- return StreamResponse{}, err
+ return IPLDs{}, err
}
if err := s.filterTransactions(btcFilters.TxFilter, response, btcPayload); err != nil {
- return StreamResponse{}, err
+ return IPLDs{}, err
}
response.BlockNumber = big.NewInt(height)
return *response, nil
}
- return StreamResponse{}, nil
+ return IPLDs{}, nil
}
-func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *StreamResponse, payload IPLDPayload) error {
+func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IPLDs, payload ConvertedPayload) error {
if !headerFilter.Off {
headerBuffer := new(bytes.Buffer)
if err := payload.Header.Serialize(headerBuffer); err != nil {
return err
}
- response.SerializedHeaders = append(response.SerializedHeaders, headerBuffer.Bytes())
+ data := headerBuffer.Bytes()
+ cid, err := ipld.RawdataToCid(ipld.MBitcoinHeader, data, multihash.DBL_SHA2_256)
+ if err != nil {
+ return err
+ }
+ response.Header = ipfs.BlockModel{
+ Data: data,
+ CID: cid.String(),
+ }
}
return nil
}
@@ -75,15 +87,24 @@ func checkRange(start, end, actual int64) bool {
return false
}
-func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *StreamResponse, payload IPLDPayload) error {
+func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *IPLDs, payload ConvertedPayload) error {
if !trxFilter.Off {
+ response.Transactions = make([]ipfs.BlockModel, 0, len(payload.TxMetaData))
for i, txMeta := range payload.TxMetaData {
if checkTransaction(txMeta, trxFilter) {
trxBuffer := new(bytes.Buffer)
if err := payload.Txs[i].MsgTx().Serialize(trxBuffer); err != nil {
return err
}
- response.SerializedTxs = append(response.SerializedTxs, trxBuffer.Bytes())
+ data := trxBuffer.Bytes()
+ cid, err := ipld.RawdataToCid(ipld.MBitcoinTx, data, multihash.DBL_SHA2_256)
+ if err != nil {
+ return err
+ }
+ response.Transactions = append(response.Transactions, ipfs.BlockModel{
+ Data: data,
+ CID: cid.String(),
+ })
}
}
}
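
A sketch of driving the updated filterer end to end; the field names follow the usages visible above, and it assumes the zero-value HeaderFilter/TxFilter (Off == false) request both headers and transactions:

package example

import (
	"fmt"
	"math/big"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
)

// filterBlock runs a converted payload through the ResponseFilterer and
// type-asserts the concrete IPLDs result.
func filterBlock(payload btc.ConvertedPayload, start, end int64) (btc.IPLDs, error) {
	filterer := btc.NewResponseFilterer()
	settings := &btc.SubscriptionSettings{
		Start: big.NewInt(start),
		End:   big.NewInt(end),
	}
	res, err := filterer.Filter(settings, payload)
	if err != nil {
		return btc.IPLDs{}, err
	}
	iplds, ok := res.(btc.IPLDs)
	if !ok {
		return btc.IPLDs{}, fmt.Errorf("unexpected response type %T", res)
	}
	return iplds, nil
}
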
diff --git a/pkg/super_node/btc/http_streamer.go b/pkg/super_node/btc/http_streamer.go
index 67dd3261..3cee388a 100644
--- a/pkg/super_node/btc/http_streamer.go
+++ b/pkg/super_node/btc/http_streamer.go
@@ -68,16 +68,16 @@ func (ps *HTTPPayloadStreamer) Stream(payloadChan chan shared.RawChainData) (sha
if bytes.Equal(blockHashBytes, ps.lastHash) {
continue
}
- ps.lastHash = blockHashBytes
block, err := client.GetBlock(blockHash)
if err != nil {
errChan <- err
continue
}
+ ps.lastHash = blockHashBytes
payloadChan <- BlockPayload{
- Header: &block.Header,
- Height: height,
- Txs: msgTxsToUtilTxs(block.Transactions),
+ Header: &block.Header,
+ BlockHeight: height,
+ Txs: msgTxsToUtilTxs(block.Transactions),
}
default:
}
diff --git a/pkg/super_node/btc/ipld_fetcher.go b/pkg/super_node/btc/ipld_fetcher.go
index 633cbf75..c839e031 100644
--- a/pkg/super_node/btc/ipld_fetcher.go
+++ b/pkg/super_node/btc/ipld_fetcher.go
@@ -21,14 +21,13 @@ import (
"errors"
"fmt"
- "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
-
"github.com/ipfs/go-block-format"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
log "github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
var (
@@ -52,16 +51,16 @@ func NewIPLDFetcher(ipfsPath string) (*IPLDFetcher, error) {
}
-// Fetch is the exported method for fetching and returning all the IPLDS specified in the CIDWrapper
+// Fetch is the exported method for fetching and returning all the IPLDs specified in the CIDWrapper
-func (f *IPLDFetcher) Fetch(cids shared.CIDsForFetching) (shared.FetchedIPLDs, error) {
+func (f *IPLDFetcher) Fetch(cids shared.CIDsForFetching) (shared.IPLDs, error) {
cidWrapper, ok := cids.(*CIDWrapper)
if !ok {
return nil, fmt.Errorf("btc fetcher: expected cids type %T got %T", &CIDWrapper{}, cids)
}
log.Debug("fetching iplds")
- iplds := new(IPLDWrapper)
+ iplds := IPLDs{}
iplds.BlockNumber = cidWrapper.BlockNumber
var err error
- iplds.Headers, err = f.FetchHeaders(cidWrapper.Headers)
+ iplds.Header, err = f.FetchHeader(cidWrapper.Header)
if err != nil {
return nil, err
}
@@ -73,43 +72,48 @@ func (f *IPLDFetcher) Fetch(cids shared.CIDsForFetching) (shared.FetchedIPLDs, e
}
-// FetchHeaders fetches headers
+// FetchHeader fetches a header
-// It uses the f.fetchBatch method
-func (f *IPLDFetcher) FetchHeaders(cids []HeaderModel) ([]blocks.Block, error) {
- log.Debug("fetching header iplds")
- headerCids := make([]cid.Cid, 0, len(cids))
- for _, c := range cids {
- dc, err := cid.Decode(c.CID)
- if err != nil {
- return nil, err
- }
- headerCids = append(headerCids, dc)
+// It uses the f.fetch method
+func (f *IPLDFetcher) FetchHeader(c HeaderModel) (ipfs.BlockModel, error) {
+ log.Debug("fetching header ipld")
+ dc, err := cid.Decode(c.CID)
+ if err != nil {
+ return ipfs.BlockModel{}, err
}
- headers := f.fetchBatch(headerCids)
- if len(headers) != len(headerCids) {
- log.Errorf("ipfs fetcher: number of header blocks returned (%d) does not match number expected (%d)", len(headers), len(headerCids))
- return headers, errUnexpectedNumberOfIPLDs
+ header, err := f.fetch(dc)
+ if err != nil {
+ return ipfs.BlockModel{}, err
}
- return headers, nil
+ return ipfs.BlockModel{
+ Data: header.RawData(),
+ CID: header.Cid().String(),
+ }, nil
}
// FetchTrxs fetches transactions
// It uses the f.fetchBatch method
-func (f *IPLDFetcher) FetchTrxs(cids []TxModel) ([]blocks.Block, error) {
+func (f *IPLDFetcher) FetchTrxs(cids []TxModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching transaction iplds")
- trxCids := make([]cid.Cid, 0, len(cids))
- for _, c := range cids {
+ trxCids := make([]cid.Cid, len(cids))
+ for i, c := range cids {
dc, err := cid.Decode(c.CID)
if err != nil {
return nil, err
}
- trxCids = append(trxCids, dc)
+ trxCids[i] = dc
}
trxs := f.fetchBatch(trxCids)
- if len(trxs) != len(trxCids) {
- log.Errorf("ipfs fetcher: number of transaction blocks returned (%d) does not match number expected (%d)", len(trxs), len(trxCids))
- return trxs, errUnexpectedNumberOfIPLDs
+ trxIPLDs := make([]ipfs.BlockModel, len(trxs))
+ for i, trx := range trxs {
+ trxIPLDs[i] = ipfs.BlockModel{
+ Data: trx.RawData(),
+ CID: trx.Cid().String(),
+ }
}
- return trxs, nil
+ if len(trxIPLDs) != len(trxCids) {
+ log.Errorf("ipfs fetcher: number of transaction blocks returned (%d) does not match number expected (%d)", len(trxs), len(trxCids))
+ return trxIPLDs, errUnexpectedNumberOfIPLDs
+ }
+ return trxIPLDs, nil
}
// fetch is used to fetch a single cid
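
With headers now fetched singly, typical usage of the fetcher looks roughly like this (a sketch; it assumes Fetch returns the concrete btc.IPLDs value built above):

package example

import (
	"fmt"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
)

// fetchBlockIPLDs resolves a CIDWrapper into raw IPLD block models.
func fetchBlockIPLDs(ipfsPath string, cids *btc.CIDWrapper) (btc.IPLDs, error) {
	fetcher, err := btc.NewIPLDFetcher(ipfsPath)
	if err != nil {
		return btc.IPLDs{}, err
	}
	res, err := fetcher.Fetch(cids)
	if err != nil {
		return btc.IPLDs{}, err
	}
	iplds, ok := res.(btc.IPLDs)
	if !ok {
		return btc.IPLDs{}, fmt.Errorf("unexpected iplds type %T", res)
	}
	return iplds, nil
}
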
diff --git a/pkg/super_node/btc/ipld_fetcher_test.go b/pkg/super_node/btc/ipld_fetcher_test.go
deleted file mode 100644
index 8dd3c1ae..00000000
--- a/pkg/super_node/btc/ipld_fetcher_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-
-package btc
diff --git a/pkg/super_node/btc/mocks/converter.go b/pkg/super_node/btc/mocks/converter.go
index 8f7f2ccd..5ba7a096 100644
--- a/pkg/super_node/btc/mocks/converter.go
+++ b/pkg/super_node/btc/mocks/converter.go
@@ -26,12 +26,12 @@ import (
// PayloadConverter is the underlying struct for the Converter interface
type PayloadConverter struct {
PassedStatediffPayload btc.BlockPayload
- ReturnIPLDPayload btc.IPLDPayload
+ ReturnIPLDPayload btc.ConvertedPayload
ReturnErr error
}
-// Convert method is used to convert a geth statediff.Payload to a IPLDPayload
+// Convert method is used to convert a btc BlockPayload to a ConvertedPayload
-func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.StreamedIPLDs, error) {
+func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.ConvertedData, error) {
stateDiffPayload, ok := payload.(btc.BlockPayload)
if !ok {
return nil, fmt.Errorf("convert expected payload type %T got %T", btc.BlockPayload{}, payload)
@@ -43,13 +43,13 @@ func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.Streame
// IterativePayloadConverter is the underlying struct for the Converter interface
type IterativePayloadConverter struct {
PassedStatediffPayload []btc.BlockPayload
- ReturnIPLDPayload []btc.IPLDPayload
+ ReturnIPLDPayload []btc.ConvertedPayload
ReturnErr error
iteration int
}
-// Convert method is used to convert a geth statediff.Payload to a IPLDPayload
+// Convert method is used to convert a btc BlockPayload to a ConvertedPayload
-func (pc *IterativePayloadConverter) Convert(payload shared.RawChainData) (shared.StreamedIPLDs, error) {
+func (pc *IterativePayloadConverter) Convert(payload shared.RawChainData) (shared.ConvertedData, error) {
stateDiffPayload, ok := payload.(btc.BlockPayload)
if !ok {
return nil, fmt.Errorf("convert expected payload type %T got %T", btc.BlockPayload{}, payload)
diff --git a/pkg/super_node/btc/mocks/publisher.go b/pkg/super_node/btc/mocks/publisher.go
index dc15fe52..c9a7cc59 100644
--- a/pkg/super_node/btc/mocks/publisher.go
+++ b/pkg/super_node/btc/mocks/publisher.go
@@ -26,16 +26,16 @@ import (
// IPLDPublisher is the underlying struct for the Publisher interface
type IPLDPublisher struct {
- PassedIPLDPayload btc.IPLDPayload
+ PassedIPLDPayload btc.ConvertedPayload
ReturnCIDPayload *btc.CIDPayload
ReturnErr error
}
-// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
+// Publish publishes a ConvertedPayload to IPFS and returns the corresponding CIDPayload
-func (pub *IPLDPublisher) Publish(payload shared.StreamedIPLDs) (shared.CIDsForIndexing, error) {
- ipldPayload, ok := payload.(btc.IPLDPayload)
+func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForIndexing, error) {
+ ipldPayload, ok := payload.(btc.ConvertedPayload)
if !ok {
- return nil, fmt.Errorf("publish expected payload type %T got %T", &btc.IPLDPayload{}, payload)
+ return nil, fmt.Errorf("publish expected payload type %T got %T", &btc.ConvertedPayload{}, payload)
}
pub.PassedIPLDPayload = ipldPayload
return pub.ReturnCIDPayload, pub.ReturnErr
@@ -43,17 +43,17 @@ func (pub *IPLDPublisher) Publish(payload shared.StreamedIPLDs) (shared.CIDsForI
// IterativeIPLDPublisher is the underlying struct for the Publisher interface; used in testing
type IterativeIPLDPublisher struct {
- PassedIPLDPayload []btc.IPLDPayload
+ PassedIPLDPayload []btc.ConvertedPayload
ReturnCIDPayload []*btc.CIDPayload
ReturnErr error
iteration int
}
-// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
+// Publish publishes a ConvertedPayload to IPFS and returns the corresponding CIDPayload
-func (pub *IterativeIPLDPublisher) Publish(payload shared.StreamedIPLDs) (shared.CIDsForIndexing, error) {
- ipldPayload, ok := payload.(btc.IPLDPayload)
+func (pub *IterativeIPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForIndexing, error) {
+ ipldPayload, ok := payload.(btc.ConvertedPayload)
if !ok {
- return nil, fmt.Errorf("publish expected payload type %T got %T", &btc.IPLDPayload{}, payload)
+ return nil, fmt.Errorf("publish expected payload type %T got %T", &btc.ConvertedPayload{}, payload)
}
pub.PassedIPLDPayload = append(pub.PassedIPLDPayload, ipldPayload)
if len(pub.ReturnCIDPayload) < pub.iteration+1 {
diff --git a/pkg/super_node/btc/mocks/test_data.go b/pkg/super_node/btc/mocks/test_data.go
index 8e3de6be..0d5645bf 100644
--- a/pkg/super_node/btc/mocks/test_data.go
+++ b/pkg/super_node/btc/mocks/test_data.go
@@ -229,9 +229,9 @@ var (
btcutil.NewTx(MockBlock.Transactions[2]),
}
MockBlockPayload = btc.BlockPayload{
- Header: &MockBlock.Header,
- Txs: MockTransactions,
- Height: MockBlockHeight,
+ Header: &MockBlock.Header,
+ Txs: MockTransactions,
+ BlockHeight: MockBlockHeight,
}
sClass1, addresses1, numOfSigs1, _ = txscript.ExtractPkScriptAddrs([]byte{
0x41, // OP_DATA_65
@@ -677,7 +677,7 @@ var (
Timestamp: MockBlock.Header.Timestamp.UnixNano(),
Bits: MockBlock.Header.Bits,
}
- MockIPLDPayload = btc.IPLDPayload{
+ MockConvertedPayload = btc.ConvertedPayload{
BlockPayload: MockBlockPayload,
TxMetaData: MockTxsMetaData,
}
diff --git a/pkg/super_node/btc/payload_fetcher.go b/pkg/super_node/btc/payload_fetcher.go
index d8290d10..3582bd90 100644
--- a/pkg/super_node/btc/payload_fetcher.go
+++ b/pkg/super_node/btc/payload_fetcher.go
@@ -55,9 +55,9 @@ func (fetcher *PayloadFetcher) FetchAt(blockHeights []uint64) ([]shared.RawChain
return nil, err
}
blockPayloads[i] = BlockPayload{
- Height: int64(height),
- Header: &block.Header,
- Txs: msgTxsToUtilTxs(block.Transactions),
+ BlockHeight: int64(height),
+ Header: &block.Header,
+ Txs: msgTxsToUtilTxs(block.Transactions),
}
}
return blockPayloads, nil
diff --git a/pkg/super_node/btc/payload_fetcher_test.go b/pkg/super_node/btc/payload_fetcher_test.go
deleted file mode 100644
index 8dd3c1ae..00000000
--- a/pkg/super_node/btc/payload_fetcher_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-
-package btc
diff --git a/pkg/super_node/btc/publisher.go b/pkg/super_node/btc/publisher.go
index f010217c..e8ffc051 100644
--- a/pkg/super_node/btc/publisher.go
+++ b/pkg/super_node/btc/publisher.go
@@ -17,22 +17,20 @@
package btc
import (
- "errors"
"fmt"
"strconv"
- "github.com/btcsuite/btcd/wire"
- "github.com/btcsuite/btcutil"
-
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/dag_putters"
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
-// IPLDPublisher satisfies the IPLDPublisher for ethereum
+// IPLDPublisher satisfies the IPLDPublisher interface for bitcoin
type IPLDPublisher struct {
- HeaderPutter shared.DagPutter
- TransactionPutter shared.DagPutter
+ HeaderPutter shared.DagPutter
+ TransactionPutter shared.DagPutter
+ TransactionTriePutter shared.DagPutter
}
// NewIPLDPublisher creates a pointer to a new Publisher which satisfies the IPLDPublisher interface
@@ -42,32 +40,38 @@ func NewIPLDPublisher(ipfsPath string) (*IPLDPublisher, error) {
return nil, err
}
return &IPLDPublisher{
- HeaderPutter: dag_putters.NewBtcHeaderDagPutter(node),
- TransactionPutter: dag_putters.NewBtcTxDagPutter(node),
+ HeaderPutter: dag_putters.NewBtcHeaderDagPutter(node),
+ TransactionPutter: dag_putters.NewBtcTxDagPutter(node),
+ TransactionTriePutter: dag_putters.NewBtcTxTrieDagPutter(node),
}, nil
}
-// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
+// Publish publishes a ConvertedPayload to IPFS and returns the corresponding CIDPayload
-func (pub *IPLDPublisher) Publish(payload shared.StreamedIPLDs) (shared.CIDsForIndexing, error) {
- ipldPayload, ok := payload.(IPLDPayload)
+func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForIndexing, error) {
+ ipldPayload, ok := payload.(ConvertedPayload)
if !ok {
- return nil, fmt.Errorf("eth publisher expected payload type %T got %T", &IPLDPayload{}, payload)
+ return nil, fmt.Errorf("eth publisher expected payload type %T got %T", &ConvertedPayload{}, payload)
+ }
+ // Generate nodes
+ headerNode, txNodes, txTrieNodes, err := ipld.FromHeaderAndTxs(ipldPayload.Header, ipldPayload.Txs)
+ if err != nil {
+ return nil, err
}
// Process and publish headers
- headerCid, err := pub.publishHeader(ipldPayload.Header)
+ headerCid, err := pub.publishHeader(headerNode)
if err != nil {
return nil, err
}
header := HeaderModel{
CID: headerCid,
ParentHash: ipldPayload.Header.PrevBlock.String(),
- BlockNumber: strconv.Itoa(int(ipldPayload.Height)),
+ BlockNumber: strconv.Itoa(int(ipldPayload.BlockPayload.BlockHeight)),
BlockHash: ipldPayload.Header.BlockHash().String(),
Timestamp: ipldPayload.Header.Timestamp.UnixNano(),
Bits: ipldPayload.Header.Bits,
}
// Process and publish transactions
- transactionCids, err := pub.publishTransactions(ipldPayload.Txs, ipldPayload.TxMetaData)
+ transactionCids, err := pub.publishTransactions(txNodes, txTrieNodes, ipldPayload.TxMetaData)
if err != nil {
return nil, err
}
@@ -78,25 +82,22 @@ func (pub *IPLDPublisher) Publish(payload shared.StreamedIPLDs) (shared.CIDsForI
}, nil
}
-func (pub *IPLDPublisher) publishHeader(header *wire.BlockHeader) (string, error) {
- cids, err := pub.HeaderPutter.DagPut(header)
+func (pub *IPLDPublisher) publishHeader(header *ipld.BtcHeader) (string, error) {
+ cid, err := pub.HeaderPutter.DagPut(header)
if err != nil {
return "", err
}
- return cids[0], nil
+ return cid, nil
}
-func (pub *IPLDPublisher) publishTransactions(transactions []*btcutil.Tx, trxMeta []TxModelWithInsAndOuts) ([]TxModelWithInsAndOuts, error) {
- transactionCids, err := pub.TransactionPutter.DagPut(transactions)
- if err != nil {
- return nil, err
- }
- if len(transactionCids) != len(trxMeta) {
- return nil, errors.New("expected one CID for each transaction")
- }
- mappedTrxCids := make([]TxModelWithInsAndOuts, len(transactionCids))
- for i, cid := range transactionCids {
- mappedTrxCids[i] = TxModelWithInsAndOuts{
+func (pub *IPLDPublisher) publishTransactions(transactions []*ipld.BtcTx, txTrie []*ipld.BtcTxTrie, trxMeta []TxModelWithInsAndOuts) ([]TxModelWithInsAndOuts, error) {
+ txCids := make([]TxModelWithInsAndOuts, len(transactions))
+ for i, tx := range transactions {
+ cid, err := pub.TransactionPutter.DagPut(tx)
+ if err != nil {
+ return nil, err
+ }
+ txCids[i] = TxModelWithInsAndOuts{
CID: cid,
Index: trxMeta[i].Index,
TxHash: trxMeta[i].TxHash,
@@ -106,5 +107,11 @@ func (pub *IPLDPublisher) publishTransactions(transactions []*btcutil.Tx, trxMet
TxOutputs: trxMeta[i].TxOutputs,
}
}
- return mappedTrxCids, nil
+ for _, txNode := range txTrie {
+ // We don't use the tx trie cids for anything at the moment, but the nodes still need to be published
+ if _, err := pub.TransactionTriePutter.DagPut(txNode); err != nil {
+ return nil, err
+ }
+ }
+ return txCids, nil
}
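
Putting the converter, publisher, and indexer together, the write path now looks roughly like this (a condensed sketch; the wiring and error handling are illustrative):

package example

import (
	"fmt"

	"github.com/btcsuite/btcd/chaincfg"

	"github.com/vulcanize/vulcanizedb/pkg/postgres"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
)

// processBlock converts a raw block payload, publishes the derived IPLD
// nodes (header, txs, and tx trie), and indexes the returned CIDs.
func processBlock(db *postgres.DB, ipfsPath string, raw btc.BlockPayload) error {
	converter := btc.NewPayloadConverter(&chaincfg.MainNetParams)
	converted, err := converter.Convert(raw)
	if err != nil {
		return err
	}
	publisher, err := btc.NewIPLDPublisher(ipfsPath)
	if err != nil {
		return err
	}
	cids, err := publisher.Publish(converted)
	if err != nil {
		return err
	}
	cidPayload, ok := cids.(*btc.CIDPayload)
	if !ok {
		return fmt.Errorf("unexpected cid payload type %T", cids)
	}
	return btc.NewCIDIndexer(db).Index(cidPayload)
}
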
diff --git a/pkg/super_node/btc/publisher_test.go b/pkg/super_node/btc/publisher_test.go
index fee15d57..5d92286a 100644
--- a/pkg/super_node/btc/publisher_test.go
+++ b/pkg/super_node/btc/publisher_test.go
@@ -17,6 +17,9 @@
package btc_test
import (
+ "bytes"
+
+ "github.com/ethereum/go-ethereum/common"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -26,25 +29,47 @@ import (
)
var (
- mockHeaderDagPutter *mocks2.DagPutter
- mockTrxDagPutter *mocks2.DagPutter
+ mockHeaderDagPutter *mocks2.MappedDagPutter
+ mockTrxDagPutter *mocks2.MappedDagPutter
+ mockTrxTrieDagPutter *mocks2.DagPutter
)
var _ = Describe("Publisher", func() {
BeforeEach(func() {
- mockHeaderDagPutter = new(mocks2.DagPutter)
- mockTrxDagPutter = new(mocks2.DagPutter)
+ mockHeaderDagPutter = new(mocks2.MappedDagPutter)
+ mockTrxDagPutter = new(mocks2.MappedDagPutter)
+ mockTrxTrieDagPutter = new(mocks2.DagPutter)
})
Describe("Publish", func() {
It("Publishes the passed IPLDPayload objects to IPFS and returns a CIDPayload for indexing", func() {
- mockHeaderDagPutter.CIDsToReturn = []string{"mockHeaderCID"}
- mockTrxDagPutter.CIDsToReturn = []string{"mockTrxCID1", "mockTrxCID2", "mockTrxCID3"}
- publisher := btc.IPLDPublisher{
- HeaderPutter: mockHeaderDagPutter,
- TransactionPutter: mockTrxDagPutter,
+ // use a fresh buffer per serialization so the earlier byte slices
+ // are not aliased to a shared, still-growing buffer
+ headerBy := new(bytes.Buffer)
+ err := mocks.MockConvertedPayload.BlockPayload.Header.Serialize(headerBy)
+ Expect(err).ToNot(HaveOccurred())
+ headerBytes := headerBy.Bytes()
+ tx1By := new(bytes.Buffer)
+ err = mocks.MockTransactions[0].MsgTx().Serialize(tx1By)
+ Expect(err).ToNot(HaveOccurred())
+ tx1Bytes := tx1By.Bytes()
+ tx2By := new(bytes.Buffer)
+ err = mocks.MockTransactions[1].MsgTx().Serialize(tx2By)
+ Expect(err).ToNot(HaveOccurred())
+ tx2Bytes := tx2By.Bytes()
+ tx3By := new(bytes.Buffer)
+ err = mocks.MockTransactions[2].MsgTx().Serialize(tx3By)
+ Expect(err).ToNot(HaveOccurred())
+ tx3Bytes := tx3By.Bytes()
+ mockHeaderDagPutter.CIDsToReturn = map[common.Hash]string{
+ common.BytesToHash(headerBytes): "mockHeaderCID",
}
- payload, err := publisher.Publish(mocks.MockIPLDPayload)
+ mockTrxDagPutter.CIDsToReturn = map[common.Hash]string{
+ common.BytesToHash(tx1Bytes): "mockTrxCID1",
+ common.BytesToHash(tx2Bytes): "mockTrxCID2",
+ common.BytesToHash(tx3Bytes): "mockTrxCID3",
+ }
+ publisher := btc.IPLDPublisher{
+ HeaderPutter: mockHeaderDagPutter,
+ TransactionPutter: mockTrxDagPutter,
+ TransactionTriePutter: mockTrxTrieDagPutter,
+ }
+ payload, err := publisher.Publish(mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
cidPayload, ok := payload.(*btc.CIDPayload)
Expect(ok).To(BeTrue())
diff --git a/pkg/super_node/btc/resolver.go b/pkg/super_node/btc/resolver.go
deleted file mode 100644
index e7788362..00000000
--- a/pkg/super_node/btc/resolver.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-
-package btc
-
-import (
- "fmt"
-
- "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
-
- "github.com/ipfs/go-block-format"
-)
-
-// IPLDResolver satisfies the IPLDResolver interface for bitcoin
-type IPLDResolver struct{}
-
-// NewIPLDResolver returns a pointer to an IPLDResolver which satisfies the IPLDResolver interface
-func NewIPLDResolver() *IPLDResolver {
- return &IPLDResolver{}
-}
-
-// Resolve is the exported method for resolving all of the BTC IPLDs packaged in an IpfsBlockWrapper
-func (eir *IPLDResolver) Resolve(iplds shared.FetchedIPLDs) (shared.ServerResponse, error) {
- ipfsBlocks, ok := iplds.(*IPLDWrapper)
- if !ok {
- return StreamResponse{}, fmt.Errorf("eth resolver expected iplds type %T got %T", &IPLDWrapper{}, iplds)
- }
- return StreamResponse{
- BlockNumber: ipfsBlocks.BlockNumber,
- SerializedHeaders: eir.resolve(ipfsBlocks.Headers),
- SerializedTxs: eir.resolve(ipfsBlocks.Transactions),
- }, nil
-}
-
-func (eir *IPLDResolver) resolve(iplds []blocks.Block) [][]byte {
- rlps := make([][]byte, 0, len(iplds))
- for _, ipld := range iplds {
- rlps = append(rlps, ipld.RawData())
- }
- return rlps
-}
diff --git a/pkg/super_node/btc/retriever.go b/pkg/super_node/btc/retriever.go
index d49bd3c4..864812bd 100644
--- a/pkg/super_node/btc/retriever.go
+++ b/pkg/super_node/btc/retriever.go
@@ -57,7 +57,7 @@ func (ecr *CIDRetriever) RetrieveLastBlockNumber() (int64, error) {
}
// Retrieve is used to retrieve all of the CIDs which conform to the passed StreamFilters
-func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumber int64) (shared.CIDsForFetching, bool, error) {
+func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumber int64) ([]shared.CIDsForFetching, bool, error) {
streamFilter, ok := filter.(*SubscriptionSettings)
if !ok {
return nil, true, fmt.Errorf("btc retriever expected filter type %T got %T", &SubscriptionSettings{}, filter)
@@ -68,38 +68,42 @@ func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumbe
return nil, true, err
}
- cw := new(CIDWrapper)
- cw.BlockNumber = big.NewInt(blockNumber)
// Retrieve cached header CIDs
- if !streamFilter.HeaderFilter.Off {
- cw.Headers, err = ecr.RetrieveHeaderCIDs(tx, blockNumber)
- if err != nil {
- if err := tx.Rollback(); err != nil {
- log.Error(err)
- }
- log.Error("header cid retrieval error")
- return nil, true, err
+ headers, err := ecr.RetrieveHeaderCIDs(tx, blockNumber)
+ if err != nil {
+ if err := tx.Rollback(); err != nil {
+ log.Error(err)
}
+ log.Error("header cid retrieval error")
+ return nil, true, err
}
- // Retrieve cached trx CIDs
- if !streamFilter.TxFilter.Off {
- cw.Transactions, err = ecr.RetrieveTxCIDs(tx, streamFilter.TxFilter, blockNumber)
- if err != nil {
- if err := tx.Rollback(); err != nil {
- log.Error(err)
- }
- log.Error("transaction cid retrieval error")
- return nil, true, err
+ cws := make([]shared.CIDsForFetching, len(headers))
+ empty := true
+ for i, header := range headers {
+ cw := new(CIDWrapper)
+ cw.BlockNumber = big.NewInt(blockNumber)
+ if !streamFilter.HeaderFilter.Off {
+ cw.Header = header
+ empty = false
}
+ // Retrieve cached trx CIDs
+ if !streamFilter.TxFilter.Off {
+ cw.Transactions, err = ecr.RetrieveTxCIDs(tx, streamFilter.TxFilter, header.ID)
+ if err != nil {
+ if err := tx.Rollback(); err != nil {
+ log.Error(err)
+ }
+ log.Error("transaction cid retrieval error")
+ return nil, true, err
+ }
+ if len(cw.Transactions) > 0 {
+ empty = false
+ }
+ }
+ cws[i] = cw
}
- return cw, empty(cw), tx.Commit()
-}
-func empty(cidWrapper *CIDWrapper) bool {
- if len(cidWrapper.Transactions) > 0 || len(cidWrapper.Headers) > 0 {
- return false
- }
- return true
+ return cws, empty, tx.Commit()
}
// RetrieveHeaderCIDs retrieves and returns all of the header cids at the provided blockheight
@@ -111,32 +115,10 @@ func (ecr *CIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]H
return headers, tx.Select(&headers, pgStr, blockNumber)
}
-/*
-type TxModel struct {
- ID int64 `db:"id"`
- HeaderID int64 `db:"header_id"`
- Index int64 `db:"index"`
- TxHash string `db:"tx_hash"`
- CID string `db:"cid"`
- SegWit bool `db:"segwit"`
- WitnessHash string `db:"witness_hash"`
-}
-// TxFilter contains filter settings for txs
-type TxFilter struct {
- Off bool
- Index int64 // allow filtering by index so that we can filter for only coinbase transactions (index 0) if we want to
- Segwit bool // allow filtering for segwit trxs
- WitnessHashes []string // allow filtering for specific witness hashes
- PkScriptClass uint8 // allow filtering for txs that have at least one tx output with the specified pkscript class
- MultiSig bool // allow filtering for txs that have at least one tx output that requires more than one signature
- Addresses []string // allow filtering for txs that have at least one tx output with at least one of the provided addresses
-}
-*/
-
-// RetrieveTxCIDs retrieves and returns all of the trx cids at the provided blockheight that conform to the provided filter parameters
+// RetrieveTxCIDs retrieves and returns all of the trx cids for the provided header id that conform to the provided filter parameters
// also returns the ids for the returned transaction cids
-func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, blockNumber int64) ([]TxModel, error) {
- log.Debug("retrieving transaction cids for block ", blockNumber)
+func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID int64) ([]TxModel, error) {
+ log.Debug("retrieving transaction cids for header id ", headerID)
args := make([]interface{}, 0, 3)
results := make([]TxModel, 0)
id := 1
@@ -147,8 +129,8 @@ func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, blockNum
WHERE transaction_cids.header_id = header_cids.id
AND tx_inputs.tx_id = transaction_cids.id
AND tx_outputs.tx_id = transaction_cids.id
- AND header_cids.block_number = $%d`, id)
- args = append(args, blockNumber)
+ AND header_cids.id = $%d`, id)
+ args = append(args, headerID)
id++
if txFilter.Segwit {
pgStr += ` AND transaction_cids.segwit = true`
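
Retrieve now returns one CIDWrapper per header at the requested height, so competing headers (e.g. during a reorg) each carry their own transaction set. A sketch of consuming the new return shape:

package example

import (
	"fmt"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

// retrieveAtHeight collects the per-header CID wrappers for a block height.
func retrieveAtHeight(retriever *btc.CIDRetriever, filter shared.SubscriptionSettings, height int64) ([]*btc.CIDWrapper, error) {
	cidSets, empty, err := retriever.Retrieve(filter, height)
	if err != nil {
		return nil, err
	}
	if empty {
		return nil, nil // nothing at this height matched the filter
	}
	wrappers := make([]*btc.CIDWrapper, 0, len(cidSets))
	for _, set := range cidSets {
		cw, ok := set.(*btc.CIDWrapper)
		if !ok {
			return nil, fmt.Errorf("unexpected cid wrapper type %T", set)
		}
		wrappers = append(wrappers, cw)
	}
	return wrappers, nil
}
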
diff --git a/pkg/super_node/btc/retriever_test.go b/pkg/super_node/btc/retriever_test.go
deleted file mode 100644
index 8dd3c1ae..00000000
--- a/pkg/super_node/btc/retriever_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-
-package btc
diff --git a/pkg/super_node/btc/streamer.go b/pkg/super_node/btc/streamer.go
index 8a212e68..9c5f4839 100644
--- a/pkg/super_node/btc/streamer.go
+++ b/pkg/super_node/btc/streamer.go
@@ -49,9 +49,9 @@ func (ps *PayloadStreamer) Stream(payloadChan chan shared.RawChainData) (shared.
// Notification handler for block connections, forwards new block data to the payloadChan
OnFilteredBlockConnected: func(height int32, header *wire.BlockHeader, txs []*btcutil.Tx) {
payloadChan <- BlockPayload{
- Height: int64(height),
- Header: header,
- Txs: txs,
+ BlockHeight: int64(height),
+ Header: header,
+ Txs: txs,
}
},
}
diff --git a/pkg/super_node/btc/streamer_test.go b/pkg/super_node/btc/streamer_test.go
deleted file mode 100644
index 8dd3c1ae..00000000
--- a/pkg/super_node/btc/streamer_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-
-package btc
diff --git a/pkg/super_node/btc/types.go b/pkg/super_node/btc/types.go
index ece70479..292984f3 100644
--- a/pkg/super_node/btc/types.go
+++ b/pkg/super_node/btc/types.go
@@ -17,29 +17,34 @@
package btc
import (
- "encoding/json"
"math/big"
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs"
+
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
- "github.com/ipfs/go-block-format"
)
// BlockPayload packages the block and tx data received from block connection notifications
type BlockPayload struct {
- Height int64
- Header *wire.BlockHeader
- Txs []*btcutil.Tx
+ BlockHeight int64
+ Header *wire.BlockHeader
+ Txs []*btcutil.Tx
}
-// IPLDPayload is a custom type which packages raw BTC data for publishing to IPFS and filtering to subscribers
+// ConvertedPayload is a custom type which packages raw BTC data for publishing to IPFS and filtering to subscribers
// Returned by PayloadConverter
// Passed to IPLDPublisher and ResponseFilterer
-type IPLDPayload struct {
+type ConvertedPayload struct {
BlockPayload
TxMetaData []TxModelWithInsAndOuts
}
+// Height satisfies the shared.ConvertedData interface
+func (cp ConvertedPayload) Height() int64 {
+ return cp.BlockPayload.BlockHeight
+}
+
// CIDPayload is a struct to hold all the CIDs and their associated meta data for indexing in Postgres
// Returned by IPLDPublisher
// Passed to CIDIndexer
@@ -53,45 +58,19 @@ type CIDPayload struct {
// Passed to IPLDFetcher
type CIDWrapper struct {
BlockNumber *big.Int
- Headers []HeaderModel
+ Header HeaderModel
Transactions []TxModel
}
-// IPLDWrapper is used to package raw IPLD block data fetched from IPFS
-// Returned by IPLDFetcher
-// Passed to IPLDResolver
-type IPLDWrapper struct {
+// IPLDs is used to package raw IPLD block data fetched from IPFS and returned by the server
+// Returned by IPLDFetcher and ResponseFilterer
+type IPLDs struct {
BlockNumber *big.Int
- Headers []blocks.Block
- Transactions []blocks.Block
+ Header ipfs.BlockModel
+ Transactions []ipfs.BlockModel
}
-// StreamResponse holds the data streamed from the super node eth service to the requesting clients
-// Returned by IPLDResolver and ResponseFilterer
-// Passed to client subscriptions
-type StreamResponse struct {
- BlockNumber *big.Int `json:"blockNumber"`
- SerializedHeaders [][]byte `json:"headerBytes"`
- SerializedTxs [][]byte `json:"transactionBytes"`
-
- encoded []byte
- err error
-}
-
-func (sr *StreamResponse) ensureEncoded() {
- if sr.encoded == nil && sr.err == nil {
- sr.encoded, sr.err = json.Marshal(sr)
- }
-}
-
-// Length to implement Encoder interface for StateDiff
-func (sr *StreamResponse) Length() int {
- sr.ensureEncoded()
- return len(sr.encoded)
-}
-
-// Encode to implement Encoder interface for StateDiff
-func (sr *StreamResponse) Encode() ([]byte, error) {
- sr.ensureEncoded()
- return sr.encoded, sr.err
+// Height satisfies the shared.IPLDs interface
+func (i IPLDs) Height() int64 {
+ return i.BlockNumber.Int64()
}
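
Because IPLDs now carries ipfs.BlockModel values rather than pre-serialized byte slices, subscribers decode the raw data themselves; a sketch using btcd's wire types:

package example

import (
	"bytes"

	"github.com/btcsuite/btcd/wire"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
)

// decodeHeader deserializes the header IPLD in an IPLDs response back
// into a wire.BlockHeader.
func decodeHeader(iplds btc.IPLDs) (*wire.BlockHeader, error) {
	header := new(wire.BlockHeader)
	if err := header.Deserialize(bytes.NewReader(iplds.Header.Data)); err != nil {
		return nil, err
	}
	return header, nil
}
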
diff --git a/pkg/super_node/config.go b/pkg/super_node/config.go
new file mode 100644
index 00000000..e8afe345
--- /dev/null
+++ b/pkg/super_node/config.go
@@ -0,0 +1,194 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+package super_node
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/spf13/viper"
+
+ "github.com/vulcanize/vulcanizedb/pkg/config"
+ "github.com/vulcanize/vulcanizedb/pkg/eth/core"
+ "github.com/vulcanize/vulcanizedb/pkg/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+ "github.com/vulcanize/vulcanizedb/utils"
+)
+
+// Env variables
+const (
+ SUPERNODE_CHAIN = "SUPERNODE_CHAIN"
+ SUPERNODE_SYNC = "SUPERNODE_SYNC"
+ SUPERNODE_WORKERS = "SUPERNODE_WORKERS"
+ SUPERNODE_SERVER = "SUPERNODE_SERVER"
+ SUPERNODE_WS_PATH = "SUPERNODE_WS_PATH"
+ SUPERNODE_IPC_PATH = "SUPERNODE_IPC_PATH"
+ SUPERNODE_HTTP_PATH = "SUPERNODE_HTTP_PATH"
+ SUPERNODE_BACKFILL = "SUPERNODE_BACKFILL"
+ SUPERNODE_FREQUENCY = "SUPERNODE_FREQUENCY"
+ SUPERNODE_BATCH_SIZE = "SUPERNODE_BATCH_SIZE"
+ SUPERNODE_BATCH_NUMBER = "SUPERNODE_BATCH_NUMBER"
+)
+
+// Config struct
+type Config struct {
+ // Ubiquitous fields
+ Chain shared.ChainType
+ IPFSPath string
+ DB *postgres.DB
+ DBConfig config.Database
+ Quit chan bool
+ // Server fields
+ Serve bool
+ WSEndpoint string
+ HTTPEndpoint string
+ IPCEndpoint string
+ // Sync params
+ Sync bool
+ Workers int
+ WSClient interface{}
+ NodeInfo core.Node
+ // Backfiller params
+ BackFill bool
+ HTTPClient interface{}
+ Frequency time.Duration
+ BatchSize uint64
+ BatchNumber uint64
+}
+
+// NewSuperNodeConfig is used to initialize a SuperNode config from a .toml file
+// Separate chain super node instances need to be run with separate ipfs paths in order to avoid lock contention on the ipfs repository lockfile
+func NewSuperNodeConfig() (*Config, error) {
+ c := new(Config)
+ var err error
+
+ viper.BindEnv("superNode.chain", SUPERNODE_CHAIN)
+ viper.BindEnv("superNode.sync", SUPERNODE_SYNC)
+ viper.BindEnv("superNode.workers", SUPERNODE_WORKERS)
+ viper.BindEnv("ethereum.wsPath", shared.ETH_WS_PATH)
+ viper.BindEnv("bitcoin.wsPath", shared.BTC_WS_PATH)
+ viper.BindEnv("superNode.server", SUPERNODE_SERVER)
+ viper.BindEnv("superNode.wsPath", SUPERNODE_WS_PATH)
+ viper.BindEnv("superNode.ipcPath", SUPERNODE_IPC_PATH)
+ viper.BindEnv("superNode.httpPath", SUPERNODE_HTTP_PATH)
+ viper.BindEnv("superNode.backFill", SUPERNODE_BACKFILL)
+
+ chain := viper.GetString("superNode.chain")
+ c.Chain, err = shared.NewChainType(chain)
+ if err != nil {
+ return nil, err
+ }
+
+ c.IPFSPath, err = shared.GetIPFSPath()
+ if err != nil {
+ return nil, err
+ }
+
+ c.Sync = viper.GetBool("superNode.sync")
+ if c.Sync {
+ workers := viper.GetInt("superNode.workers")
+ if workers < 1 {
+ workers = 1
+ }
+ c.Workers = workers
+ switch c.Chain {
+ case shared.Ethereum:
+ ethWS := viper.GetString("ethereum.wsPath")
+ c.NodeInfo, c.WSClient, err = shared.GetEthNodeAndClient(fmt.Sprintf("ws://%s", ethWS))
+ if err != nil {
+ return nil, err
+ }
+ case shared.Bitcoin:
+ btcWS := viper.GetString("bitcoin.wsPath")
+ c.NodeInfo, c.WSClient = shared.GetBtcNodeAndClient(btcWS)
+ }
+ }
+
+ c.Serve = viper.GetBool("superNode.server")
+ if c.Serve {
+ wsPath := viper.GetString("superNode.wsPath")
+ if wsPath == "" {
+ wsPath = "127.0.0.1:8080"
+ }
+ c.WSEndpoint = wsPath
+ ipcPath := viper.GetString("superNode.ipcPath")
+ if ipcPath == "" {
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return nil, err
+ }
+ ipcPath = filepath.Join(home, ".vulcanize/vulcanize.ipc")
+ }
+ c.IPCEndpoint = ipcPath
+ httpPath := viper.GetString("superNode.httpPath")
+ if httpPath == "" {
+ httpPath = "127.0.0.1:8081"
+ }
+ c.HTTPEndpoint = httpPath
+ }
+
+ c.BackFill = viper.GetBool("superNode.backFill")
+ if c.BackFill {
+ if err := c.BackFillFields(); err != nil {
+ return nil, err
+ }
+ }
+
+ c.DBConfig.Init()
+ db := utils.LoadPostgres(c.DBConfig, c.NodeInfo)
+ c.DB = &db
+ c.Quit = make(chan bool)
+
+ return c, nil
+}
+
+// BackFillFields is used to fill in the BackFill fields of the config
+func (c *Config) BackFillFields() error {
+ var err error
+
+ viper.BindEnv("ethereum.httpPath", shared.ETH_HTTP_PATH)
+ viper.BindEnv("bitcoin.httpPath", shared.BTC_HTTP_PATH)
+ viper.BindEnv("superNode.frequency", SUPERNODE_FREQUENCY)
+ viper.BindEnv("superNode.batchSize", SUPERNODE_BATCH_SIZE)
+ viper.BindEnv("superNode.batchNumber", SUPERNODE_BATCH_NUMBER)
+
+ switch c.Chain {
+ case shared.Ethereum:
+ ethHTTP := viper.GetString("ethereum.httpPath")
+ c.NodeInfo, c.HTTPClient, err = shared.GetEthNodeAndClient(fmt.Sprintf("http://%s", ethHTTP))
+ if err != nil {
+ return err
+ }
+ case shared.Bitcoin:
+ btcHTTP := viper.GetString("bitcoin.httpPath")
+ c.NodeInfo, c.HTTPClient = shared.GetBtcNodeAndClient(btcHTTP)
+ }
+
+ freq := viper.GetInt("superNode.frequency")
+ var frequency time.Duration
+ if freq <= 0 {
+ frequency = time.Second * 30
+ } else {
+ frequency = time.Second * time.Duration(freq)
+ }
+ c.Frequency = frequency
+ c.BatchSize = uint64(viper.GetInt64("superNode.batchSize"))
+ c.BatchNumber = uint64(viper.GetInt64("superNode.batchNumber"))
+ return nil
+}
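
For reference, a minimal .toml exercising the viper keys bound above might look like the following (all values are illustrative, and each key can instead be supplied via its environment variable):

[superNode]
    chain = "bitcoin"
    sync = true
    workers = 4
    server = true
    wsPath = "127.0.0.1:8080"
    ipcPath = "/tmp/vulcanize.ipc"
    httpPath = "127.0.0.1:8081"
    backFill = true
    frequency = 15
    batchSize = 50
    batchNumber = 10

[bitcoin]
    wsPath = "127.0.0.1:8332"
    httpPath = "127.0.0.1:8332"
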
diff --git a/pkg/super_node/constructors.go b/pkg/super_node/constructors.go
index d737adb0..34ddb93d 100644
--- a/pkg/super_node/constructors.go
+++ b/pkg/super_node/constructors.go
@@ -148,18 +148,6 @@ func NewIPLDPublisher(chain shared.ChainType, ipfsPath string) (shared.IPLDPubli
}
}
-// NewIPLDResolver constructs an IPLDResolver for the provided chain type
-func NewIPLDResolver(chain shared.ChainType) (shared.IPLDResolver, error) {
- switch chain {
- case shared.Ethereum:
- return eth.NewIPLDResolver(), nil
- case shared.Bitcoin:
- return btc.NewIPLDResolver(), nil
- default:
- return nil, fmt.Errorf("invalid chain %s for resolver constructor", chain.String())
- }
-}
-
// NewPublicAPI constructs a PublicAPI for the provided chain type
func NewPublicAPI(chain shared.ChainType, db *postgres.DB, ipfsPath string) (rpc.API, error) {
switch chain {
@@ -178,3 +166,15 @@ func NewPublicAPI(chain shared.ChainType, db *postgres.DB, ipfsPath string) (rpc
return rpc.API{}, fmt.Errorf("invalid chain %s for public api constructor", chain.String())
}
}
+
+// NewCleaner constructs a Cleaner for the provided chain type
+func NewCleaner(chain shared.ChainType, db *postgres.DB) (shared.Cleaner, error) {
+ switch chain {
+ case shared.Ethereum:
+ return eth.NewCleaner(db), nil
+ case shared.Bitcoin:
+ return btc.NewCleaner(db), nil
+ default:
+ return nil, fmt.Errorf("invalid chain %s for cleaner constructor", chain.String())
+ }
+}
diff --git a/pkg/super_node/eth/api.go b/pkg/super_node/eth/api.go
index 7418c4d8..cd80453c 100644
--- a/pkg/super_node/eth/api.go
+++ b/pkg/super_node/eth/api.go
@@ -20,13 +20,14 @@ import (
"context"
"math/big"
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs"
+
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
- "github.com/ipfs/go-block-format"
)
// APIName is the namespace for the super node's eth api
@@ -48,7 +49,7 @@ func NewPublicEthAPI(b *Backend) *PublicEthAPI {
// BlockNumber returns the block number of the chain head.
func (pea *PublicEthAPI) BlockNumber() hexutil.Uint64 {
- number, _ := pea.b.retriever.RetrieveLastBlockNumber()
+ number, _ := pea.b.Retriever.RetrieveLastBlockNumber()
return hexutil.Uint64(number)
}
@@ -64,6 +65,7 @@ func (pea *PublicEthAPI) GetLogs(ctx context.Context, crit ethereum.FilterQuery)
topicStrSets := make([][]string, 4)
for i, topicSet := range crit.Topics {
if i > 3 {
+ // don't allow more than 4 topics
break
}
for _, topic := range topicSet {
@@ -74,20 +76,20 @@ func (pea *PublicEthAPI) GetLogs(ctx context.Context, crit ethereum.FilterQuery)
Contracts: addrStrs,
Topics: topicStrSets,
}
- tx, err := pea.b.db.Beginx()
+ tx, err := pea.b.DB.Beginx()
if err != nil {
return nil, err
}
// If we have a blockhash to filter on, fire off single retrieval query
if crit.BlockHash != nil {
- rctCIDs, err := pea.b.retriever.RetrieveRctCIDs(tx, filter, 0, crit.BlockHash, nil)
+ rctCIDs, err := pea.b.Retriever.RetrieveRctCIDs(tx, filter, 0, crit.BlockHash, nil)
if err != nil {
return nil, err
}
if err := tx.Commit(); err != nil {
return nil, err
}
- rctIPLDs, err := pea.b.fetcher.FetchRcts(rctCIDs)
+ rctIPLDs, err := pea.b.Fetcher.FetchRcts(rctCIDs)
if err != nil {
return nil, err
}
@@ -98,14 +100,14 @@ func (pea *PublicEthAPI) GetLogs(ctx context.Context, crit ethereum.FilterQuery)
startingBlock := crit.FromBlock
endingBlock := crit.ToBlock
if startingBlock == nil {
- startingBlockInt, err := pea.b.retriever.RetrieveFirstBlockNumber()
+ startingBlockInt, err := pea.b.Retriever.RetrieveFirstBlockNumber()
if err != nil {
return nil, err
}
startingBlock = big.NewInt(startingBlockInt)
}
if endingBlock == nil {
- endingBlockInt, err := pea.b.retriever.RetrieveLastBlockNumber()
+ endingBlockInt, err := pea.b.Retriever.RetrieveLastBlockNumber()
if err != nil {
return nil, err
}
@@ -115,7 +117,7 @@ func (pea *PublicEthAPI) GetLogs(ctx context.Context, crit ethereum.FilterQuery)
end := endingBlock.Int64()
allRctCIDs := make([]ReceiptModel, 0)
for i := start; i <= end; i++ {
- rctCIDs, err := pea.b.retriever.RetrieveRctCIDs(tx, filter, i, nil, nil)
+ rctCIDs, err := pea.b.Retriever.RetrieveRctCIDs(tx, filter, i, nil, nil)
if err != nil {
return nil, err
}
@@ -124,7 +126,7 @@ func (pea *PublicEthAPI) GetLogs(ctx context.Context, crit ethereum.FilterQuery)
if err := tx.Commit(); err != nil {
return nil, err
}
- rctIPLDs, err := pea.b.fetcher.FetchRcts(allRctCIDs)
+ rctIPLDs, err := pea.b.Fetcher.FetchRcts(allRctCIDs)
if err != nil {
return nil, err
}
@@ -174,19 +176,19 @@ func (pea *PublicEthAPI) GetTransactionByHash(ctx context.Context, hash common.H
return nil, err
}
if tx != nil {
- return newRPCTransaction(tx, blockHash, blockNumber, index), nil
+ return NewRPCTransaction(tx, blockHash, blockNumber, index), nil
}
// Transaction unknown, return as such
return nil, nil
}
// extractLogsOfInterest returns logs from the receipt IPLD
-func extractLogsOfInterest(rctIPLDs []blocks.Block, wantedTopics [][]string) ([]*types.Log, error) {
+func extractLogsOfInterest(rctIPLDs []ipfs.BlockModel, wantedTopics [][]string) ([]*types.Log, error) {
var logs []*types.Log
for _, rctIPLD := range rctIPLDs {
- rctRLP := rctIPLD.RawData()
+ rctRLP := rctIPLD
var rct types.Receipt
- if err := rlp.DecodeBytes(rctRLP, &rct); err != nil {
+ if err := rlp.DecodeBytes(rctRLP.Data, &rct); err != nil {
return nil, err
}
for _, log := range rct.Logs {
@@ -201,7 +203,7 @@ func extractLogsOfInterest(rctIPLDs []blocks.Block, wantedTopics [][]string) ([]
// returns true if the log matches on the filter
func wantedLog(wantedTopics [][]string, actualTopics []common.Hash) bool {
// actualTopics will always have length <= 4
- // wantedTopics will always have length == 4
+ // wantedTopics will always have length 4
matches := 0
for i, actualTopic := range actualTopics {
// If we have topics in this filter slot, count as a match if the actualTopic matches one of the ones in this filter slot
@@ -292,7 +294,7 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool) (map[string]i
}
if fullTx {
formatTx = func(tx *types.Transaction) (interface{}, error) {
- return newRPCTransactionFromBlockHash(block, tx.Hash()), nil
+ return NewRPCTransactionFromBlockHash(block, tx.Hash()), nil
}
}
txs := block.Transactions()
@@ -315,8 +317,8 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool) (map[string]i
return fields, nil
}
-// newRPCTransactionFromBlockHash returns a transaction that will serialize to the RPC representation.
-func newRPCTransactionFromBlockHash(b *types.Block, hash common.Hash) *RPCTransaction {
+// NewRPCTransactionFromBlockHash returns a transaction that will serialize to the RPC representation.
+func NewRPCTransactionFromBlockHash(b *types.Block, hash common.Hash) *RPCTransaction {
for idx, tx := range b.Transactions() {
if tx.Hash() == hash {
return newRPCTransactionFromBlockIndex(b, uint64(idx))
@@ -331,7 +333,7 @@ func newRPCTransactionFromBlockIndex(b *types.Block, index uint64) *RPCTransacti
if index >= uint64(len(txs)) {
return nil
}
- return newRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index)
+ return NewRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index)
}
// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction
@@ -352,9 +354,9 @@ type RPCTransaction struct {
S *hexutil.Big `json:"s"`
}
-// newRPCTransaction returns a transaction that will serialize to the RPC
+// NewRPCTransaction returns a transaction that will serialize to the RPC
// representation, with the given location metadata set (if available).
-func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64) *RPCTransaction {
+func NewRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64) *RPCTransaction {
var signer types.Signer = types.FrontierSigner{}
if tx.Protected() {
signer = types.NewEIP155Signer(tx.ChainId())
@@ -367,7 +369,7 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber
Gas: hexutil.Uint64(tx.Gas()),
GasPrice: (*hexutil.Big)(tx.GasPrice()),
Hash: tx.Hash(),
- Input: hexutil.Bytes(tx.Data()),
+ Input: hexutil.Bytes(tx.Data()), // NOTE: this is unexpectedly ending up nil; needs investigation
Nonce: hexutil.Uint64(tx.Nonce()),
To: tx.To(),
Value: (*hexutil.Big)(tx.Value()),
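
With newRPCTransaction exported as NewRPCTransaction, other packages (and the tests below) can build the RPC representation directly; a small sketch with an illustrative transaction:

package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)

// exampleRPCTransaction renders a transaction with its block location
// metadata, the same way the tests below build their expected values.
func exampleRPCTransaction() *eth.RPCTransaction {
	to := common.HexToAddress("0x0000000000000000000000000000000000000001")
	tx := types.NewTransaction(0, to, big.NewInt(1), 21000, big.NewInt(1), nil)
	return eth.NewRPCTransaction(tx, common.Hash{}, 1, 0)
}
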
diff --git a/pkg/super_node/eth/api_test.go b/pkg/super_node/eth/api_test.go
new file mode 100644
index 00000000..61915b62
--- /dev/null
+++ b/pkg/super_node/eth/api_test.go
@@ -0,0 +1,588 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+package eth_test
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/core/types"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/rpc"
+
+ "github.com/ipfs/go-block-format"
+ "github.com/ipfs/go-cid"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ mocks3 "github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
+ "github.com/vulcanize/vulcanizedb/pkg/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+)
+
+var (
+ expectedBlock = map[string]interface{}{
+ "number": (*hexutil.Big)(mocks.MockBlock.Number()),
+ "hash": mocks.MockBlock.Hash(),
+ "parentHash": mocks.MockBlock.ParentHash(),
+ "nonce": mocks.MockBlock.Header().Nonce,
+ "mixHash": mocks.MockBlock.MixDigest(),
+ "sha3Uncles": mocks.MockBlock.UncleHash(),
+ "logsBloom": mocks.MockBlock.Bloom(),
+ "stateRoot": mocks.MockBlock.Root(),
+ "miner": mocks.MockBlock.Coinbase(),
+ "difficulty": (*hexutil.Big)(mocks.MockBlock.Difficulty()),
+ "extraData": hexutil.Bytes(mocks.MockBlock.Header().Extra),
+ "gasLimit": hexutil.Uint64(mocks.MockBlock.GasLimit()),
+ "gasUsed": hexutil.Uint64(mocks.MockBlock.GasUsed()),
+ "timestamp": hexutil.Uint64(mocks.MockBlock.Time()),
+ "transactionsRoot": mocks.MockBlock.TxHash(),
+ "receiptsRoot": mocks.MockBlock.ReceiptHash(),
+ "totalDifficulty": (*hexutil.Big)(mocks.MockBlock.Difficulty()),
+ "size": hexutil.Uint64(mocks.MockBlock.Size()),
+ }
+ expectedHeader = map[string]interface{}{
+ "number": (*hexutil.Big)(mocks.MockBlock.Header().Number),
+ "hash": mocks.MockBlock.Header().Hash(),
+ "parentHash": mocks.MockBlock.Header().ParentHash,
+ "nonce": mocks.MockBlock.Header().Nonce,
+ "mixHash": mocks.MockBlock.Header().MixDigest,
+ "sha3Uncles": mocks.MockBlock.Header().UncleHash,
+ "logsBloom": mocks.MockBlock.Header().Bloom,
+ "stateRoot": mocks.MockBlock.Header().Root,
+ "miner": mocks.MockBlock.Header().Coinbase,
+ "difficulty": (*hexutil.Big)(mocks.MockBlock.Header().Difficulty),
+ "extraData": hexutil.Bytes(mocks.MockBlock.Header().Extra),
+ "size": hexutil.Uint64(mocks.MockBlock.Header().Size()),
+ "gasLimit": hexutil.Uint64(mocks.MockBlock.Header().GasLimit),
+ "gasUsed": hexutil.Uint64(mocks.MockBlock.Header().GasUsed),
+ "timestamp": hexutil.Uint64(mocks.MockBlock.Header().Time),
+ "transactionsRoot": mocks.MockBlock.Header().TxHash,
+ "receiptsRoot": mocks.MockBlock.Header().ReceiptHash,
+ "totalDifficulty": (*hexutil.Big)(mocks.MockBlock.Header().Difficulty),
+ }
+ expectedTransaction = eth.NewRPCTransaction(mocks.MockTransactions[0], mocks.MockBlock.Hash(), mocks.MockBlock.NumberU64(), 0)
+)
+
+var _ = Describe("API", func() {
+ var (
+ db *postgres.DB
+ retriever *eth.CIDRetriever
+ fetcher *eth.IPLDFetcher
+ indexer *eth.CIDIndexer
+ backend *eth.Backend
+ api *eth.PublicEthAPI
+ )
+ BeforeEach(func() {
+ var err error
+ db, err = shared.SetupDB()
+ Expect(err).ToNot(HaveOccurred())
+ retriever = eth.NewCIDRetriever(db)
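+ // Wire the fetcher to an in-memory mock IPFS block service so the tests need no real IPFS node.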
+ blocksToReturn := map[cid.Cid]blocks.Block{
+ mocks.HeaderCID: mocks.HeaderIPLD,
+ mocks.Trx1CID: mocks.Trx1IPLD,
+ mocks.Trx2CID: mocks.Trx2IPLD,
+ mocks.Rct1CID: mocks.Rct1IPLD,
+ mocks.Rct2CID: mocks.Rct2IPLD,
+ mocks.State1CID: mocks.State1IPLD,
+ mocks.State2CID: mocks.State2IPLD,
+ mocks.StorageCID: mocks.StorageIPLD,
+ }
+ mockBlockService := &mocks3.MockIPFSBlockService{
+ Blocks: blocksToReturn,
+ }
+ fetcher = &eth.IPLDFetcher{
+ BlockService: mockBlockService,
+ }
+ indexer = eth.NewCIDIndexer(db)
+ backend = &eth.Backend{
+ Retriever: retriever,
+ Fetcher: fetcher,
+ DB: db,
+ }
+ api = eth.NewPublicEthAPI(backend)
+ err = indexer.Index(mocks.MockCIDPayload)
+ Expect(err).ToNot(HaveOccurred())
+ uncles := mocks.MockBlock.Uncles()
+ uncleHashes := make([]common.Hash, len(uncles))
+ for i, uncle := range uncles {
+ uncleHashes[i] = uncle.Hash()
+ }
+ expectedBlock["uncles"] = uncleHashes
+ })
+ AfterEach(func() {
+ eth.TearDownDB(db)
+ })
+ Describe("BlockNumber", func() {
+ It("Retrieves the head block number", func() {
+ bn := api.BlockNumber()
+ ubn := (uint64)(bn)
+ subn := strconv.FormatUint(ubn, 10)
+ Expect(subn).To(Equal(mocks.MockCIDPayload.HeaderCID.BlockNumber))
+ })
+ })
+
+ Describe("GetTransactionByHash", func() {
+ It("Retrieves the head block number", func() {
+ hash := mocks.MockTransactions[0].Hash()
+ tx, err := api.GetTransactionByHash(context.Background(), hash)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(tx).To(Equal(expectedTransaction))
+ })
+ })
+
+ Describe("GetBlockByNumber", func() {
+ It("Retrieves a block by number", func() {
+ // without full txs
+ number, err := strconv.ParseInt(mocks.MockCIDPayload.HeaderCID.BlockNumber, 10, 64)
+ Expect(err).ToNot(HaveOccurred())
+ block, err := api.GetBlockByNumber(context.Background(), rpc.BlockNumber(number), false)
+ Expect(err).ToNot(HaveOccurred())
+ transactionHashes := make([]interface{}, len(mocks.MockBlock.Transactions()))
+ for i, trx := range mocks.MockBlock.Transactions() {
+ transactionHashes[i] = trx.Hash()
+ }
+ expectedBlock["transactions"] = transactionHashes
+ for key, val := range expectedBlock {
+ Expect(val).To(Equal(block[key]))
+ }
+ // with full txs
+ block, err = api.GetBlockByNumber(context.Background(), rpc.BlockNumber(number), true)
+ Expect(err).ToNot(HaveOccurred())
+ transactions := make([]interface{}, len(mocks.MockBlock.Transactions()))
+ for i, trx := range mocks.MockBlock.Transactions() {
+ transactions[i] = eth.NewRPCTransactionFromBlockHash(mocks.MockBlock, trx.Hash())
+ }
+ expectedBlock["transactions"] = transactions
+ for key, val := range expectedBlock {
+ Expect(val).To(Equal(block[key]))
+ }
+ })
+ })
+
+ Describe("GetHeaderByNumber", func() {
+ It("Retrieves a header by number", func() {
+ number, err := strconv.ParseInt(mocks.MockCIDPayload.HeaderCID.BlockNumber, 10, 64)
+ Expect(err).ToNot(HaveOccurred())
+ header, err := api.GetHeaderByNumber(context.Background(), rpc.BlockNumber(number))
+ Expect(err).ToNot(HaveOccurred())
+ Expect(header).To(Equal(expectedHeader))
+ })
+ })
+
+ Describe("GetBlockByHash", func() {
+ It("Retrieves a block by hash", func() {
+ // without full txs
+ block, err := api.GetBlockByHash(context.Background(), mocks.MockBlock.Hash(), false)
+ Expect(err).ToNot(HaveOccurred())
+ transactionHashes := make([]interface{}, len(mocks.MockBlock.Transactions()))
+ for i, trx := range mocks.MockBlock.Transactions() {
+ transactionHashes[i] = trx.Hash()
+ }
+ expectedBlock["transactions"] = transactionHashes
+ for key, val := range expectedBlock {
+ Expect(val).To(Equal(block[key]))
+ }
+ // with full txs
+ block, err = api.GetBlockByHash(context.Background(), mocks.MockBlock.Hash(), true)
+ Expect(err).ToNot(HaveOccurred())
+ transactions := make([]interface{}, len(mocks.MockBlock.Transactions()))
+ for i, trx := range mocks.MockBlock.Transactions() {
+ transactions[i] = eth.NewRPCTransactionFromBlockHash(mocks.MockBlock, trx.Hash())
+ }
+ expectedBlock["transactions"] = transactions
+ for key, val := range expectedBlock {
+ Expect(val).To(Equal(block[key]))
+ }
+ })
+ })
+
+ Describe("GetLogs", func() {
+ It("Retrieves receipt logs that match the provided topcis within the provided range", func() {
+ crit := ethereum.FilterQuery{
+ Topics: [][]common.Hash{
+ {
+ common.HexToHash("0x04"),
+ },
+ },
+ FromBlock: mocks.MockBlock.Number(),
+ ToBlock: mocks.MockBlock.Number(),
+ }
+ logs, err := api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(1))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog1}))
+
+ crit = ethereum.FilterQuery{
+ Topics: [][]common.Hash{
+ {
+ common.HexToHash("0x04"),
+ common.HexToHash("0x05"),
+ },
+ },
+ FromBlock: mocks.MockBlock.Number(),
+ ToBlock: mocks.MockBlock.Number(),
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(2))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog1, mocks.MockLog2}))
+
+ crit = ethereum.FilterQuery{
+ Topics: [][]common.Hash{
+ {
+ common.HexToHash("0x04"),
+ common.HexToHash("0x06"),
+ },
+ },
+ FromBlock: mocks.MockBlock.Number(),
+ ToBlock: mocks.MockBlock.Number(),
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(1))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog1}))
+
+ crit = ethereum.FilterQuery{
+ Topics: [][]common.Hash{
+ {
+ common.HexToHash("0x04"),
+ },
+ {
+ common.HexToHash("0x07"),
+ },
+ },
+ FromBlock: mocks.MockBlock.Number(),
+ ToBlock: mocks.MockBlock.Number(),
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(0))
+
+ crit = ethereum.FilterQuery{
+ Topics: [][]common.Hash{
+ {
+ common.HexToHash("0x04"),
+ },
+ {
+ common.HexToHash("0x06"),
+ },
+ },
+ FromBlock: mocks.MockBlock.Number(),
+ ToBlock: mocks.MockBlock.Number(),
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(1))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog1}))
+
+ crit = ethereum.FilterQuery{
+ Topics: [][]common.Hash{
+ {
+ common.HexToHash("0x05"),
+ },
+ {
+ common.HexToHash("0x07"),
+ },
+ },
+ FromBlock: mocks.MockBlock.Number(),
+ ToBlock: mocks.MockBlock.Number(),
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(1))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog2}))
+
+ crit = ethereum.FilterQuery{
+ Topics: [][]common.Hash{
+ {
+ common.HexToHash("0x05"),
+ },
+ {
+ common.HexToHash("0x06"),
+ common.HexToHash("0x07"),
+ },
+ },
+ FromBlock: mocks.MockBlock.Number(),
+ ToBlock: mocks.MockBlock.Number(),
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(1))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog2}))
+
+ crit = ethereum.FilterQuery{
+ Topics: [][]common.Hash{
+ {
+ common.HexToHash("0x04"),
+ common.HexToHash("0x05"),
+ },
+ {
+ common.HexToHash("0x06"),
+ common.HexToHash("0x07"),
+ },
+ },
+ FromBlock: mocks.MockBlock.Number(),
+ ToBlock: mocks.MockBlock.Number(),
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(2))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog1, mocks.MockLog2}))
+
+ crit = ethereum.FilterQuery{
+ Topics: [][]common.Hash{
+ {},
+ {
+ common.HexToHash("0x07"),
+ },
+ },
+ FromBlock: mocks.MockBlock.Number(),
+ ToBlock: mocks.MockBlock.Number(),
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(1))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog2}))
+
+ crit = ethereum.FilterQuery{
+ Topics: [][]common.Hash{
+ {},
+ {
+ common.HexToHash("0x06"),
+ },
+ },
+ FromBlock: mocks.MockBlock.Number(),
+ ToBlock: mocks.MockBlock.Number(),
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(1))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog1}))
+
+ crit = ethereum.FilterQuery{
+ Topics: [][]common.Hash{},
+ FromBlock: mocks.MockBlock.Number(),
+ ToBlock: mocks.MockBlock.Number(),
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(2))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog1, mocks.MockLog2}))
+ })
+
+ It("Uses the provided blockhash if one is provided", func() {
+ hash := mocks.MockBlock.Hash()
+ crit := ethereum.FilterQuery{
+ BlockHash: &hash,
+ Topics: [][]common.Hash{
+ {},
+ {
+ common.HexToHash("0x06"),
+ },
+ },
+ }
+ logs, err := api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(1))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog1}))
+
+ crit = ethereum.FilterQuery{
+ BlockHash: &hash,
+ Topics: [][]common.Hash{
+ {
+ common.HexToHash("0x04"),
+ },
+ {
+ common.HexToHash("0x06"),
+ },
+ },
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(1))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog1}))
+
+ crit = ethereum.FilterQuery{
+ BlockHash: &hash,
+ Topics: [][]common.Hash{
+ {},
+ {
+ common.HexToHash("0x07"),
+ },
+ },
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(1))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog2}))
+
+ crit = ethereum.FilterQuery{
+ BlockHash: &hash,
+ Topics: [][]common.Hash{
+ {
+ common.HexToHash("0x05"),
+ },
+ {
+ common.HexToHash("0x07"),
+ },
+ },
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(1))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog2}))
+
+ crit = ethereum.FilterQuery{
+ BlockHash: &hash,
+ Topics: [][]common.Hash{
+ {
+ common.HexToHash("0x04"),
+ },
+ {
+ common.HexToHash("0x07"),
+ },
+ },
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(0))
+
+ crit = ethereum.FilterQuery{
+ BlockHash: &hash,
+ Topics: [][]common.Hash{
+ {
+ common.HexToHash("0x04"),
+ common.HexToHash("0x05"),
+ },
+ {
+ common.HexToHash("0x07"),
+ },
+ },
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(1))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog2}))
+
+ crit = ethereum.FilterQuery{
+ BlockHash: &hash,
+ Topics: [][]common.Hash{
+ {
+ common.HexToHash("0x04"),
+ common.HexToHash("0x05"),
+ },
+ },
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(2))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog1, mocks.MockLog2}))
+
+ crit = ethereum.FilterQuery{
+ BlockHash: &hash,
+ Topics: [][]common.Hash{
+ {
+ common.HexToHash("0x04"),
+ common.HexToHash("0x05"),
+ },
+ {
+ common.HexToHash("0x06"),
+ common.HexToHash("0x07"),
+ },
+ },
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(2))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog1, mocks.MockLog2}))
+
+ crit = ethereum.FilterQuery{
+ BlockHash: &hash,
+ Topics: [][]common.Hash{},
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(2))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog1, mocks.MockLog2}))
+ })
+
+ It("Filters on contract address if any are provided", func() {
+ hash := mocks.MockBlock.Hash()
+ crit := ethereum.FilterQuery{
+ BlockHash: &hash,
+ Addresses: []common.Address{
+ mocks.Address,
+ },
+ Topics: [][]common.Hash{
+ {
+ common.HexToHash("0x04"),
+ common.HexToHash("0x05"),
+ },
+ {
+ common.HexToHash("0x06"),
+ common.HexToHash("0x07"),
+ },
+ },
+ }
+ logs, err := api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(1))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog1}))
+
+ hash = mocks.MockBlock.Hash()
+ crit = ethereum.FilterQuery{
+ BlockHash: &hash,
+ Addresses: []common.Address{
+ mocks.Address,
+ mocks.AnotherAddress,
+ },
+ Topics: [][]common.Hash{
+ {
+ common.HexToHash("0x04"),
+ common.HexToHash("0x05"),
+ },
+ {
+ common.HexToHash("0x06"),
+ common.HexToHash("0x07"),
+ },
+ },
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(2))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog1, mocks.MockLog2}))
+
+ hash = mocks.MockBlock.Hash()
+ crit = ethereum.FilterQuery{
+ BlockHash: &hash,
+ Addresses: []common.Address{
+ mocks.Address,
+ mocks.AnotherAddress,
+ },
+ }
+ logs, err = api.GetLogs(context.Background(), crit)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(logs)).To(Equal(2))
+ Expect(logs).To(Equal([]*types.Log{mocks.MockLog1, mocks.MockLog2}))
+ })
+ })
+})
diff --git a/pkg/super_node/eth/backend.go b/pkg/super_node/eth/backend.go
index 635d5f1e..0b23a0d5 100644
--- a/pkg/super_node/eth/backend.go
+++ b/pkg/super_node/eth/backend.go
@@ -36,9 +36,9 @@ var (
)
type Backend struct {
- retriever *CIDRetriever
- fetcher *IPLDFetcher
- db *postgres.DB
+ Retriever *CIDRetriever
+ Fetcher *IPLDFetcher
+ DB *postgres.DB
}
func NewEthBackend(db *postgres.DB, ipfsPath string) (*Backend, error) {
@@ -48,9 +48,9 @@ func NewEthBackend(db *postgres.DB, ipfsPath string) (*Backend, error) {
return nil, err
}
return &Backend{
- retriever: r,
- fetcher: f,
- db: db,
+ Retriever: r,
+ Fetcher: f,
+ DB: db,
}, nil
}
@@ -58,7 +58,7 @@ func (b *Backend) HeaderByNumber(ctx context.Context, blockNumber rpc.BlockNumbe
number := blockNumber.Int64()
var err error
if blockNumber == rpc.LatestBlockNumber {
- number, err = b.retriever.RetrieveLastBlockNumber()
+ number, err = b.Retriever.RetrieveLastBlockNumber()
if err != nil {
return nil, err
}
@@ -67,11 +67,11 @@ func (b *Backend) HeaderByNumber(ctx context.Context, blockNumber rpc.BlockNumbe
return nil, errPendingBlockNumber
}
// Retrieve the CIDs for headers at this height
- tx, err := b.db.Beginx()
+ tx, err := b.DB.Beginx()
if err != nil {
return nil, err
}
- headerCids, err := b.retriever.RetrieveHeaderCIDs(tx, number)
+ headerCids, err := b.Retriever.RetrieveHeaderCIDs(tx, number)
if err != nil {
if err := tx.Rollback(); err != nil {
logrus.Error(err)
@@ -86,26 +86,26 @@ func (b *Backend) HeaderByNumber(ctx context.Context, blockNumber rpc.BlockNumbe
return nil, fmt.Errorf("header at block %d is not available", number)
}
// Fetch the header IPLDs for those CIDs
- headerIPLDs, err := b.fetcher.FetchHeaders([]HeaderModel{headerCids[0]})
+ headerIPLD, err := b.Fetcher.FetchHeader(headerCids[0])
if err != nil {
return nil, err
}
// Decode the first header at this block height and return it
- // We throw an error in FetchHeaders() if the number of headers does not match the number of CIDs and we already
- // confirmed the number of CIDs is greater than 0 so there is no need to bound check the slice before accessing
+ // We confirmed above that at least one header CID exists at this height, so headerCids[0] is safe to access
- header := new(types.Header)
- if err := rlp.DecodeBytes(headerIPLDs[0].RawData(), header); err != nil {
+ var header types.Header
+ if err := rlp.DecodeBytes(headerIPLD.Data, &header); err != nil {
return nil, err
}
- return header, nil
+ return &header, nil
}
// GetTd retrieves and returns the total difficulty at the given block hash
func (b *Backend) GetTd(blockHash common.Hash) (*big.Int, error) {
- pgStr := `SELECT header_cids.td FROM header_cids
+ pgStr := `SELECT td FROM eth.header_cids
WHERE header_cids.block_hash = $1`
var tdStr string
- err := b.db.Select(&tdStr, pgStr, blockHash.String())
+ err := b.DB.Get(&tdStr, pgStr, blockHash.String())
if err != nil {
return nil, err
}
@@ -118,11 +118,11 @@ func (b *Backend) GetTd(blockHash common.Hash) (*big.Int, error) {
// GetLogs returns all the logs for the given block hash
func (b *Backend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
- tx, err := b.db.Beginx()
+ tx, err := b.DB.Beginx()
if err != nil {
return nil, err
}
- receiptCIDs, err := b.retriever.RetrieveRctCIDs(tx, ReceiptFilter{}, 0, &hash, nil)
+ receiptCIDs, err := b.Retriever.RetrieveRctCIDs(tx, ReceiptFilter{}, 0, &hash, nil)
if err != nil {
if err := tx.Rollback(); err != nil {
logrus.Error(err)
@@ -135,14 +135,14 @@ func (b *Backend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log
if len(receiptCIDs) == 0 {
return nil, nil
}
- receiptIPLDs, err := b.fetcher.FetchRcts(receiptCIDs)
+ receiptIPLDs, err := b.Fetcher.FetchRcts(receiptCIDs)
if err != nil {
return nil, err
}
logs := make([][]*types.Log, len(receiptIPLDs))
for i, rctIPLD := range receiptIPLDs {
var rct types.Receipt
- if err := rlp.DecodeBytes(rctIPLD.RawData(), &rct); err != nil {
+ if err := rlp.DecodeBytes(rctIPLD.Data, &rct); err != nil {
return nil, err
}
logs[i] = rct.Logs
@@ -157,7 +157,7 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
number := blockNumber.Int64()
var err error
if blockNumber == rpc.LatestBlockNumber {
- number, err = b.retriever.RetrieveLastBlockNumber()
+ number, err = b.Retriever.RetrieveLastBlockNumber()
if err != nil {
return nil, err
}
@@ -166,127 +166,128 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
return nil, errPendingBlockNumber
}
// Retrieve all the CIDs for the block
- headerCID, uncleCIDs, txCIDs, rctCIDs, err := b.retriever.RetrieveBlockByNumber(number)
+ headerCID, uncleCIDs, txCIDs, rctCIDs, err := b.Retriever.RetrieveBlockByNumber(number)
if err != nil {
return nil, err
}
+
// Fetch and decode the header IPLD
- headerIPLDs, err := b.fetcher.FetchHeaders([]HeaderModel{headerCID})
+ headerIPLD, err := b.Fetcher.FetchHeader(headerCID)
if err != nil {
return nil, err
}
- var header *types.Header
- if err := rlp.DecodeBytes(headerIPLDs[0].RawData(), header); err != nil {
+ var header types.Header
+ if err := rlp.DecodeBytes(headerIPLD.Data, &header); err != nil {
return nil, err
}
// Fetch and decode the uncle IPLDs
- uncleIPLDs, err := b.fetcher.FetchUncles(uncleCIDs)
+ uncleIPLDs, err := b.Fetcher.FetchUncles(uncleCIDs)
if err != nil {
return nil, err
}
var uncles []*types.Header
for _, uncleIPLD := range uncleIPLDs {
- var uncle *types.Header
- if err := rlp.DecodeBytes(uncleIPLD.RawData(), uncle); err != nil {
+ var uncle types.Header
+ if err := rlp.DecodeBytes(uncleIPLD.Data, &uncle); err != nil {
return nil, err
}
- uncles = append(uncles, uncle)
+ uncles = append(uncles, &uncle)
}
// Fetch and decode the transaction IPLDs
- txIPLDs, err := b.fetcher.FetchTrxs(txCIDs)
+ txIPLDs, err := b.Fetcher.FetchTrxs(txCIDs)
if err != nil {
return nil, err
}
var transactions []*types.Transaction
for _, txIPLD := range txIPLDs {
- var tx *types.Transaction
- if err := rlp.DecodeBytes(txIPLD.RawData(), tx); err != nil {
+ var tx types.Transaction
+ if err := rlp.DecodeBytes(txIPLD.Data, &tx); err != nil {
return nil, err
}
- transactions = append(transactions, tx)
+ transactions = append(transactions, &tx)
}
// Fetch and decode the receipt IPLDs
- rctIPLDs, err := b.fetcher.FetchRcts(rctCIDs)
+ rctIPLDs, err := b.Fetcher.FetchRcts(rctCIDs)
if err != nil {
return nil, err
}
var receipts []*types.Receipt
for _, rctIPLD := range rctIPLDs {
- var receipt *types.Receipt
- if err := rlp.DecodeBytes(rctIPLD.RawData(), receipt); err != nil {
+ var receipt types.Receipt
+ if err := rlp.DecodeBytes(rctIPLD.Data, &receipt); err != nil {
return nil, err
}
- receipts = append(receipts, receipt)
+ receipts = append(receipts, &receipt)
}
// Compose everything together into a complete block
- return types.NewBlock(header, transactions, uncles, receipts), nil
+ return types.NewBlock(&header, transactions, uncles, receipts), nil
}
// BlockByHash returns the requested block. When fullTx is true all transactions in the block are returned in full
// detail, otherwise only the transaction hash is returned.
func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
// Retrieve all the CIDs for the block
- headerCID, uncleCIDs, txCIDs, rctCIDs, err := b.retriever.RetrieveBlockByHash(hash)
+ headerCID, uncleCIDs, txCIDs, rctCIDs, err := b.Retriever.RetrieveBlockByHash(hash)
if err != nil {
return nil, err
}
// Fetch and decode the header IPLD
- headerIPLDs, err := b.fetcher.FetchHeaders([]HeaderModel{headerCID})
+ headerIPLD, err := b.Fetcher.FetchHeader(headerCID)
if err != nil {
return nil, err
}
- var header *types.Header
- if err := rlp.DecodeBytes(headerIPLDs[0].RawData(), header); err != nil {
+ var header types.Header
+ if err := rlp.DecodeBytes(headerIPLD.Data, &header); err != nil {
return nil, err
}
// Fetch and decode the uncle IPLDs
- uncleIPLDs, err := b.fetcher.FetchUncles(uncleCIDs)
+ uncleIPLDs, err := b.Fetcher.FetchUncles(uncleCIDs)
if err != nil {
return nil, err
}
var uncles []*types.Header
for _, uncleIPLD := range uncleIPLDs {
- var uncle *types.Header
- if err := rlp.DecodeBytes(uncleIPLD.RawData(), uncle); err != nil {
+ var uncle types.Header
+ if err := rlp.DecodeBytes(uncleIPLD.Data, &uncle); err != nil {
return nil, err
}
- uncles = append(uncles, uncle)
+ uncles = append(uncles, &uncle)
}
// Fetch and decode the transaction IPLDs
- txIPLDs, err := b.fetcher.FetchTrxs(txCIDs)
+ txIPLDs, err := b.Fetcher.FetchTrxs(txCIDs)
if err != nil {
return nil, err
}
var transactions []*types.Transaction
for _, txIPLD := range txIPLDs {
- var tx *types.Transaction
- if err := rlp.DecodeBytes(txIPLD.RawData(), tx); err != nil {
+ var tx types.Transaction
+ if err := rlp.DecodeBytes(txIPLD.Data, &tx); err != nil {
return nil, err
}
- transactions = append(transactions, tx)
+ transactions = append(transactions, &tx)
}
// Fetch and decode the receipt IPLDs
- rctIPLDs, err := b.fetcher.FetchRcts(rctCIDs)
+ rctIPLDs, err := b.Fetcher.FetchRcts(rctCIDs)
if err != nil {
return nil, err
}
var receipts []*types.Receipt
for _, rctIPLD := range rctIPLDs {
- var receipt *types.Receipt
- if err := rlp.DecodeBytes(rctIPLD.RawData(), receipt); err != nil {
+ var receipt types.Receipt
+ if err := rlp.DecodeBytes(rctIPLD.Data, &receipt); err != nil {
return nil, err
}
- receipts = append(receipts, receipt)
+ receipts = append(receipts, &receipt)
}
// Compose everything together into a complete block
- return types.NewBlock(header, transactions, uncles, receipts), nil
+ return types.NewBlock(&header, transactions, uncles, receipts), nil
}
// GetTransaction retrieves a tx by hash
// It also returns the blockhash, blocknumber, and tx index associated with the transaction
func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
pgStr := `SELECT transaction_cids.cid, transaction_cids.index, header_cids.block_hash, header_cids.block_number
- FROM transaction_cids, header_cids
+ FROM eth.transaction_cids, eth.header_cids
WHERE transaction_cids.header_id = header_cids.id
AND transaction_cids.tx_hash = $1`
var txCIDWithHeaderInfo struct {
@@ -295,16 +296,16 @@ func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*type
BlockHash string `db:"block_hash"`
BlockNumber int64 `db:"block_number"`
}
- if err := b.db.Get(&txCIDWithHeaderInfo, pgStr, txHash.String()); err != nil {
+ if err := b.DB.Get(&txCIDWithHeaderInfo, pgStr, txHash.String()); err != nil {
return nil, common.Hash{}, 0, 0, err
}
- txIPLD, err := b.fetcher.FetchTrxs([]TxModel{{CID: txCIDWithHeaderInfo.CID}})
+ txIPLD, err := b.Fetcher.FetchTrxs([]TxModel{{CID: txCIDWithHeaderInfo.CID}})
if err != nil {
return nil, common.Hash{}, 0, 0, err
}
- var transaction *types.Transaction
- if err := rlp.DecodeBytes(txIPLD[0].RawData(), transaction); err != nil {
+ var transaction types.Transaction
+ if err := rlp.DecodeBytes(txIPLD[0].Data, &transaction); err != nil {
return nil, common.Hash{}, 0, 0, err
}
- return transaction, common.HexToHash(txCIDWithHeaderInfo.BlockHash), uint64(txCIDWithHeaderInfo.BlockNumber), uint64(txCIDWithHeaderInfo.Index), nil
+ return &transaction, common.HexToHash(txCIDWithHeaderInfo.BlockHash), uint64(txCIDWithHeaderInfo.BlockNumber), uint64(txCIDWithHeaderInfo.Index), nil
}
diff --git a/pkg/super_node/eth/cleaner.go b/pkg/super_node/eth/cleaner.go
new file mode 100644
index 00000000..c3496504
--- /dev/null
+++ b/pkg/super_node/eth/cleaner.go
@@ -0,0 +1,339 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+package eth
+
+import (
+ "fmt"
+
+ "github.com/jmoiron/sqlx"
+ "github.com/sirupsen/logrus"
+
+ "github.com/vulcanize/vulcanizedb/pkg/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+)
+
+// Cleaner satisfies the shared.Cleaner interface for Ethereum
+type Cleaner struct {
+ db *postgres.DB
+}
+
+// NewCleaner returns a new Cleaner struct that satisfies the shared.Cleaner interface
+func NewCleaner(db *postgres.DB) *Cleaner {
+ return &Cleaner{
+ db: db,
+ }
+}
+
+// Clean removes the specified data from the db within the provided block range
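+// A minimal usage sketch (hypothetical; assumes an already-configured *postgres.DB):
+//	cleaner := NewCleaner(db)
+//	err := cleaner.Clean([][2]uint64{{0, 100}}, shared.Full)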
+func (c *Cleaner) Clean(rngs [][2]uint64, t shared.DataType) error {
+ tx, err := c.db.Beginx()
+ if err != nil {
+ return err
+ }
+ for _, rng := range rngs {
+ logrus.Infof("eth db cleaner cleaning up block range %d to %d", rng[0], rng[1])
+ if err := c.clean(tx, rng, t); err != nil {
+ if err := tx.Rollback(); err != nil {
+ logrus.Error(err)
+ }
+ return err
+ }
+ }
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+ logrus.Infof("eth db cleaner vacuum analyzing cleaned tables to free up space from deleted rows")
+ return c.vacuumAnalyze(t)
+}
+
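+// clean dispatches to the type-specific delete routines for a single block range inside the given transaction.
+// Dependent data (e.g. receipts for transactions, storage for state) is removed first; remaining child metadata
+// rows are assumed to be removed by foreign key cascades.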
+func (c *Cleaner) clean(tx *sqlx.Tx, rng [2]uint64, t shared.DataType) error {
+ switch t {
+ case shared.Full, shared.Headers:
+ return c.cleanFull(tx, rng)
+ case shared.Uncles:
+ if err := c.cleanUncleIPLDs(tx, rng); err != nil {
+ return err
+ }
+ return c.cleanUncleMetaData(tx, rng)
+ case shared.Transactions:
+ if err := c.cleanReceiptIPLDs(tx, rng); err != nil {
+ return err
+ }
+ if err := c.cleanTransactionIPLDs(tx, rng); err != nil {
+ return err
+ }
+ return c.cleanTransactionMetaData(tx, rng)
+ case shared.Receipts:
+ if err := c.cleanReceiptIPLDs(tx, rng); err != nil {
+ return err
+ }
+ return c.cleanReceiptMetaData(tx, rng)
+ case shared.State:
+ if err := c.cleanStorageIPLDs(tx, rng); err != nil {
+ return err
+ }
+ if err := c.cleanStateIPLDs(tx, rng); err != nil {
+ return err
+ }
+ return c.cleanStateMetaData(tx, rng)
+ case shared.Storage:
+ if err := c.cleanStorageIPLDs(tx, rng); err != nil {
+ return err
+ }
+ return c.cleanStorageMetaData(tx, rng)
+ default:
+ return fmt.Errorf("eth cleaner unrecognized type: %s", t.String())
+ }
+}
+
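+// vacuumAnalyze runs VACUUM ANALYZE against the tables affected by the given data type so Postgres can reclaim
+// the space freed by the deletes and refresh its planner statistics.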
+func (c *Cleaner) vacuumAnalyze(t shared.DataType) error {
+ switch t {
+ case shared.Full, shared.Headers:
+ return c.vacuumFull()
+ case shared.Uncles:
+ if err := c.vacuumUncles(); err != nil {
+ return err
+ }
+ case shared.Transactions:
+ if err := c.vacuumTxs(); err != nil {
+ return err
+ }
+ if err := c.vacuumRcts(); err != nil {
+ return err
+ }
+ case shared.Receipts:
+ if err := c.vacuumRcts(); err != nil {
+ return err
+ }
+ case shared.State:
+ if err := c.vacuumState(); err != nil {
+ return err
+ }
+ if err := c.vacuumAccounts(); err != nil {
+ return err
+ }
+ if err := c.vacuumStorage(); err != nil {
+ return err
+ }
+ case shared.Storage:
+ if err := c.vacuumStorage(); err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("eth cleaner unrecognized type: %s", t.String())
+ }
+ return c.vacuumIPLDs()
+}
+
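+// vacuumFull vacuum analyzes every eth schema table touched by a full clean.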
+func (c *Cleaner) vacuumFull() error {
+ if err := c.vacuumHeaders(); err != nil {
+ return err
+ }
+ if err := c.vacuumUncles(); err != nil {
+ return err
+ }
+ if err := c.vacuumTxs(); err != nil {
+ return err
+ }
+ if err := c.vacuumRcts(); err != nil {
+ return err
+ }
+ if err := c.vacuumState(); err != nil {
+ return err
+ }
+ if err := c.vacuumAccounts(); err != nil {
+ return err
+ }
+ return c.vacuumStorage()
+}
+
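+// Each helper below vacuum analyzes a single table.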
+func (c *Cleaner) vacuumHeaders() error {
+ _, err := c.db.Exec(`VACUUM ANALYZE eth.header_cids`)
+ return err
+}
+
+func (c *Cleaner) vacuumUncles() error {
+ _, err := c.db.Exec(`VACUUM ANALYZE eth.uncle_cids`)
+ return err
+}
+
+func (c *Cleaner) vacuumTxs() error {
+ _, err := c.db.Exec(`VACUUM ANALYZE eth.transaction_cids`)
+ return err
+}
+
+func (c *Cleaner) vacuumRcts() error {
+ _, err := c.db.Exec(`VACUUM ANALYZE eth.receipt_cids`)
+ return err
+}
+
+func (c *Cleaner) vacuumState() error {
+ _, err := c.db.Exec(`VACUUM ANALYZE eth.state_cids`)
+ return err
+}
+
+func (c *Cleaner) vacuumAccounts() error {
+ _, err := c.db.Exec(`VACUUM ANALYZE eth.state_accounts`)
+ return err
+}
+
+func (c *Cleaner) vacuumStorage() error {
+ _, err := c.db.Exec(`VACUUM ANALYZE eth.storage_cids`)
+ return err
+}
+
+func (c *Cleaner) vacuumIPLDs() error {
+ _, err := c.db.Exec(`VACUUM ANALYZE public.blocks`)
+ return err
+}
+
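+// cleanFull deletes IPLD blocks for every data type in the range, working from the leaves (storage) up to the
+// headers, then removes the header metadata; child metadata rows are expected to be removed by cascading deletes.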
+func (c *Cleaner) cleanFull(tx *sqlx.Tx, rng [2]uint64) error {
+ if err := c.cleanStorageIPLDs(tx, rng); err != nil {
+ return err
+ }
+ if err := c.cleanStateIPLDs(tx, rng); err != nil {
+ return err
+ }
+ if err := c.cleanReceiptIPLDs(tx, rng); err != nil {
+ return err
+ }
+ if err := c.cleanTransactionIPLDs(tx, rng); err != nil {
+ return err
+ }
+ if err := c.cleanUncleIPLDs(tx, rng); err != nil {
+ return err
+ }
+ if err := c.cleanHeaderIPLDs(tx, rng); err != nil {
+ return err
+ }
+ return c.cleanHeaderMetaData(tx, rng)
+}
+
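+// The *IPLDs helpers below delete rows from public.blocks by joining through the relevant CID tables, while the
+// *MetaData helpers delete the CID rows themselves for the given block range.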
+func (c *Cleaner) cleanStorageIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
+ pgStr := `DELETE FROM public.blocks A
+ USING eth.storage_cids B, eth.state_cids C, eth.header_cids D
+ WHERE A.key = B.cid
+ AND B.state_id = C.id
+ AND C.header_id = D.id
+ AND D.block_number BETWEEN $1 AND $2`
+ _, err := tx.Exec(pgStr, rng[0], rng[1])
+ return err
+}
+
+func (c *Cleaner) cleanStorageMetaData(tx *sqlx.Tx, rng [2]uint64) error {
+ pgStr := `DELETE FROM eth.storage_cids A
+ USING eth.state_cids B, eth.header_cids C
+ WHERE A.state_id = B.id
+ AND B.header_id = C.id
+ AND C.block_number BETWEEN $1 AND $2`
+ _, err := tx.Exec(pgStr, rng[0], rng[1])
+ return err
+}
+
+func (c *Cleaner) cleanStateIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
+ pgStr := `DELETE FROM public.blocks A
+ USING eth.state_cids B, eth.header_cids C
+ WHERE A.key = B.cid
+ AND B.header_id = C.id
+ AND C.block_number BETWEEN $1 AND $2`
+ _, err := tx.Exec(pgStr, rng[0], rng[1])
+ return err
+}
+
+func (c *Cleaner) cleanStateMetaData(tx *sqlx.Tx, rng [2]uint64) error {
+ pgStr := `DELETE FROM eth.state_cids A
+ USING eth.header_cids B
+ WHERE A.header_id = B.id
+ AND B.block_number BETWEEN $1 AND $2`
+ _, err := tx.Exec(pgStr, rng[0], rng[1])
+ return err
+}
+
+func (c *Cleaner) cleanReceiptIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
+ pgStr := `DELETE FROM public.blocks A
+ USING eth.receipt_cids B, eth.transaction_cids C, eth.header_cids D
+ WHERE A.key = B.cid
+ AND B.tx_id = C.id
+ AND C.header_id = D.id
+ AND D.block_number BETWEEN $1 AND $2`
+ _, err := tx.Exec(pgStr, rng[0], rng[1])
+ return err
+}
+
+func (c *Cleaner) cleanReceiptMetaData(tx *sqlx.Tx, rng [2]uint64) error {
+ pgStr := `DELETE FROM eth.receipt_cids A
+ USING eth.transaction_cids B, eth.header_cids C
+ WHERE A.tx_id = B.id
+ AND B.header_id = C.id
+ AND C.block_number BETWEEN $1 AND $2`
+ _, err := tx.Exec(pgStr, rng[0], rng[1])
+ return err
+}
+
+func (c *Cleaner) cleanTransactionIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
+ pgStr := `DELETE FROM public.blocks A
+ USING eth.transaction_cids B, eth.header_cids C
+ WHERE A.key = B.cid
+ AND B.header_id = C.id
+ AND C.block_number BETWEEN $1 AND $2`
+ _, err := tx.Exec(pgStr, rng[0], rng[1])
+ return err
+}
+
+func (c *Cleaner) cleanTransactionMetaData(tx *sqlx.Tx, rng [2]uint64) error {
+ pgStr := `DELETE FROM eth.transaction_cids A
+ USING eth.header_cids B
+ WHERE A.header_id = B.id
+ AND B.block_number BETWEEN $1 AND $2`
+ _, err := tx.Exec(pgStr, rng[0], rng[1])
+ return err
+}
+
+func (c *Cleaner) cleanUncleIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
+ pgStr := `DELETE FROM public.blocks A
+ USING eth.uncle_cids B, eth.header_cids C
+ WHERE A.key = B.cid
+ AND B.header_id = C.id
+ AND C.block_number BETWEEN $1 AND $2`
+ _, err := tx.Exec(pgStr, rng[0], rng[1])
+ return err
+}
+
+func (c *Cleaner) cleanUncleMetaData(tx *sqlx.Tx, rng [2]uint64) error {
+ pgStr := `DELETE FROM eth.uncle_cids A
+ USING eth.header_cids B
+ WHERE A.header_id = B.id
+ AND B.block_number BETWEEN $1 AND $2`
+ _, err := tx.Exec(pgStr, rng[0], rng[1])
+ return err
+}
+
+func (c *Cleaner) cleanHeaderIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
+ pgStr := `DELETE FROM public.blocks A
+ USING eth.header_cids B
+ WHERE A.key = B.cid
+ AND B.block_number BETWEEN $1 AND $2`
+ _, err := tx.Exec(pgStr, rng[0], rng[1])
+ return err
+}
+
+func (c *Cleaner) cleanHeaderMetaData(tx *sqlx.Tx, rng [2]uint64) error {
+ pgStr := `DELETE FROM eth.header_cids
+ WHERE block_number BETWEEN $1 AND $2`
+ _, err := tx.Exec(pgStr, rng[0], rng[1])
+ return err
+}
diff --git a/pkg/super_node/eth/cleaner_test.go b/pkg/super_node/eth/cleaner_test.go
new file mode 100644
index 00000000..351ce77d
--- /dev/null
+++ b/pkg/super_node/eth/cleaner_test.go
@@ -0,0 +1,614 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+package eth_test
+
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/vulcanize/vulcanizedb/pkg/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+ eth2 "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+)
+
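+// The fixtures below construct two linked mock blocks (block 0 and its child block 1) so the cleaner's
+// range-based deletes can be verified across parent and child rows.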
+var (
+ // Block 0
+ // header variables
+ blockHash1 = crypto.Keccak256Hash([]byte{00, 02})
+ blockNumber1 = big.NewInt(0)
+ headerCID1 = "mockHeader1CID"
+ parentHash = crypto.Keccak256Hash([]byte{00, 01})
+ totalDifficulty = "50000000000000000000"
+ reward = "5000000000000000000"
+ headerModel = eth2.HeaderModel{
+ BlockHash: blockHash1.String(),
+ BlockNumber: blockNumber1.String(),
+ CID: headerCID1,
+ ParentHash: parentHash.String(),
+ TotalDifficulty: totalDifficulty,
+ Reward: reward,
+ }
+
+ // tx variables
+ tx1CID = "mockTx1CID"
+ tx2CID = "mockTx2CID"
+ tx1Hash = crypto.Keccak256Hash([]byte{01, 01})
+ tx2Hash = crypto.Keccak256Hash([]byte{01, 02})
+ txSrc = common.HexToAddress("0x010a")
+ txDst = common.HexToAddress("0x020a")
+ txModels1 = []eth2.TxModel{
+ {
+ CID: tx1CID,
+ TxHash: tx1Hash.String(),
+ Index: 0,
+ },
+ {
+ CID: tx2CID,
+ TxHash: tx2Hash.String(),
+ Index: 1,
+ },
+ }
+
+ // uncle variables
+ uncleCID = "mockUncle1CID"
+ uncleHash = crypto.Keccak256Hash([]byte{02, 02})
+ uncleParentHash = crypto.Keccak256Hash([]byte{02, 01})
+ uncleReward = "1000000000000000000"
+ uncleModels1 = []eth2.UncleModel{
+ {
+ CID: uncleCID,
+ Reward: uncleReward,
+ BlockHash: uncleHash.String(),
+ ParentHash: uncleParentHash.String(),
+ },
+ }
+
+ // receipt variables
+ rct1CID = "mockRct1CID"
+ rct2CID = "mockRct2CID"
+ rct1Contract = common.Address{}
+ rct2Contract = common.HexToAddress("0x010c")
+ receiptModels1 = map[common.Hash]eth2.ReceiptModel{
+ tx1Hash: {
+ CID: rct1CID,
+ Contract: rct1Contract.String(),
+ },
+ tx2Hash: {
+ CID: rct2CID,
+ Contract: rct2Contract.String(),
+ },
+ }
+
+ // state variables
+ state1CID1 = "mockState1CID1"
+ state1Path = []byte{'\x01'}
+ state1Key = crypto.Keccak256Hash(txSrc.Bytes())
+ state2CID1 = "mockState2CID1"
+ state2Path = []byte{'\x02'}
+ state2Key = crypto.Keccak256Hash(txDst.Bytes())
+ stateModels1 = []eth2.StateNodeModel{
+ {
+ CID: state1CID1,
+ Path: state1Path,
+ NodeType: 2,
+ StateKey: state1Key.String(),
+ },
+ {
+ CID: state2CID1,
+ Path: state2Path,
+ NodeType: 2,
+ StateKey: state2Key.String(),
+ },
+ }
+
+ // storage variables
+ storageCID = "mockStorageCID1"
+ storagePath = []byte{'\x01'}
+ storageKey = crypto.Keccak256Hash(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")) // note: Hex2Bytes does not accept a 0x prefix
+ storageModels1 = map[common.Hash][]eth2.StorageNodeModel{
+ crypto.Keccak256Hash(state1Path): {
+ {
+ CID: storageCID,
+ StorageKey: storageKey.String(),
+ Path: storagePath,
+ NodeType: 2,
+ },
+ },
+ }
+ mockCIDPayload1 = &eth.CIDPayload{
+ HeaderCID: headerModel,
+ UncleCIDs: uncleModels1,
+ TransactionCIDs: txModels1,
+ ReceiptCIDs: receiptModels1,
+ StateNodeCIDs: stateModels1,
+ StorageNodeCIDs: storageModels1,
+ }
+
+ // Block 1
+ // header variables
+ blockHash2 = crypto.Keccak256Hash([]byte{00, 03})
+ blockNumber2 = big.NewInt(1)
+ headerCID2 = "mockHeaderCID2"
+ headerModel2 = eth2.HeaderModel{
+ BlockHash: blockHash2.String(),
+ BlockNumber: blockNumber2.String(),
+ CID: headerCID2,
+ ParentHash: blockHash1.String(),
+ TotalDifficulty: totalDifficulty,
+ Reward: reward,
+ }
+ // tx variables
+ tx3CID = "mockTx3CID"
+ tx3Hash = crypto.Keccak256Hash([]byte{01, 03})
+ txModels2 = []eth2.TxModel{
+ {
+ CID: tx3CID,
+ TxHash: tx3Hash.String(),
+ Index: 0,
+ },
+ }
+ // receipt variables
+ rct3CID = "mockRct3CID"
+ receiptModels2 = map[common.Hash]eth2.ReceiptModel{
+ tx3Hash: {
+ CID: rct3CID,
+ Contract: rct1Contract.String(),
+ },
+ }
+
+ // state variables
+ state1CID2 = "mockState1CID2"
+ stateModels2 = []eth2.StateNodeModel{
+ {
+ CID: state1CID2,
+ Path: state1Path,
+ NodeType: 2,
+ StateKey: state1Key.String(),
+ },
+ }
+ mockCIDPayload2 = &eth.CIDPayload{
+ HeaderCID: headerModel2,
+ TransactionCIDs: txModels2,
+ ReceiptCIDs: receiptModels2,
+ StateNodeCIDs: stateModels2,
+ }
+ rngs = [][2]uint64{{0, 1}}
+ cids = []string{
+ headerCID1,
+ headerCID2,
+ uncleCID,
+ tx1CID,
+ tx2CID,
+ tx3CID,
+ rct1CID,
+ rct2CID,
+ rct3CID,
+ state1CID1,
+ state2CID1,
+ state1CID2,
+ storageCID,
+ }
+ mockData = []byte{'\x01'}
+)
+
+var _ = Describe("Cleaner", func() {
+ var (
+ db *postgres.DB
+ repo *eth2.CIDIndexer
+ cleaner *eth2.Cleaner
+ )
+ BeforeEach(func() {
+ var err error
+ db, err = shared.SetupDB()
+ Expect(err).ToNot(HaveOccurred())
+ repo = eth2.NewCIDIndexer(db)
+ cleaner = eth2.NewCleaner(db)
+ })
+ Describe("Clean", func() {
+ BeforeEach(func() {
+ err := repo.Index(mockCIDPayload1)
+ Expect(err).ToNot(HaveOccurred())
+ err = repo.Index(mockCIDPayload2)
+ Expect(err).ToNot(HaveOccurred())
+
+ for _, cid := range cids {
+ _, err = db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, cid, mockData)
+ Expect(err).ToNot(HaveOccurred())
+ }
+
+ tx, err := db.Beginx()
+ Expect(err).ToNot(HaveOccurred())
+
+ var startingIPFSBlocksCount int
+ pgStr := `SELECT COUNT(*) FROM public.blocks`
+ err = tx.Get(&startingIPFSBlocksCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var startingStorageCount int
+ pgStr = `SELECT COUNT(*) FROM eth.storage_cids`
+ err = tx.Get(&startingStorageCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var startingStateCount int
+ pgStr = `SELECT COUNT(*) FROM eth.state_cids`
+ err = tx.Get(&startingStateCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var startingReceiptCount int
+ pgStr = `SELECT COUNT(*) FROM eth.receipt_cids`
+ err = tx.Get(&startingReceiptCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var startingTxCount int
+ pgStr = `SELECT COUNT(*) FROM eth.transaction_cids`
+ err = tx.Get(&startingTxCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var startingUncleCount int
+ pgStr = `SELECT COUNT(*) FROM eth.uncle_cids`
+ err = tx.Get(&startingUncleCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var startingHeaderCount int
+ pgStr = `SELECT COUNT(*) FROM eth.header_cids`
+ err = tx.Get(&startingHeaderCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = tx.Commit()
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(startingIPFSBlocksCount).To(Equal(13))
+ Expect(startingStorageCount).To(Equal(1))
+ Expect(startingStateCount).To(Equal(3))
+ Expect(startingReceiptCount).To(Equal(3))
+ Expect(startingTxCount).To(Equal(3))
+ Expect(startingUncleCount).To(Equal(1))
+ Expect(startingHeaderCount).To(Equal(2))
+ })
+ AfterEach(func() {
+ eth.TearDownDB(db)
+ })
+ It("Cleans everything", func() {
+ err := cleaner.Clean(rngs, shared.Full)
+ Expect(err).ToNot(HaveOccurred())
+
+ tx, err := db.Beginx()
+ Expect(err).ToNot(HaveOccurred())
+
+ pgStr := `SELECT COUNT(*) FROM eth.header_cids`
+ var headerCount int
+ err = tx.Get(&headerCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var uncleCount int
+ pgStr = `SELECT COUNT(*) FROM eth.uncle_cids`
+ err = tx.Get(&uncleCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var txCount int
+ pgStr = `SELECT COUNT(*) FROM eth.transaction_cids`
+ err = tx.Get(&txCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var rctCount int
+ pgStr = `SELECT COUNT(*) FROM eth.receipt_cids`
+ err = tx.Get(&rctCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var stateCount int
+ pgStr = `SELECT COUNT(*) FROM eth.state_cids`
+ err = tx.Get(&stateCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var storageCount int
+ pgStr = `SELECT COUNT(*) FROM eth.storage_cids`
+ err = tx.Get(&storageCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var blocksCount int
+ pgStr = `SELECT COUNT(*) FROM public.blocks`
+ err = tx.Get(&blocksCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = tx.Commit()
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(headerCount).To(Equal(0))
+ Expect(uncleCount).To(Equal(0))
+ Expect(txCount).To(Equal(0))
+ Expect(rctCount).To(Equal(0))
+ Expect(stateCount).To(Equal(0))
+ Expect(storageCount).To(Equal(0))
+ Expect(blocksCount).To(Equal(0))
+ })
+ It("Cleans headers and all linked data (same as full)", func() {
+ err := cleaner.Clean(rngs, shared.Headers)
+ Expect(err).ToNot(HaveOccurred())
+
+ tx, err := db.Beginx()
+ Expect(err).ToNot(HaveOccurred())
+
+ var headerCount int
+ pgStr := `SELECT COUNT(*) FROM eth.header_cids`
+ err = tx.Get(&headerCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var uncleCount int
+ pgStr = `SELECT COUNT(*) FROM eth.uncle_cids`
+ err = tx.Get(&uncleCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var txCount int
+ pgStr = `SELECT COUNT(*) FROM eth.transaction_cids`
+ err = tx.Get(&txCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var rctCount int
+ pgStr = `SELECT COUNT(*) FROM eth.receipt_cids`
+ err = tx.Get(&rctCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var stateCount int
+ pgStr = `SELECT COUNT(*) FROM eth.state_cids`
+ err = tx.Get(&stateCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var storageCount int
+ pgStr = `SELECT COUNT(*) FROM eth.storage_cids`
+ err = tx.Get(&storageCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var blocksCount int
+ pgStr = `SELECT COUNT(*) FROM public.blocks`
+ err = tx.Get(&blocksCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = tx.Commit()
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(headerCount).To(Equal(0))
+ Expect(uncleCount).To(Equal(0))
+ Expect(txCount).To(Equal(0))
+ Expect(rctCount).To(Equal(0))
+ Expect(stateCount).To(Equal(0))
+ Expect(storageCount).To(Equal(0))
+ Expect(blocksCount).To(Equal(0))
+ })
+ It("Cleans uncles", func() {
+ err := cleaner.Clean(rngs, shared.Uncles)
+ Expect(err).ToNot(HaveOccurred())
+
+ tx, err := db.Beginx()
+ Expect(err).ToNot(HaveOccurred())
+
+ var headerCount int
+ pgStr := `SELECT COUNT(*) FROM eth.header_cids`
+ err = tx.Get(&headerCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var uncleCount int
+ pgStr = `SELECT COUNT(*) FROM eth.uncle_cids`
+ err = tx.Get(&uncleCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var txCount int
+ pgStr = `SELECT COUNT(*) FROM eth.transaction_cids`
+ err = tx.Get(&txCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var rctCount int
+ pgStr = `SELECT COUNT(*) FROM eth.receipt_cids`
+ err = tx.Get(&rctCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var stateCount int
+ pgStr = `SELECT COUNT(*) FROM eth.state_cids`
+ err = tx.Get(&stateCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var storageCount int
+ pgStr = `SELECT COUNT(*) FROM eth.storage_cids`
+ err = tx.Get(&storageCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var blocksCount int
+ pgStr = `SELECT COUNT(*) FROM public.blocks`
+ err = tx.Get(&blocksCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = tx.Commit()
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(headerCount).To(Equal(2))
+ Expect(uncleCount).To(Equal(0))
+ Expect(txCount).To(Equal(3))
+ Expect(rctCount).To(Equal(3))
+ Expect(stateCount).To(Equal(3))
+ Expect(storageCount).To(Equal(1))
+ Expect(blocksCount).To(Equal(12))
+ })
+ It("Cleans transactions and linked receipts", func() {
+ err := cleaner.Clean(rngs, shared.Transactions)
+ Expect(err).ToNot(HaveOccurred())
+
+ tx, err := db.Beginx()
+ Expect(err).ToNot(HaveOccurred())
+
+ var headerCount int
+ pgStr := `SELECT COUNT(*) FROM eth.header_cids`
+ err = tx.Get(&headerCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var uncleCount int
+ pgStr = `SELECT COUNT(*) FROM eth.uncle_cids`
+ err = tx.Get(&uncleCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var txCount int
+ pgStr = `SELECT COUNT(*) FROM eth.transaction_cids`
+ err = tx.Get(&txCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var rctCount int
+ pgStr = `SELECT COUNT(*) FROM eth.receipt_cids`
+ err = tx.Get(&rctCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var stateCount int
+ pgStr = `SELECT COUNT(*) FROM eth.state_cids`
+ err = tx.Get(&stateCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var storageCount int
+ pgStr = `SELECT COUNT(*) FROM eth.storage_cids`
+ err = tx.Get(&storageCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var blocksCount int
+ pgStr = `SELECT COUNT(*) FROM public.blocks`
+ err = tx.Get(&blocksCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = tx.Commit()
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(headerCount).To(Equal(2))
+ Expect(uncleCount).To(Equal(1))
+ Expect(txCount).To(Equal(0))
+ Expect(rctCount).To(Equal(0))
+ Expect(stateCount).To(Equal(3))
+ Expect(storageCount).To(Equal(1))
+ Expect(blocksCount).To(Equal(7))
+ })
+ It("Cleans receipts", func() {
+ err := cleaner.Clean(rngs, shared.Receipts)
+ Expect(err).ToNot(HaveOccurred())
+
+ tx, err := db.Beginx()
+ Expect(err).ToNot(HaveOccurred())
+
+ var headerCount int
+ pgStr := `SELECT COUNT(*) FROM eth.header_cids`
+ err = tx.Get(&headerCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var uncleCount int
+ pgStr = `SELECT COUNT(*) FROM eth.uncle_cids`
+ err = tx.Get(&uncleCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var txCount int
+ pgStr = `SELECT COUNT(*) FROM eth.transaction_cids`
+ err = tx.Get(&txCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var rctCount int
+ pgStr = `SELECT COUNT(*) FROM eth.receipt_cids`
+ err = tx.Get(&rctCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var stateCount int
+ pgStr = `SELECT COUNT(*) FROM eth.state_cids`
+ err = tx.Get(&stateCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var storageCount int
+ pgStr = `SELECT COUNT(*) FROM eth.storage_cids`
+ err = tx.Get(&storageCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var blocksCount int
+ pgStr = `SELECT COUNT(*) FROM public.blocks`
+ err = tx.Get(&blocksCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = tx.Commit()
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(headerCount).To(Equal(2))
+ Expect(uncleCount).To(Equal(1))
+ Expect(txCount).To(Equal(3))
+ Expect(rctCount).To(Equal(0))
+ Expect(stateCount).To(Equal(3))
+ Expect(storageCount).To(Equal(1))
+ Expect(blocksCount).To(Equal(10))
+ })
+ It("Cleans state and linked storage", func() {
+ err := cleaner.Clean(rngs, shared.State)
+ Expect(err).ToNot(HaveOccurred())
+
+ tx, err := db.Beginx()
+ Expect(err).ToNot(HaveOccurred())
+
+ var headerCount int
+ pgStr := `SELECT COUNT(*) FROM eth.header_cids`
+ err = tx.Get(&headerCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var uncleCount int
+ pgStr = `SELECT COUNT(*) FROM eth.uncle_cids`
+ err = tx.Get(&uncleCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var txCount int
+ pgStr = `SELECT COUNT(*) FROM eth.transaction_cids`
+ err = tx.Get(&txCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var rctCount int
+ pgStr = `SELECT COUNT(*) FROM eth.receipt_cids`
+ err = tx.Get(&rctCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var stateCount int
+ pgStr = `SELECT COUNT(*) FROM eth.state_cids`
+ err = tx.Get(&stateCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var storageCount int
+ pgStr = `SELECT COUNT(*) FROM eth.storage_cids`
+ err = tx.Get(&storageCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var blocksCount int
+ pgStr = `SELECT COUNT(*) FROM public.blocks`
+ err = tx.Get(&blocksCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = tx.Commit()
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(headerCount).To(Equal(2))
+ Expect(uncleCount).To(Equal(1))
+ Expect(txCount).To(Equal(3))
+ Expect(rctCount).To(Equal(3))
+ Expect(stateCount).To(Equal(0))
+ Expect(storageCount).To(Equal(0))
+ Expect(blocksCount).To(Equal(9))
+ })
+ It("Cleans storage", func() {
+ err := cleaner.Clean(rngs, shared.Storage)
+ Expect(err).ToNot(HaveOccurred())
+
+ tx, err := db.Beginx()
+ Expect(err).ToNot(HaveOccurred())
+
+ var headerCount int
+ pgStr := `SELECT COUNT(*) FROM eth.header_cids`
+ err = tx.Get(&headerCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var uncleCount int
+ pgStr = `SELECT COUNT(*) FROM eth.uncle_cids`
+ err = tx.Get(&uncleCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var txCount int
+ pgStr = `SELECT COUNT(*) FROM eth.transaction_cids`
+ err = tx.Get(&txCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var rctCount int
+ pgStr = `SELECT COUNT(*) FROM eth.receipt_cids`
+ err = tx.Get(&rctCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var stateCount int
+ pgStr = `SELECT COUNT(*) FROM eth.state_cids`
+ err = tx.Get(&stateCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var storageCount int
+ pgStr = `SELECT COUNT(*) FROM eth.storage_cids`
+ err = tx.Get(&storageCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+ var blocksCount int
+ pgStr = `SELECT COUNT(*) FROM public.blocks`
+ err = tx.Get(&blocksCount, pgStr)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = tx.Commit()
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(headerCount).To(Equal(2))
+ Expect(uncleCount).To(Equal(1))
+ Expect(txCount).To(Equal(3))
+ Expect(rctCount).To(Equal(3))
+ Expect(stateCount).To(Equal(3))
+ Expect(storageCount).To(Equal(0))
+ Expect(blocksCount).To(Equal(12))
+ })
+ })
+})
diff --git a/pkg/super_node/eth/converter.go b/pkg/super_node/eth/converter.go
index a6c614c6..61a7785b 100644
--- a/pkg/super_node/eth/converter.go
+++ b/pkg/super_node/eth/converter.go
@@ -19,13 +19,14 @@ package eth
import (
"fmt"
- "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
-
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff"
+
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
// PayloadConverter satisfies the PayloadConverter interface for ethereum
@@ -42,7 +43,7 @@ func NewPayloadConverter(chainConfig *params.ChainConfig) *PayloadConverter {
// Convert method is used to convert a eth statediff.Payload to an IPLDPayload
// Satisfies the shared.PayloadConverter interface
-func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.StreamedIPLDs, error) {
+func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.ConvertedData, error) {
stateDiffPayload, ok := payload.(statediff.Payload)
if !ok {
return nil, fmt.Errorf("eth converter: expected payload type %T got %T", statediff.Payload{}, payload)
@@ -53,7 +54,7 @@ func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.Streame
return nil, err
}
trxLen := len(block.Transactions())
- convertedPayload := IPLDPayload{
+ convertedPayload := ConvertedPayload{
TotalDifficulty: stateDiffPayload.TotalDifficulty,
Block: block,
TxMetaData: make([]TxModel, 0, trxLen),
@@ -71,8 +72,8 @@ func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.Streame
return nil, err
}
txMeta := TxModel{
- Dst: handleNullAddr(trx.To()),
- Src: handleNullAddr(&from),
+ Dst: shared.HandleNullAddr(trx.To()),
+ Src: shared.HandleNullAddr(&from),
TxHash: trx.Hash().String(),
Index: int64(i),
}
@@ -123,56 +124,55 @@ func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.Streame
return nil, err
}
for _, createdAccount := range stateDiff.CreatedAccounts {
- hashKey := common.BytesToHash(createdAccount.Key)
+ statePathHash := crypto.Keccak256Hash(createdAccount.Path)
convertedPayload.StateNodes = append(convertedPayload.StateNodes, TrieNode{
- Key: hashKey,
- Value: createdAccount.Value,
- Leaf: createdAccount.Leaf,
+ Path: createdAccount.Path,
+ Value: createdAccount.NodeValue,
+ Type: createdAccount.NodeType,
+ LeafKey: common.BytesToHash(createdAccount.LeafKey),
})
for _, storageDiff := range createdAccount.Storage {
- convertedPayload.StorageNodes[hashKey] = append(convertedPayload.StorageNodes[hashKey], TrieNode{
- Key: common.BytesToHash(storageDiff.Key),
- Value: storageDiff.Value,
- Leaf: storageDiff.Leaf,
+ convertedPayload.StorageNodes[statePathHash] = append(convertedPayload.StorageNodes[statePathHash], TrieNode{
+ Path: storageDiff.Path,
+ Value: storageDiff.NodeValue,
+ Type: storageDiff.NodeType,
+ LeafKey: common.BytesToHash(storageDiff.LeafKey),
})
}
}
for _, deletedAccount := range stateDiff.DeletedAccounts {
- hashKey := common.BytesToHash(deletedAccount.Key)
+ statePathHash := crypto.Keccak256Hash(deletedAccount.Path)
convertedPayload.StateNodes = append(convertedPayload.StateNodes, TrieNode{
- Key: hashKey,
- Value: deletedAccount.Value,
- Leaf: deletedAccount.Leaf,
+ Path: deletedAccount.Path,
+ Value: deletedAccount.NodeValue,
+ Type: deletedAccount.NodeType,
+ LeafKey: common.BytesToHash(deletedAccount.LeafKey),
})
for _, storageDiff := range deletedAccount.Storage {
- convertedPayload.StorageNodes[hashKey] = append(convertedPayload.StorageNodes[hashKey], TrieNode{
- Key: common.BytesToHash(storageDiff.Key),
- Value: storageDiff.Value,
- Leaf: storageDiff.Leaf,
+ convertedPayload.StorageNodes[statePathHash] = append(convertedPayload.StorageNodes[statePathHash], TrieNode{
+ Path: storageDiff.Path,
+ Value: storageDiff.NodeValue,
+ Type: storageDiff.NodeType,
+ LeafKey: common.BytesToHash(storageDiff.LeafKey),
})
}
}
for _, updatedAccount := range stateDiff.UpdatedAccounts {
- hashKey := common.BytesToHash(updatedAccount.Key)
+ statePathHash := crypto.Keccak256Hash(updatedAccount.Path)
convertedPayload.StateNodes = append(convertedPayload.StateNodes, TrieNode{
- Key: hashKey,
- Value: updatedAccount.Value,
- Leaf: updatedAccount.Leaf,
+ Path: updatedAccount.Path,
+ Value: updatedAccount.NodeValue,
+ Type: updatedAccount.NodeType,
+ LeafKey: common.BytesToHash(updatedAccount.LeafKey),
})
for _, storageDiff := range updatedAccount.Storage {
- convertedPayload.StorageNodes[hashKey] = append(convertedPayload.StorageNodes[hashKey], TrieNode{
- Key: common.BytesToHash(storageDiff.Key),
- Value: storageDiff.Value,
- Leaf: storageDiff.Leaf,
+ convertedPayload.StorageNodes[statePathHash] = append(convertedPayload.StorageNodes[statePathHash], TrieNode{
+ Path: storageDiff.Path,
+ Value: storageDiff.NodeValue,
+ Type: storageDiff.NodeType,
+ LeafKey: common.BytesToHash(storageDiff.LeafKey),
})
}
}
return convertedPayload, nil
}
-
-func handleNullAddr(to *common.Address) string {
- if to == nil {
- return "0x0000000000000000000000000000000000000000000000000000000000000000"
- }
- return to.Hex()
-}
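
Note on the converter change above: storage nodes are no longer keyed by the state leaf-key hash but by the keccak256 hash of the parent state node's path, so intermediate state nodes (which have no leaf key) also get a stable map key. A minimal sketch of that grouping, assuming the TrieNode shape from the patch (illustrative only, not code from the repository):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// TrieNode mirrors the converter's struct (fields trimmed for the sketch).
type TrieNode struct {
	Path    []byte
	Value   []byte
	LeafKey common.Hash
}

func main() {
	statePath := []byte{'\x06'}
	// Storage nodes are grouped under keccak256(state node path).
	statePathHash := crypto.Keccak256Hash(statePath)
	storageNodes := map[common.Hash][]TrieNode{}
	storageNodes[statePathHash] = append(storageNodes[statePathHash], TrieNode{
		Path:    []byte{},
		Value:   []byte{0xde, 0xad},
		LeafKey: common.HexToHash("0x01"),
	})
	fmt.Printf("%d storage node(s) keyed under %s\n",
		len(storageNodes[statePathHash]), statePathHash.Hex())
}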
diff --git a/pkg/super_node/eth/converter_test.go b/pkg/super_node/eth/converter_test.go
index a75b4586..6c4f94bf 100644
--- a/pkg/super_node/eth/converter_test.go
+++ b/pkg/super_node/eth/converter_test.go
@@ -32,7 +32,7 @@ var _ = Describe("Converter", func() {
converter := eth.NewPayloadConverter(params.MainnetChainConfig)
payload, err := converter.Convert(mocks.MockStateDiffPayload)
Expect(err).ToNot(HaveOccurred())
- convertedPayload, ok := payload.(eth.IPLDPayload)
+ convertedPayload, ok := payload.(eth.ConvertedPayload)
Expect(ok).To(BeTrue())
Expect(convertedPayload.Block.Number().String()).To(Equal(mocks.BlockNumber.String()))
Expect(convertedPayload.Block.Hash().String()).To(Equal(mocks.MockBlock.Hash().String()))
diff --git a/pkg/super_node/eth/filterer.go b/pkg/super_node/eth/filterer.go
index 639cde50..eb941ae7 100644
--- a/pkg/super_node/eth/filterer.go
+++ b/pkg/super_node/eth/filterer.go
@@ -20,11 +20,16 @@ import (
"bytes"
"fmt"
+ "github.com/ethereum/go-ethereum/statediff"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/multiformats/go-multihash"
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs"
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
@@ -37,58 +42,70 @@ func NewResponseFilterer() *ResponseFilterer {
}
// Filter is used to filter through eth data to extract and package requested data into a Payload
-func (s *ResponseFilterer) Filter(filter shared.SubscriptionSettings, payload shared.StreamedIPLDs) (shared.ServerResponse, error) {
+func (s *ResponseFilterer) Filter(filter shared.SubscriptionSettings, payload shared.ConvertedData) (shared.IPLDs, error) {
ethFilters, ok := filter.(*SubscriptionSettings)
if !ok {
- return StreamResponse{}, fmt.Errorf("eth filterer expected filter type %T got %T", &SubscriptionSettings{}, filter)
+ return IPLDs{}, fmt.Errorf("eth filterer expected filter type %T got %T", &SubscriptionSettings{}, filter)
}
- ethPayload, ok := payload.(IPLDPayload)
+ ethPayload, ok := payload.(ConvertedPayload)
if !ok {
- return StreamResponse{}, fmt.Errorf("eth filterer expected payload type %T got %T", IPLDPayload{}, payload)
+ return IPLDs{}, fmt.Errorf("eth filterer expected payload type %T got %T", ConvertedPayload{}, payload)
}
if checkRange(ethFilters.Start.Int64(), ethFilters.End.Int64(), ethPayload.Block.Number().Int64()) {
- response := new(StreamResponse)
+ response := new(IPLDs)
+ response.TotalDifficulty = ethPayload.TotalDifficulty
if err := s.filterHeaders(ethFilters.HeaderFilter, response, ethPayload); err != nil {
- return StreamResponse{}, err
+ return IPLDs{}, err
}
txHashes, err := s.filterTransactions(ethFilters.TxFilter, response, ethPayload)
if err != nil {
- return StreamResponse{}, err
+ return IPLDs{}, err
}
var filterTxs []common.Hash
if ethFilters.ReceiptFilter.MatchTxs {
filterTxs = txHashes
}
if err := s.filerReceipts(ethFilters.ReceiptFilter, response, ethPayload, filterTxs); err != nil {
- return StreamResponse{}, err
+ return IPLDs{}, err
}
- if err := s.filterState(ethFilters.StateFilter, response, ethPayload); err != nil {
- return StreamResponse{}, err
- }
- if err := s.filterStorage(ethFilters.StorageFilter, response, ethPayload); err != nil {
- return StreamResponse{}, err
+ if err := s.filterStateAndStorage(ethFilters.StateFilter, ethFilters.StorageFilter, response, ethPayload); err != nil {
+ return IPLDs{}, err
}
response.BlockNumber = ethPayload.Block.Number()
return *response, nil
}
- return StreamResponse{}, nil
+ return IPLDs{}, nil
}
-func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *StreamResponse, payload IPLDPayload) error {
+func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IPLDs, payload ConvertedPayload) error {
if !headerFilter.Off {
headerRLP, err := rlp.EncodeToBytes(payload.Block.Header())
if err != nil {
return err
}
- response.HeadersRlp = append(response.HeadersRlp, headerRLP)
+ cid, err := ipld.RawdataToCid(ipld.MEthHeader, headerRLP, multihash.KECCAK_256)
+ if err != nil {
+ return err
+ }
+ response.Header = ipfs.BlockModel{
+ Data: headerRLP,
+ CID: cid.String(),
+ }
if headerFilter.Uncles {
- response.UnclesRlp = make([][]byte, 0, len(payload.Block.Body().Uncles))
- for _, uncle := range payload.Block.Body().Uncles {
+ response.Uncles = make([]ipfs.BlockModel, len(payload.Block.Body().Uncles))
+ for i, uncle := range payload.Block.Body().Uncles {
uncleRlp, err := rlp.EncodeToBytes(uncle)
if err != nil {
return err
}
- response.UnclesRlp = append(response.UnclesRlp, uncleRlp)
+ cid, err := ipld.RawdataToCid(ipld.MEthHeader, uncleRlp, multihash.KECCAK_256)
+ if err != nil {
+ return err
+ }
+ response.Uncles[i] = ipfs.BlockModel{
+ Data: uncleRlp,
+ CID: cid.String(),
+ }
}
}
}
@@ -102,17 +119,29 @@ func checkRange(start, end, actual int64) bool {
return false
}
-func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *StreamResponse, payload IPLDPayload) ([]common.Hash, error) {
- trxHashes := make([]common.Hash, 0, len(payload.Block.Body().Transactions))
+func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *IPLDs, payload ConvertedPayload) ([]common.Hash, error) {
+ var trxHashes []common.Hash
if !trxFilter.Off {
+ trxLen := len(payload.Block.Body().Transactions)
+ trxHashes = make([]common.Hash, 0, trxLen)
+ response.Transactions = make([]ipfs.BlockModel, 0, trxLen)
for i, trx := range payload.Block.Body().Transactions {
+ // TODO: check whether the corresponding receipt is wanted; if so, this transaction must be included
if checkTransactionAddrs(trxFilter.Src, trxFilter.Dst, payload.TxMetaData[i].Src, payload.TxMetaData[i].Dst) {
trxBuffer := new(bytes.Buffer)
if err := trx.EncodeRLP(trxBuffer); err != nil {
return nil, err
}
+ data := trxBuffer.Bytes()
+ cid, err := ipld.RawdataToCid(ipld.MEthTx, data, multihash.KECCAK_256)
+ if err != nil {
+ return nil, err
+ }
+ response.Transactions = append(response.Transactions, ipfs.BlockModel{
+ Data: data,
+ CID: cid.String(),
+ })
trxHashes = append(trxHashes, trx.Hash())
- response.TransactionsRlp = append(response.TransactionsRlp, trxBuffer.Bytes())
}
}
}
@@ -138,18 +167,26 @@ func checkTransactionAddrs(wantedSrc, wantedDst []string, actualSrc, actualDst s
return false
}
-func (s *ResponseFilterer) filerReceipts(receiptFilter ReceiptFilter, response *StreamResponse, payload IPLDPayload, trxHashes []common.Hash) error {
+func (s *ResponseFilterer) filerReceipts(receiptFilter ReceiptFilter, response *IPLDs, payload ConvertedPayload, trxHashes []common.Hash) error {
if !receiptFilter.Off {
+ response.Receipts = make([]ipfs.BlockModel, 0, len(payload.Receipts))
for i, receipt := range payload.Receipts {
// topics is always length 4
topics := [][]string{payload.ReceiptMetaData[i].Topic0s, payload.ReceiptMetaData[i].Topic1s, payload.ReceiptMetaData[i].Topic2s, payload.ReceiptMetaData[i].Topic3s}
if checkReceipts(receipt, receiptFilter.Topics, topics, receiptFilter.Contracts, payload.ReceiptMetaData[i].Contract, trxHashes) {
- receiptForStorage := (*types.ReceiptForStorage)(receipt)
receiptBuffer := new(bytes.Buffer)
- if err := receiptForStorage.EncodeRLP(receiptBuffer); err != nil {
+ if err := receipt.EncodeRLP(receiptBuffer); err != nil {
return err
}
- response.ReceiptsRlp = append(response.ReceiptsRlp, receiptBuffer.Bytes())
+ data := receiptBuffer.Bytes()
+ cid, err := ipld.RawdataToCid(ipld.MEthTxReceipt, data, multihash.KECCAK_256)
+ if err != nil {
+ return err
+ }
+ response.Receipts = append(response.Receipts, ipfs.BlockModel{
+ Data: data,
+ CID: cid.String(),
+ })
}
}
}
@@ -217,17 +254,57 @@ func slicesShareString(slice1, slice2 []string) int {
return 0
}
-func (s *ResponseFilterer) filterState(stateFilter StateFilter, response *StreamResponse, payload IPLDPayload) error {
- if !stateFilter.Off {
- response.StateNodesRlp = make(map[common.Hash][]byte)
- keyFilters := make([]common.Hash, len(stateFilter.Addresses))
- for i, addr := range stateFilter.Addresses {
- keyFilters[i] = crypto.Keccak256Hash(common.HexToAddress(addr).Bytes())
+// filterStateAndStorage filters state and storage nodes into the response according to the provided filters
+func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storageFilter StorageFilter, response *IPLDs, payload ConvertedPayload) error {
+ response.StateNodes = make([]StateNode, 0, len(payload.StateNodes))
+ response.StorageNodes = make([]StorageNode, 0)
+ stateAddressFilters := make([]common.Hash, len(stateFilter.Addresses))
+ for i, addr := range stateFilter.Addresses {
+ stateAddressFilters[i] = crypto.Keccak256Hash(common.HexToAddress(addr).Bytes())
+ }
+ storageAddressFilters := make([]common.Hash, len(storageFilter.Addresses))
+ for i, addr := range storageFilter.Addresses {
+ storageAddressFilters[i] = crypto.Keccak256Hash(common.HexToAddress(addr).Bytes())
+ }
+ storageKeyFilters := make([]common.Hash, len(storageFilter.StorageKeys))
+ for i, store := range storageFilter.StorageKeys {
+ storageKeyFilters[i] = common.HexToHash(store)
+ }
+ for _, stateNode := range payload.StateNodes {
+ if !stateFilter.Off && checkNodeKeys(stateAddressFilters, stateNode.LeafKey) {
+ if stateNode.Type == statediff.Leaf || stateFilter.IntermediateNodes {
+ cid, err := ipld.RawdataToCid(ipld.MEthStateTrie, stateNode.Value, multihash.KECCAK_256)
+ if err != nil {
+ return err
+ }
+ response.StateNodes = append(response.StateNodes, StateNode{
+ StateLeafKey: stateNode.LeafKey,
+ Path: stateNode.Path,
+ IPLD: ipfs.BlockModel{
+ Data: stateNode.Value,
+ CID: cid.String(),
+ },
+ Type: stateNode.Type,
+ })
+ }
}
- for _, stateNode := range payload.StateNodes {
- if checkNodeKeys(keyFilters, stateNode.Key) {
- if stateNode.Leaf || stateFilter.IntermediateNodes {
- response.StateNodesRlp[stateNode.Key] = stateNode.Value
+ if !storageFilter.Off && checkNodeKeys(storageAddressFilters, stateNode.LeafKey) {
+ for _, storageNode := range payload.StorageNodes[crypto.Keccak256Hash(stateNode.Path)] {
+ if checkNodeKeys(storageKeyFilters, storageNode.LeafKey) {
+ cid, err := ipld.RawdataToCid(ipld.MEthStorageTrie, storageNode.Value, multihash.KECCAK_256)
+ if err != nil {
+ return err
+ }
+ response.StorageNodes = append(response.StorageNodes, StorageNode{
+ StateLeafKey: stateNode.LeafKey,
+ StorageLeafKey: storageNode.LeafKey,
+ IPLD: ipfs.BlockModel{
+ Data: storageNode.Value,
+ CID: cid.String(),
+ },
+ Type: storageNode.Type,
+ Path: storageNode.Path,
+ })
}
}
}
@@ -247,28 +324,3 @@ func checkNodeKeys(wantedKeys []common.Hash, actualKey common.Hash) bool {
}
return false
}
-
-func (s *ResponseFilterer) filterStorage(storageFilter StorageFilter, response *StreamResponse, payload IPLDPayload) error {
- if !storageFilter.Off {
- response.StorageNodesRlp = make(map[common.Hash]map[common.Hash][]byte)
- stateKeyFilters := make([]common.Hash, len(storageFilter.Addresses))
- for i, addr := range storageFilter.Addresses {
- stateKeyFilters[i] = crypto.Keccak256Hash(common.HexToAddress(addr).Bytes())
- }
- storageKeyFilters := make([]common.Hash, len(storageFilter.StorageKeys))
- for i, store := range storageFilter.StorageKeys {
- storageKeyFilters[i] = common.HexToHash(store)
- }
- for stateKey, storageNodes := range payload.StorageNodes {
- if checkNodeKeys(stateKeyFilters, stateKey) {
- response.StorageNodesRlp[stateKey] = make(map[common.Hash][]byte)
- for _, storageNode := range storageNodes {
- if checkNodeKeys(storageKeyFilters, storageNode.Key) {
- response.StorageNodesRlp[stateKey][storageNode.Key] = storageNode.Value
- }
- }
- }
- }
- }
- return nil
-}
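
Every object the filterer now returns is wrapped as an ipfs.BlockModel whose CID is derived from the raw RLP. A self-contained sketch of that pattern for a header, assuming the ipld.RawdataToCid helper and MEthHeader codec from pkg/ipfs/ipld behave as they are used in the patch above:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/multiformats/go-multihash"

	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
	"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
)

// headerBlockModel shows the RLP -> CID -> BlockModel step the filterer
// applies to headers, uncles, transactions and receipts.
func headerBlockModel(header *types.Header) (ipfs.BlockModel, error) {
	headerRLP, err := rlp.EncodeToBytes(header)
	if err != nil {
		return ipfs.BlockModel{}, err
	}
	// KECCAK_256 matches ethereum's own hash function, so the CID is
	// recoverable from the header hash alone.
	c, err := ipld.RawdataToCid(ipld.MEthHeader, headerRLP, multihash.KECCAK_256)
	if err != nil {
		return ipfs.BlockModel{}, err
	}
	return ipfs.BlockModel{Data: headerRLP, CID: c.String()}, nil
}

func main() {
	model, err := headerBlockModel(&types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(1)})
	if err != nil {
		panic(err)
	}
	fmt.Println(model.CID)
}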
diff --git a/pkg/super_node/eth/filterer_test.go b/pkg/super_node/eth/filterer_test.go
index 745f1c0a..568f0e2e 100644
--- a/pkg/super_node/eth/filterer_test.go
+++ b/pkg/super_node/eth/filterer_test.go
@@ -19,165 +19,186 @@ package eth_test
import (
"bytes"
- "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/statediff"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
var (
- filterer *eth.ResponseFilterer
- expectedRctForStorageRLP1 []byte
- expectedRctForStorageRLP2 []byte
+ filterer *eth.ResponseFilterer
)
var _ = Describe("Filterer", func() {
Describe("FilterResponse", func() {
BeforeEach(func() {
filterer = eth.NewResponseFilterer()
- expectedRctForStorageRLP1 = getReceiptForStorageRLP(mocks.MockReceipts, 0)
- expectedRctForStorageRLP2 = getReceiptForStorageRLP(mocks.MockReceipts, 1)
})
It("Transcribes all the data from the IPLDPayload into the StreamPayload if given an open filter", func() {
- payload, err := filterer.Filter(openFilter, mocks.MockIPLDPayload)
+ payload, err := filterer.Filter(openFilter, mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
- superNodePayload, ok := payload.(eth.StreamResponse)
+ iplds, ok := payload.(eth.IPLDs)
Expect(ok).To(BeTrue())
- Expect(superNodePayload.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
- Expect(superNodePayload.HeadersRlp).To(Equal(mocks.MockSeedNodePayload.HeadersRlp))
- var unclesRlp [][]byte
- Expect(superNodePayload.UnclesRlp).To(Equal(unclesRlp))
- Expect(len(superNodePayload.TransactionsRlp)).To(Equal(2))
- Expect(shared.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
- Expect(shared.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
- Expect(len(superNodePayload.ReceiptsRlp)).To(Equal(2))
- Expect(shared.ListContainsBytes(superNodePayload.ReceiptsRlp, expectedRctForStorageRLP1)).To(BeTrue())
- Expect(shared.ListContainsBytes(superNodePayload.ReceiptsRlp, expectedRctForStorageRLP2)).To(BeTrue())
- Expect(len(superNodePayload.StateNodesRlp)).To(Equal(2))
- Expect(superNodePayload.StateNodesRlp[mocks.ContractLeafKey]).To(Equal(mocks.ValueBytes))
- Expect(superNodePayload.StateNodesRlp[mocks.AnotherContractLeafKey]).To(Equal(mocks.AnotherValueBytes))
- Expect(superNodePayload.StorageNodesRlp).To(Equal(mocks.MockSeedNodePayload.StorageNodesRlp))
+ Expect(iplds.BlockNumber.Int64()).To(Equal(mocks.MockIPLDs.BlockNumber.Int64()))
+ Expect(iplds.Header).To(Equal(mocks.MockIPLDs.Header))
+ var expectedEmptyUncles []ipfs.BlockModel
+ Expect(iplds.Uncles).To(Equal(expectedEmptyUncles))
+ Expect(len(iplds.Transactions)).To(Equal(2))
+ Expect(shared.IPLDsContainBytes(iplds.Transactions, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
+ Expect(shared.IPLDsContainBytes(iplds.Transactions, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
+ Expect(len(iplds.Receipts)).To(Equal(2))
+ Expect(shared.IPLDsContainBytes(iplds.Receipts, mocks.MockReceipts.GetRlp(0))).To(BeTrue())
+ Expect(shared.IPLDsContainBytes(iplds.Receipts, mocks.MockReceipts.GetRlp(1))).To(BeTrue())
+ Expect(len(iplds.StateNodes)).To(Equal(2))
+ for _, stateNode := range iplds.StateNodes {
+ Expect(stateNode.Type).To(Equal(statediff.Leaf))
+ if bytes.Equal(stateNode.StateLeafKey.Bytes(), mocks.AccountLeafKey) {
+ Expect(stateNode.IPLD).To(Equal(ipfs.BlockModel{
+ Data: mocks.State2IPLD.RawData(),
+ CID: mocks.State2IPLD.Cid().String(),
+ }))
+ }
+ if bytes.Equal(stateNode.StateLeafKey.Bytes(), mocks.ContractLeafKey) {
+ Expect(stateNode.IPLD).To(Equal(ipfs.BlockModel{
+ Data: mocks.State1IPLD.RawData(),
+ CID: mocks.State1IPLD.Cid().String(),
+ }))
+ }
+ }
+ Expect(iplds.StorageNodes).To(Equal(mocks.MockIPLDs.StorageNodes))
})
It("Applies filters from the provided config.Subscription", func() {
- payload1, err := filterer.Filter(rctContractFilter, mocks.MockIPLDPayload)
+ payload1, err := filterer.Filter(rctContractFilter, mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
- superNodePayload1, ok := payload1.(eth.StreamResponse)
+ iplds1, ok := payload1.(eth.IPLDs)
Expect(ok).To(BeTrue())
- Expect(superNodePayload1.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
- Expect(len(superNodePayload1.HeadersRlp)).To(Equal(0))
- Expect(len(superNodePayload1.UnclesRlp)).To(Equal(0))
- Expect(len(superNodePayload1.TransactionsRlp)).To(Equal(0))
- Expect(len(superNodePayload1.StorageNodesRlp)).To(Equal(0))
- Expect(len(superNodePayload1.StateNodesRlp)).To(Equal(0))
- Expect(len(superNodePayload1.ReceiptsRlp)).To(Equal(1))
- Expect(superNodePayload1.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP2))
+ Expect(iplds1.BlockNumber.Int64()).To(Equal(mocks.MockIPLDs.BlockNumber.Int64()))
+ Expect(iplds1.Header).To(Equal(ipfs.BlockModel{}))
+ Expect(len(iplds1.Uncles)).To(Equal(0))
+ Expect(len(iplds1.Transactions)).To(Equal(0))
+ Expect(len(iplds1.StorageNodes)).To(Equal(0))
+ Expect(len(iplds1.StateNodes)).To(Equal(0))
+ Expect(len(iplds1.Receipts)).To(Equal(1))
+ Expect(iplds1.Receipts[0]).To(Equal(ipfs.BlockModel{
+ Data: mocks.Rct2IPLD.RawData(),
+ CID: mocks.Rct2IPLD.Cid().String(),
+ }))
- payload2, err := filterer.Filter(rctTopicsFilter, mocks.MockIPLDPayload)
+ payload2, err := filterer.Filter(rctTopicsFilter, mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
- superNodePayload2, ok := payload2.(eth.StreamResponse)
+ iplds2, ok := payload2.(eth.IPLDs)
Expect(ok).To(BeTrue())
- Expect(superNodePayload2.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
- Expect(len(superNodePayload2.HeadersRlp)).To(Equal(0))
- Expect(len(superNodePayload2.UnclesRlp)).To(Equal(0))
- Expect(len(superNodePayload2.TransactionsRlp)).To(Equal(0))
- Expect(len(superNodePayload2.StorageNodesRlp)).To(Equal(0))
- Expect(len(superNodePayload2.StateNodesRlp)).To(Equal(0))
- Expect(len(superNodePayload2.ReceiptsRlp)).To(Equal(1))
- Expect(superNodePayload2.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP1))
+ Expect(iplds2.BlockNumber.Int64()).To(Equal(mocks.MockIPLDs.BlockNumber.Int64()))
+ Expect(iplds2.Header).To(Equal(ipfs.BlockModel{}))
+ Expect(len(iplds2.Uncles)).To(Equal(0))
+ Expect(len(iplds2.Transactions)).To(Equal(0))
+ Expect(len(iplds2.StorageNodes)).To(Equal(0))
+ Expect(len(iplds2.StateNodes)).To(Equal(0))
+ Expect(len(iplds2.Receipts)).To(Equal(1))
+ Expect(iplds2.Receipts[0]).To(Equal(ipfs.BlockModel{
+ Data: mocks.Rct1IPLD.RawData(),
+ CID: mocks.Rct1IPLD.Cid().String(),
+ }))
- payload3, err := filterer.Filter(rctTopicsAndContractFilter, mocks.MockIPLDPayload)
+ payload3, err := filterer.Filter(rctTopicsAndContractFilter, mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
- superNodePayload3, ok := payload3.(eth.StreamResponse)
+ iplds3, ok := payload3.(eth.IPLDs)
Expect(ok).To(BeTrue())
- Expect(superNodePayload3.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
- Expect(len(superNodePayload3.HeadersRlp)).To(Equal(0))
- Expect(len(superNodePayload3.UnclesRlp)).To(Equal(0))
- Expect(len(superNodePayload3.TransactionsRlp)).To(Equal(0))
- Expect(len(superNodePayload3.StorageNodesRlp)).To(Equal(0))
- Expect(len(superNodePayload3.StateNodesRlp)).To(Equal(0))
- Expect(len(superNodePayload3.ReceiptsRlp)).To(Equal(1))
- Expect(superNodePayload3.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP1))
+ Expect(iplds3.BlockNumber.Int64()).To(Equal(mocks.MockIPLDs.BlockNumber.Int64()))
+ Expect(iplds3.Header).To(Equal(ipfs.BlockModel{}))
+ Expect(len(iplds3.Uncles)).To(Equal(0))
+ Expect(len(iplds3.Transactions)).To(Equal(0))
+ Expect(len(iplds3.StorageNodes)).To(Equal(0))
+ Expect(len(iplds3.StateNodes)).To(Equal(0))
+ Expect(len(iplds3.Receipts)).To(Equal(1))
+ Expect(iplds3.Receipts[0]).To(Equal(ipfs.BlockModel{
+ Data: mocks.Rct1IPLD.RawData(),
+ CID: mocks.Rct1IPLD.Cid().String(),
+ }))
- payload4, err := filterer.Filter(rctContractsAndTopicFilter, mocks.MockIPLDPayload)
+ payload4, err := filterer.Filter(rctContractsAndTopicFilter, mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
- superNodePayload4, ok := payload4.(eth.StreamResponse)
+ iplds4, ok := payload4.(eth.IPLDs)
Expect(ok).To(BeTrue())
- Expect(superNodePayload4.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
- Expect(len(superNodePayload4.HeadersRlp)).To(Equal(0))
- Expect(len(superNodePayload4.UnclesRlp)).To(Equal(0))
- Expect(len(superNodePayload4.TransactionsRlp)).To(Equal(0))
- Expect(len(superNodePayload4.StorageNodesRlp)).To(Equal(0))
- Expect(len(superNodePayload4.StateNodesRlp)).To(Equal(0))
- Expect(len(superNodePayload4.ReceiptsRlp)).To(Equal(1))
- Expect(superNodePayload4.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP2))
+ Expect(iplds4.BlockNumber.Int64()).To(Equal(mocks.MockIPLDs.BlockNumber.Int64()))
+ Expect(iplds4.Header).To(Equal(ipfs.BlockModel{}))
+ Expect(len(iplds4.Uncles)).To(Equal(0))
+ Expect(len(iplds4.Transactions)).To(Equal(0))
+ Expect(len(iplds4.StorageNodes)).To(Equal(0))
+ Expect(len(iplds4.StateNodes)).To(Equal(0))
+ Expect(len(iplds4.Receipts)).To(Equal(1))
+ Expect(iplds4.Receipts[0]).To(Equal(ipfs.BlockModel{
+ Data: mocks.Rct2IPLD.RawData(),
+ CID: mocks.Rct2IPLD.Cid().String(),
+ }))
- payload5, err := filterer.Filter(rctsForAllCollectedTrxs, mocks.MockIPLDPayload)
+ payload5, err := filterer.Filter(rctsForAllCollectedTrxs, mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
- superNodePayload5, ok := payload5.(eth.StreamResponse)
+ iplds5, ok := payload5.(eth.IPLDs)
Expect(ok).To(BeTrue())
- Expect(superNodePayload5.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
- Expect(len(superNodePayload5.HeadersRlp)).To(Equal(0))
- Expect(len(superNodePayload5.UnclesRlp)).To(Equal(0))
- Expect(len(superNodePayload5.TransactionsRlp)).To(Equal(2))
- Expect(shared.ListContainsBytes(superNodePayload5.TransactionsRlp, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
- Expect(shared.ListContainsBytes(superNodePayload5.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
- Expect(len(superNodePayload5.StorageNodesRlp)).To(Equal(0))
- Expect(len(superNodePayload5.StateNodesRlp)).To(Equal(0))
- Expect(len(superNodePayload5.ReceiptsRlp)).To(Equal(2))
- Expect(shared.ListContainsBytes(superNodePayload5.ReceiptsRlp, expectedRctForStorageRLP1)).To(BeTrue())
- Expect(shared.ListContainsBytes(superNodePayload5.ReceiptsRlp, expectedRctForStorageRLP2)).To(BeTrue())
+ Expect(iplds5.BlockNumber.Int64()).To(Equal(mocks.MockIPLDs.BlockNumber.Int64()))
+ Expect(iplds5.Header).To(Equal(ipfs.BlockModel{}))
+ Expect(len(iplds5.Uncles)).To(Equal(0))
+ Expect(len(iplds5.Transactions)).To(Equal(2))
+ Expect(shared.IPLDsContainBytes(iplds5.Transactions, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
+ Expect(shared.IPLDsContainBytes(iplds5.Transactions, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
+ Expect(len(iplds5.StorageNodes)).To(Equal(0))
+ Expect(len(iplds5.StateNodes)).To(Equal(0))
+ Expect(len(iplds5.Receipts)).To(Equal(2))
+ Expect(shared.IPLDsContainBytes(iplds5.Receipts, mocks.MockReceipts.GetRlp(0))).To(BeTrue())
+ Expect(shared.IPLDsContainBytes(iplds5.Receipts, mocks.MockReceipts.GetRlp(1))).To(BeTrue())
- payload6, err := filterer.Filter(rctsForSelectCollectedTrxs, mocks.MockIPLDPayload)
+ payload6, err := filterer.Filter(rctsForSelectCollectedTrxs, mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
- superNodePayload6, ok := payload6.(eth.StreamResponse)
+ iplds6, ok := payload6.(eth.IPLDs)
Expect(ok).To(BeTrue())
- Expect(superNodePayload6.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
- Expect(len(superNodePayload6.HeadersRlp)).To(Equal(0))
- Expect(len(superNodePayload6.UnclesRlp)).To(Equal(0))
- Expect(len(superNodePayload6.TransactionsRlp)).To(Equal(1))
- Expect(shared.ListContainsBytes(superNodePayload5.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
- Expect(len(superNodePayload6.StorageNodesRlp)).To(Equal(0))
- Expect(len(superNodePayload6.StateNodesRlp)).To(Equal(0))
- Expect(len(superNodePayload6.ReceiptsRlp)).To(Equal(1))
- Expect(superNodePayload4.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP2))
+ Expect(iplds6.BlockNumber.Int64()).To(Equal(mocks.MockIPLDs.BlockNumber.Int64()))
+ Expect(iplds6.Header).To(Equal(ipfs.BlockModel{}))
+ Expect(len(iplds6.Uncles)).To(Equal(0))
+ Expect(len(iplds6.Transactions)).To(Equal(1))
+ Expect(shared.IPLDsContainBytes(iplds6.Transactions, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
+ Expect(len(iplds6.StorageNodes)).To(Equal(0))
+ Expect(len(iplds6.StateNodes)).To(Equal(0))
+ Expect(len(iplds6.Receipts)).To(Equal(1))
+ Expect(iplds6.Receipts[0]).To(Equal(ipfs.BlockModel{
+ Data: mocks.Rct2IPLD.RawData(),
+ CID: mocks.Rct2IPLD.Cid().String(),
+ }))
- payload7, err := filterer.Filter(stateFilter, mocks.MockIPLDPayload)
+ payload7, err := filterer.Filter(stateFilter, mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
- superNodePayload7, ok := payload7.(eth.StreamResponse)
+ iplds7, ok := payload7.(eth.IPLDs)
Expect(ok).To(BeTrue())
- Expect(superNodePayload7.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
- Expect(len(superNodePayload7.HeadersRlp)).To(Equal(0))
- Expect(len(superNodePayload7.UnclesRlp)).To(Equal(0))
- Expect(len(superNodePayload7.TransactionsRlp)).To(Equal(0))
- Expect(len(superNodePayload7.StorageNodesRlp)).To(Equal(0))
- Expect(len(superNodePayload7.ReceiptsRlp)).To(Equal(0))
- Expect(len(superNodePayload7.StateNodesRlp)).To(Equal(1))
- Expect(superNodePayload7.StateNodesRlp[mocks.ContractLeafKey]).To(Equal(mocks.ValueBytes))
+ Expect(iplds7.BlockNumber.Int64()).To(Equal(mocks.MockIPLDs.BlockNumber.Int64()))
+ Expect(iplds7.Header).To(Equal(ipfs.BlockModel{}))
+ Expect(len(iplds7.Uncles)).To(Equal(0))
+ Expect(len(iplds7.Transactions)).To(Equal(0))
+ Expect(len(iplds7.StorageNodes)).To(Equal(0))
+ Expect(len(iplds7.Receipts)).To(Equal(0))
+ Expect(len(iplds7.StateNodes)).To(Equal(1))
+ Expect(iplds7.StateNodes[0].StateLeafKey.Bytes()).To(Equal(mocks.AccountLeafKey))
+ Expect(iplds7.StateNodes[0].IPLD).To(Equal(ipfs.BlockModel{
+ Data: mocks.State2IPLD.RawData(),
+ CID: mocks.State2IPLD.Cid().String(),
+ }))
- payload8, err := filterer.Filter(rctTopicsAndContractFilterFail, mocks.MockIPLDPayload)
+ payload8, err := filterer.Filter(rctTopicsAndContractFilterFail, mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
- superNodePayload8, ok := payload8.(eth.StreamResponse)
+ iplds8, ok := payload8.(eth.IPLDs)
Expect(ok).To(BeTrue())
- Expect(superNodePayload8.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
- Expect(len(superNodePayload8.HeadersRlp)).To(Equal(0))
- Expect(len(superNodePayload8.UnclesRlp)).To(Equal(0))
- Expect(len(superNodePayload8.TransactionsRlp)).To(Equal(0))
- Expect(len(superNodePayload8.StorageNodesRlp)).To(Equal(0))
- Expect(len(superNodePayload8.StateNodesRlp)).To(Equal(0))
- Expect(len(superNodePayload8.ReceiptsRlp)).To(Equal(0))
+ Expect(iplds8.BlockNumber.Int64()).To(Equal(mocks.MockIPLDs.BlockNumber.Int64()))
+ Expect(iplds8.Header).To(Equal(ipfs.BlockModel{}))
+ Expect(len(iplds8.Uncles)).To(Equal(0))
+ Expect(len(iplds8.Transactions)).To(Equal(0))
+ Expect(len(iplds8.StorageNodes)).To(Equal(0))
+ Expect(len(iplds8.StateNodes)).To(Equal(0))
+ Expect(len(iplds8.Receipts)).To(Equal(0))
})
})
})
-
-func getReceiptForStorageRLP(receipts types.Receipts, i int) []byte {
- receiptForStorage := (*types.ReceiptForStorage)(receipts[i])
- receiptBuffer := new(bytes.Buffer)
- err := receiptForStorage.EncodeRLP(receiptBuffer)
- Expect(err).ToNot(HaveOccurred())
- return receiptBuffer.Bytes()
-}
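
The tests above lean on a shared.IPLDsContainBytes helper in place of the old ListContainsBytes. The real implementation lives in pkg/super_node/shared; a plausible, hypothetical sketch of what it does, for orientation only:

package shared

import (
	"bytes"

	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)

// IPLDsContainBytes reports whether any BlockModel in the slice carries the
// given raw bytes as its data. Hypothetical sketch; the repository's helper
// may differ in detail.
func IPLDsContainBytes(iplds []ipfs.BlockModel, b []byte) bool {
	for _, blk := range iplds {
		if bytes.Equal(blk.Data, b) {
			return true
		}
	}
	return false
}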
diff --git a/pkg/eth/contract_watcher/shared/getter/getter_suite_test.go b/pkg/super_node/eth/helpers.go
similarity index 58%
rename from pkg/eth/contract_watcher/shared/getter/getter_suite_test.go
rename to pkg/super_node/eth/helpers.go
index 75b2e014..c46cf430 100644
--- a/pkg/eth/contract_watcher/shared/getter/getter_suite_test.go
+++ b/pkg/super_node/eth/helpers.go
@@ -14,22 +14,32 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
-package getter_test
+package eth
-import (
- "io/ioutil"
- "log"
- "testing"
+import "github.com/ethereum/go-ethereum/statediff"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-func TestRepository(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Getter Suite Test")
+func ResolveFromNodeType(nodeType statediff.NodeType) int {
+ switch nodeType {
+ case statediff.Branch:
+ return 0
+ case statediff.Extension:
+ return 1
+ case statediff.Leaf:
+ return 2
+ default:
+ return -1
+ }
}
-var _ = BeforeSuite(func() {
- log.SetOutput(ioutil.Discard)
-})
+func ResolveToNodeType(nodeType int) statediff.NodeType {
+ switch nodeType {
+ case 0:
+ return statediff.Branch
+ case 1:
+ return statediff.Extension
+ case 2:
+ return statediff.Leaf
+ default:
+ return statediff.Unknown
+ }
+}
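
These two helpers exist because node types cross a storage boundary: statediff.NodeType values are persisted in Postgres as small integers and resolved back when rows are read. A short usage sketch, using only the functions defined above:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/statediff"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)

func main() {
	// Writing: a leaf node is persisted as node_type = 2.
	code := eth.ResolveFromNodeType(statediff.Leaf)
	fmt.Println(code) // 2

	// Reading: the integer column round-trips back to the statediff type.
	fmt.Println(eth.ResolveToNodeType(code) == statediff.Leaf) // true
}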
diff --git a/pkg/super_node/eth/indexer.go b/pkg/super_node/eth/indexer.go
index cc70720c..bbef66ef 100644
--- a/pkg/super_node/eth/indexer.go
+++ b/pkg/super_node/eth/indexer.go
@@ -19,6 +19,8 @@ package eth
import (
"fmt"
+ "github.com/ethereum/go-ethereum/crypto"
+
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
"github.com/ethereum/go-ethereum/common"
@@ -28,6 +30,10 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
+var (
+ nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")
+)
+
// Indexer satisfies the Indexer interface for ethereum
type CIDIndexer struct {
db *postgres.DB
@@ -55,6 +61,7 @@ func (in *CIDIndexer) Index(cids shared.CIDsForIndexing) error {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
+ log.Error("eth indexer error when indexing header")
return err
}
for _, uncle := range cidPayload.UncleCIDs {
@@ -62,6 +69,7 @@ func (in *CIDIndexer) Index(cids shared.CIDsForIndexing) error {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
+ log.Error("eth indexer error when indexing uncle")
return err
}
}
@@ -69,12 +77,14 @@ func (in *CIDIndexer) Index(cids shared.CIDsForIndexing) error {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
+ log.Error("eth indexer error when indexing transactions and receipts")
return err
}
if err := in.indexStateAndStorageCIDs(tx, cidPayload, headerID); err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
+ log.Error("eth indexer error when indexing state and storage nodes")
return err
}
return tx.Commit()
@@ -82,17 +92,19 @@ func (in *CIDIndexer) Index(cids shared.CIDsForIndexing) error {
func (in *CIDIndexer) indexHeaderCID(tx *sqlx.Tx, header HeaderModel, nodeID int64) (int64, error) {
var headerID int64
- err := tx.QueryRowx(`INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id) VALUES ($1, $2, $3, $4, $5, $6)
- ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id) = ($3, $4, $5, $6)
+ err := tx.QueryRowx(`INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp)
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
+ ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
RETURNING id`,
- header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, nodeID).Scan(&headerID)
+ header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, nodeID, header.Reward, header.StateRoot, header.TxRoot,
+ header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp).Scan(&headerID)
return headerID, err
}
func (in *CIDIndexer) indexUncleCID(tx *sqlx.Tx, uncle UncleModel, headerID int64) error {
- _, err := tx.Exec(`INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid) VALUES ($1, $2, $3, $4)
- ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid) = ($3, $4)`,
- uncle.BlockHash, headerID, uncle.ParentHash, uncle.CID)
+ _, err := tx.Exec(`INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward) VALUES ($1, $2, $3, $4, $5)
+ ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid, reward) = ($3, $4, $5)`,
+ uncle.BlockHash, headerID, uncle.ParentHash, uncle.CID, uncle.Reward)
return err
}
@@ -125,25 +137,49 @@ func (in *CIDIndexer) indexReceiptCID(tx *sqlx.Tx, cidMeta ReceiptModel, txID in
func (in *CIDIndexer) indexStateAndStorageCIDs(tx *sqlx.Tx, payload *CIDPayload, headerID int64) error {
for _, stateCID := range payload.StateNodeCIDs {
var stateID int64
- err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_key, cid, leaf) VALUES ($1, $2, $3, $4)
- ON CONFLICT (header_id, state_key) DO UPDATE SET (cid, leaf) = ($3, $4)
+ var stateKey string
+ if stateCID.StateKey != nullHash.String() {
+ stateKey = stateCID.StateKey
+ }
+ err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type) VALUES ($1, $2, $3, $4, $5)
+ ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type) = ($2, $3, $5)
RETURNING id`,
- headerID, stateCID.StateKey, stateCID.CID, stateCID.Leaf).Scan(&stateID)
+ headerID, stateKey, stateCID.CID, stateCID.Path, stateCID.NodeType).Scan(&stateID)
if err != nil {
return err
}
- for _, storageCID := range payload.StorageNodeCIDs[common.HexToHash(stateCID.StateKey)] {
- if err := in.indexStorageCID(tx, storageCID, stateID); err != nil {
- return err
+ // If we have a state leaf node, index the associated account and storage nodes
+ if stateCID.NodeType == 2 {
+ pathKey := crypto.Keccak256Hash(stateCID.Path)
+ for _, storageCID := range payload.StorageNodeCIDs[pathKey] {
+ if err := in.indexStorageCID(tx, storageCID, stateID); err != nil {
+ return err
+ }
+ }
+ if stateAccount, ok := payload.StateAccounts[pathKey]; ok {
+ if err := in.indexStateAccount(tx, stateAccount, stateID); err != nil {
+ return err
+ }
}
}
}
return nil
}
-func (in *CIDIndexer) indexStorageCID(tx *sqlx.Tx, storageCID StorageNodeModel, stateID int64) error {
- _, err := tx.Exec(`INSERT INTO eth.storage_cids (state_id, storage_key, cid, leaf) VALUES ($1, $2, $3, $4)
- ON CONFLICT (state_id, storage_key) DO UPDATE SET (cid, leaf) = ($3, $4)`,
- stateID, storageCID.StorageKey, storageCID.CID, storageCID.Leaf)
+func (in *CIDIndexer) indexStateAccount(tx *sqlx.Tx, stateAccount StateAccountModel, stateID int64) error {
+ _, err := tx.Exec(`INSERT INTO eth.state_accounts (state_id, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5)
+ ON CONFLICT (state_id) DO UPDATE SET (balance, nonce, code_hash, storage_root) = ($2, $3, $4, $5)`,
+ stateID, stateAccount.Balance, stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot)
+ return err
+}
+
+func (in *CIDIndexer) indexStorageCID(tx *sqlx.Tx, storageCID StorageNodeModel, stateID int64) error {
+ var storageKey string
+ if storageCID.StorageKey != nullHash.String() {
+ storageKey = storageCID.StorageKey
+ }
+ _, err := tx.Exec(`INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type) VALUES ($1, $2, $3, $4, $5)
+ ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type) = ($2, $3, $5)`,
+ stateID, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType)
return err
}
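
Worth calling out in the indexer change: intermediate state and storage nodes carry a zero-valued leaf key, which the code above compares against nullHash and stores as an empty string rather than 0x000…0. A minimal sketch of that guard (illustrative; the real checks are inlined in indexStateAndStorageCIDs and indexStorageCID):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

var nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")

// leafKeyOrEmpty mirrors the indexer's guard: intermediate nodes have no real
// leaf key, so the all-zero hash is written as an empty string.
func leafKeyOrEmpty(key string) string {
	if key == nullHash.String() {
		return ""
	}
	return key
}

func main() {
	fmt.Printf("%q\n", leafKeyOrEmpty(nullHash.String())) // ""
	fmt.Printf("%q\n", leafKeyOrEmpty("0xabc123"))        // "0xabc123"
}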
diff --git a/pkg/super_node/eth/indexer_test.go b/pkg/super_node/eth/indexer_test.go
index c2c66a4b..0a0e89bf 100644
--- a/pkg/super_node/eth/indexer_test.go
+++ b/pkg/super_node/eth/indexer_test.go
@@ -17,6 +17,7 @@
package eth_test
import (
+ "github.com/ethereum/go-ethereum/common"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -45,18 +46,22 @@ var _ = Describe("Indexer", func() {
It("Indexes CIDs and related metadata into vulcanizedb", func() {
err = repo.Index(mocks.MockCIDPayload)
Expect(err).ToNot(HaveOccurred())
- pgStr := `SELECT cid, td FROM eth.header_cids
+ pgStr := `SELECT cid, td, reward, id
+ FROM eth.header_cids
WHERE block_number = $1`
// check header was properly indexed
type res struct {
- CID string
- TD string
+ CID string
+ TD string
+ Reward string
+ ID int
}
headers := new(res)
err = db.QueryRowx(pgStr, 1).StructScan(headers)
Expect(err).ToNot(HaveOccurred())
- Expect(headers.CID).To(Equal("mockHeaderCID"))
- Expect(headers.TD).To(Equal("1337"))
+ Expect(headers.CID).To(Equal(mocks.HeaderCID.String()))
+ Expect(headers.TD).To(Equal(mocks.MockBlock.Difficulty().String()))
+ Expect(headers.Reward).To(Equal("5000000000000000000"))
// check trxs were properly indexed
trxs := make([]string, 0)
pgStr = `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
@@ -64,8 +69,8 @@ var _ = Describe("Indexer", func() {
err = db.Select(&trxs, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(trxs)).To(Equal(2))
- Expect(shared.ListContainsString(trxs, "mockTrxCID1")).To(BeTrue())
- Expect(shared.ListContainsString(trxs, "mockTrxCID2")).To(BeTrue())
+ Expect(shared.ListContainsString(trxs, mocks.Trx1CID.String())).To(BeTrue())
+ Expect(shared.ListContainsString(trxs, mocks.Trx2CID.String())).To(BeTrue())
// check receipts were properly indexed
rcts := make([]string, 0)
pgStr = `SELECT receipt_cids.cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
@@ -75,28 +80,32 @@ var _ = Describe("Indexer", func() {
err = db.Select(&rcts, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(rcts)).To(Equal(2))
- Expect(shared.ListContainsString(rcts, "mockRctCID1")).To(BeTrue())
- Expect(shared.ListContainsString(rcts, "mockRctCID2")).To(BeTrue())
+ Expect(shared.ListContainsString(rcts, mocks.Rct1CID.String())).To(BeTrue())
+ Expect(shared.ListContainsString(rcts, mocks.Rct2CID.String())).To(BeTrue())
// check that state nodes were properly indexed
stateNodes := make([]eth.StateNodeModel, 0)
- pgStr = `SELECT state_cids.cid, state_cids.state_key, state_cids.leaf FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
+ pgStr = `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
+ FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1`
err = db.Select(&stateNodes, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(stateNodes)).To(Equal(2))
for _, stateNode := range stateNodes {
- if stateNode.CID == "mockStateCID1" {
- Expect(stateNode.Leaf).To(Equal(true))
- Expect(stateNode.StateKey).To(Equal(mocks.ContractLeafKey.Hex()))
+ if stateNode.CID == mocks.State1CID.String() {
+ Expect(stateNode.NodeType).To(Equal(2))
+ Expect(stateNode.StateKey).To(Equal(common.BytesToHash(mocks.ContractLeafKey).Hex()))
+ Expect(stateNode.Path).To(Equal([]byte{'\x06'}))
}
- if stateNode.CID == "mockStateCID2" {
- Expect(stateNode.Leaf).To(Equal(true))
- Expect(stateNode.StateKey).To(Equal(mocks.AnotherContractLeafKey.Hex()))
+ if stateNode.CID == mocks.State2CID.String() {
+ Expect(stateNode.NodeType).To(Equal(2))
+ Expect(stateNode.StateKey).To(Equal(common.BytesToHash(mocks.AccountLeafKey).Hex()))
+ Expect(stateNode.Path).To(Equal([]byte{'\x0c'}))
}
}
// check that storage nodes were properly indexed
storageNodes := make([]eth.StorageNodeWithStateKeyModel, 0)
- pgStr = `SELECT storage_cids.cid, state_cids.state_key, storage_cids.storage_key, storage_cids.leaf FROM eth.storage_cids, eth.state_cids, eth.header_cids
+ pgStr = `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
+ FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE storage_cids.state_id = state_cids.id
AND state_cids.header_id = header_cids.id
AND header_cids.block_number = $1`
@@ -104,10 +113,11 @@ var _ = Describe("Indexer", func() {
Expect(err).ToNot(HaveOccurred())
Expect(len(storageNodes)).To(Equal(1))
Expect(storageNodes[0]).To(Equal(eth.StorageNodeWithStateKeyModel{
- CID: "mockStorageCID",
- Leaf: true,
- StorageKey: "0x0000000000000000000000000000000000000000000000000000000000000001",
- StateKey: mocks.ContractLeafKey.Hex(),
+ CID: mocks.StorageCID.String(),
+ NodeType: 2,
+ StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(),
+ StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
+ Path: []byte{},
}))
})
})
diff --git a/pkg/super_node/eth/ipld_fetcher.go b/pkg/super_node/eth/ipld_fetcher.go
index c9220f5e..64596b3a 100644
--- a/pkg/super_node/eth/ipld_fetcher.go
+++ b/pkg/super_node/eth/ipld_fetcher.go
@@ -20,8 +20,7 @@ import (
"context"
"errors"
"fmt"
-
- "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+ "math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ipfs/go-block-format"
@@ -30,6 +29,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
var (
@@ -53,16 +53,20 @@ func NewIPLDFetcher(ipfsPath string) (*IPLDFetcher, error) {
}
// Fetch is the exported method for fetching and returning all the IPLDS specified in the CIDWrapper
-func (f *IPLDFetcher) Fetch(cids shared.CIDsForFetching) (shared.FetchedIPLDs, error) {
+func (f *IPLDFetcher) Fetch(cids shared.CIDsForFetching) (shared.IPLDs, error) {
cidWrapper, ok := cids.(*CIDWrapper)
if !ok {
return nil, fmt.Errorf("eth fetcher: expected cids type %T got %T", &CIDWrapper{}, cids)
}
log.Debug("fetching iplds")
- iplds := new(IPLDWrapper)
- iplds.BlockNumber = cidWrapper.BlockNumber
var err error
- iplds.Headers, err = f.FetchHeaders(cidWrapper.Headers)
+ iplds := IPLDs{}
+ iplds.TotalDifficulty, ok = new(big.Int).SetString(cidWrapper.Header.TotalDifficulty, 10)
+ if !ok {
+ return nil, errors.New("eth fetcher: unable to set total difficulty")
+ }
+ iplds.BlockNumber = cidWrapper.BlockNumber
+ iplds.Header, err = f.FetchHeader(cidWrapper.Header)
if err != nil {
return nil, err
}
@@ -90,92 +94,112 @@ func (f *IPLDFetcher) Fetch(cids shared.CIDsForFetching) (shared.FetchedIPLDs, e
}
-// FetchHeaders fetches headers
+// FetchHeader fetches the header IPLD
-// It uses the f.fetchBatch method
-func (f *IPLDFetcher) FetchHeaders(cids []HeaderModel) ([]blocks.Block, error) {
- log.Debug("fetching header iplds")
- headerCids := make([]cid.Cid, 0, len(cids))
- for _, c := range cids {
- dc, err := cid.Decode(c.CID)
- if err != nil {
- return nil, err
- }
- headerCids = append(headerCids, dc)
+// It uses the f.fetch method
+func (f *IPLDFetcher) FetchHeader(c HeaderModel) (ipfs.BlockModel, error) {
+ log.Debug("fetching header ipld")
+ dc, err := cid.Decode(c.CID)
+ if err != nil {
+ return ipfs.BlockModel{}, err
}
- headers := f.fetchBatch(headerCids)
- if len(headers) != len(headerCids) {
- log.Errorf("ipfs fetcher: number of header blocks returned (%d) does not match number expected (%d)", len(headers), len(headerCids))
- return headers, errUnexpectedNumberOfIPLDs
+ header, err := f.fetch(dc)
+ if err != nil {
+ return ipfs.BlockModel{}, err
}
- return headers, nil
+ return ipfs.BlockModel{
+ Data: header.RawData(),
+ CID: header.Cid().String(),
+ }, nil
}
// FetchUncles fetches uncles
// It uses the f.fetchBatch method
-func (f *IPLDFetcher) FetchUncles(cids []UncleModel) ([]blocks.Block, error) {
+func (f *IPLDFetcher) FetchUncles(cids []UncleModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching uncle iplds")
- uncleCids := make([]cid.Cid, 0, len(cids))
- for _, c := range cids {
+ uncleCids := make([]cid.Cid, len(cids))
+ for i, c := range cids {
dc, err := cid.Decode(c.CID)
if err != nil {
return nil, err
}
- uncleCids = append(uncleCids, dc)
+ uncleCids[i] = dc
}
uncles := f.fetchBatch(uncleCids)
- if len(uncles) != len(uncleCids) {
- log.Errorf("ipfs fetcher: number of uncle blocks returned (%d) does not match number expected (%d)", len(uncles), len(uncleCids))
- return uncles, errUnexpectedNumberOfIPLDs
+ uncleIPLDs := make([]ipfs.BlockModel, len(uncles))
+ for i, uncle := range uncles {
+ uncleIPLDs[i] = ipfs.BlockModel{
+ Data: uncle.RawData(),
+ CID: uncle.Cid().String(),
+ }
}
- return uncles, nil
+ if len(uncleIPLDs) != len(uncleCids) {
+ log.Errorf("ipfs fetcher: number of uncle blocks returned (%d) does not match number expected (%d)", len(uncles), len(uncleCids))
+ return uncleIPLDs, errUnexpectedNumberOfIPLDs
+ }
+ return uncleIPLDs, nil
}
// FetchTrxs fetches transactions
// It uses the f.fetchBatch method
-func (f *IPLDFetcher) FetchTrxs(cids []TxModel) ([]blocks.Block, error) {
+func (f *IPLDFetcher) FetchTrxs(cids []TxModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching transaction iplds")
- trxCids := make([]cid.Cid, 0, len(cids))
- for _, c := range cids {
+ trxCids := make([]cid.Cid, len(cids))
+ for i, c := range cids {
dc, err := cid.Decode(c.CID)
if err != nil {
return nil, err
}
- trxCids = append(trxCids, dc)
+ trxCids[i] = dc
}
trxs := f.fetchBatch(trxCids)
- if len(trxs) != len(trxCids) {
- log.Errorf("ipfs fetcher: number of transaction blocks returned (%d) does not match number expected (%d)", len(trxs), len(trxCids))
- return trxs, errUnexpectedNumberOfIPLDs
+ trxIPLDs := make([]ipfs.BlockModel, len(trxs))
+ for i, trx := range trxs {
+ trxIPLDs[i] = ipfs.BlockModel{
+ Data: trx.RawData(),
+ CID: trx.Cid().String(),
+ }
}
- return trxs, nil
+ if len(trxIPLDs) != len(trxCids) {
+ log.Errorf("ipfs fetcher: number of transaction blocks returned (%d) does not match number expected (%d)", len(trxs), len(trxCids))
+ return trxIPLDs, errUnexpectedNumberOfIPLDs
+ }
+ return trxIPLDs, nil
}
// FetchRcts fetches receipts
// It uses the f.fetchBatch method
-func (f *IPLDFetcher) FetchRcts(cids []ReceiptModel) ([]blocks.Block, error) {
+// TODO: confirm that the batch fetch preserves the order of the requested CIDs
+func (f *IPLDFetcher) FetchRcts(cids []ReceiptModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching receipt iplds")
- rctCids := make([]cid.Cid, 0, len(cids))
- for _, c := range cids {
+ rctCids := make([]cid.Cid, len(cids))
+ for i, c := range cids {
dc, err := cid.Decode(c.CID)
if err != nil {
return nil, err
}
- rctCids = append(rctCids, dc)
+ rctCids[i] = dc
}
rcts := f.fetchBatch(rctCids)
- if len(rcts) != len(rctCids) {
- log.Errorf("ipfs fetcher: number of receipt blocks returned (%d) does not match number expected (%d)", len(rcts), len(rctCids))
- return rcts, errUnexpectedNumberOfIPLDs
+ rctIPLDs := make([]ipfs.BlockModel, len(rcts))
+ for i, rct := range rcts {
+ rctIPLDs[i] = ipfs.BlockModel{
+ Data: rct.RawData(),
+ CID: rct.Cid().String(),
+ }
}
- return rcts, nil
+ if len(rctIPLDs) != len(rctCids) {
+ log.Errorf("ipfs fetcher: number of receipt blocks returned (%d) does not match number expected (%d)", len(rcts), len(rctCids))
+ return rctIPLDs, errUnexpectedNumberOfIPLDs
+ }
+ return rctIPLDs, nil
}
// FetchState fetches state nodes
// It uses the single f.fetch method instead of the batch fetch, because it
// needs to maintain the data's relation to state keys
-func (f *IPLDFetcher) FetchState(cids []StateNodeModel) (map[common.Hash]blocks.Block, error) {
+func (f *IPLDFetcher) FetchState(cids []StateNodeModel) ([]StateNode, error) {
log.Debug("fetching state iplds")
- stateNodes := make(map[common.Hash]blocks.Block)
- for _, stateNode := range cids {
+ stateNodes := make([]StateNode, len(cids))
+ for i, stateNode := range cids {
if stateNode.CID == "" || stateNode.StateKey == "" {
continue
}
@@ -187,7 +211,15 @@ func (f *IPLDFetcher) FetchState(cids []StateNodeModel) (map[common.Hash]blocks.
if err != nil {
return nil, err
}
- stateNodes[common.HexToHash(stateNode.StateKey)] = state
+ stateNodes[i] = StateNode{
+ IPLD: ipfs.BlockModel{
+ Data: state.RawData(),
+ CID: state.Cid().String(),
+ },
+ StateLeafKey: common.HexToHash(stateNode.StateKey),
+ Type: ResolveToNodeType(stateNode.NodeType),
+ Path: stateNode.Path,
+ }
}
return stateNodes, nil
}
@@ -195,10 +227,10 @@ func (f *IPLDFetcher) FetchState(cids []StateNodeModel) (map[common.Hash]blocks.
// FetchStorage fetches storage nodes
// It uses the single f.fetch method instead of the batch fetch, because it
// needs to maintain the data's relation to state and storage keys
-func (f *IPLDFetcher) FetchStorage(cids []StorageNodeWithStateKeyModel) (map[common.Hash]map[common.Hash]blocks.Block, error) {
+func (f *IPLDFetcher) FetchStorage(cids []StorageNodeWithStateKeyModel) ([]StorageNode, error) {
log.Debug("fetching storage iplds")
- storageNodes := make(map[common.Hash]map[common.Hash]blocks.Block)
- for _, storageNode := range cids {
+ storageNodes := make([]StorageNode, len(cids))
+ for i, storageNode := range cids {
if storageNode.CID == "" || storageNode.StorageKey == "" || storageNode.StateKey == "" {
continue
}
@@ -210,10 +242,16 @@ func (f *IPLDFetcher) FetchStorage(cids []StorageNodeWithStateKeyModel) (map[com
if err != nil {
return nil, err
}
- if storageNodes[common.HexToHash(storageNode.StateKey)] == nil {
- storageNodes[common.HexToHash(storageNode.StateKey)] = make(map[common.Hash]blocks.Block)
+ storageNodes[i] = StorageNode{
+ IPLD: ipfs.BlockModel{
+ Data: storage.RawData(),
+ CID: storage.Cid().String(),
+ },
+ StateLeafKey: common.HexToHash(storageNode.StateKey),
+ StorageLeafKey: common.HexToHash(storageNode.StorageKey),
+ Type: ResolveToNodeType(storageNode.NodeType),
+ Path: storageNode.Path,
}
- storageNodes[common.HexToHash(storageNode.StateKey)][common.HexToHash(storageNode.StorageKey)] = storage
}
return storageNodes, nil
}
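
One subtlety in FetchState and FetchStorage above: the result slices are pre-sized with make([]StateNode, len(cids)) while rows with empty CIDs or keys are skipped with continue, so skipped rows leave zero-valued entries at their indices. An append-based variant avoids the gaps; a sketch only, assuming the StateNode, StateNodeModel, and f.fetch definitions exactly as they appear in the patch:

// Sketch, not the patch's implementation: collect fetched state nodes by
// appending, so rows skipped by the guard never appear as zero values.
func (f *IPLDFetcher) fetchStateSketch(cids []StateNodeModel) ([]StateNode, error) {
	stateNodes := make([]StateNode, 0, len(cids))
	for _, stateNode := range cids {
		if stateNode.CID == "" || stateNode.StateKey == "" {
			continue // nothing useful to fetch for this row
		}
		dc, err := cid.Decode(stateNode.CID)
		if err != nil {
			return nil, err
		}
		state, err := f.fetch(dc)
		if err != nil {
			return nil, err
		}
		stateNodes = append(stateNodes, StateNode{
			IPLD: ipfs.BlockModel{
				Data: state.RawData(),
				CID:  state.Cid().String(),
			},
			StateLeafKey: common.HexToHash(stateNode.StateKey),
			Type:         ResolveToNodeType(stateNode.NodeType),
			Path:         stateNode.Path,
		})
	}
	return stateNodes, nil
}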
diff --git a/pkg/super_node/eth/ipld_fetcher_test.go b/pkg/super_node/eth/ipld_fetcher_test.go
index ee0d954c..5192685d 100644
--- a/pkg/super_node/eth/ipld_fetcher_test.go
+++ b/pkg/super_node/eth/ipld_fetcher_test.go
@@ -17,13 +17,16 @@
package eth_test
import (
+ "bytes"
"math/big"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/statediff"
"github.com/ipfs/go-block-format"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)
@@ -47,10 +50,9 @@ var (
mockBlockService *mocks.MockIPFSBlockService
mockCIDWrapper = ð.CIDWrapper{
BlockNumber: big.NewInt(9000),
- Headers: []eth.HeaderModel{
- {
- CID: mockHeaderBlock.Cid().String(),
- },
+ Header: eth.HeaderModel{
+ TotalDifficulty: "1337",
+ CID: mockHeaderBlock.Cid().String(),
},
Uncles: []eth.UncleModel{
{
@@ -69,18 +71,18 @@ var (
},
StateNodes: []eth.StateNodeModel{{
CID: mockStateBlock.Cid().String(),
- Leaf: true,
+ NodeType: 2,
StateKey: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
}},
StorageNodes: []eth.StorageNodeWithStateKeyModel{{
CID: mockStorageBlock1.Cid().String(),
- Leaf: true,
+ NodeType: 2,
StateKey: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
StorageKey: "0000000000000000000000000000000000000000000000000000000000000001",
},
{
CID: mockStorageBlock2.Cid().String(),
- Leaf: true,
+ NodeType: 2,
StateKey: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
StorageKey: "0000000000000000000000000000000000000000000000000000000000000002",
}},
@@ -101,30 +103,53 @@ var _ = Describe("Fetcher", func() {
fetcher.BlockService = mockBlockService
i, err := fetcher.Fetch(mockCIDWrapper)
Expect(err).ToNot(HaveOccurred())
- iplds, ok := i.(*eth.IPLDWrapper)
+ iplds, ok := i.(eth.IPLDs)
Expect(ok).To(BeTrue())
+ Expect(iplds.TotalDifficulty).To(Equal(big.NewInt(1337)))
Expect(iplds.BlockNumber).To(Equal(mockCIDWrapper.BlockNumber))
- Expect(len(iplds.Headers)).To(Equal(1))
- Expect(iplds.Headers[0]).To(Equal(mockHeaderBlock))
+ Expect(iplds.Header).To(Equal(ipfs.BlockModel{
+ Data: mockHeaderBlock.RawData(),
+ CID: mockHeaderBlock.Cid().String(),
+ }))
Expect(len(iplds.Uncles)).To(Equal(1))
- Expect(iplds.Uncles[0]).To(Equal(mockUncleBlock))
+ Expect(iplds.Uncles[0]).To(Equal(ipfs.BlockModel{
+ Data: mockUncleBlock.RawData(),
+ CID: mockUncleBlock.Cid().String(),
+ }))
Expect(len(iplds.Transactions)).To(Equal(1))
- Expect(iplds.Transactions[0]).To(Equal(mockTrxBlock))
+ Expect(iplds.Transactions[0]).To(Equal(ipfs.BlockModel{
+ Data: mockTrxBlock.RawData(),
+ CID: mockTrxBlock.Cid().String(),
+ }))
Expect(len(iplds.Receipts)).To(Equal(1))
- Expect(iplds.Receipts[0]).To(Equal(mockReceiptBlock))
+ Expect(iplds.Receipts[0]).To(Equal(ipfs.BlockModel{
+ Data: mockReceiptBlock.RawData(),
+ CID: mockReceiptBlock.Cid().String(),
+ }))
Expect(len(iplds.StateNodes)).To(Equal(1))
- stateNode, ok := iplds.StateNodes[common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")]
- Expect(ok).To(BeTrue())
- Expect(stateNode).To(Equal(mockStateBlock))
- Expect(len(iplds.StorageNodes)).To(Equal(1))
- storageNodes := iplds.StorageNodes[common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")]
- Expect(len(storageNodes)).To(Equal(2))
- storageNode1, ok := storageNodes[common.HexToHash("0000000000000000000000000000000000000000000000000000000000000001")]
- Expect(ok).To(BeTrue())
- Expect(storageNode1).To(Equal(mockStorageBlock1))
- storageNode2, ok := storageNodes[common.HexToHash("0000000000000000000000000000000000000000000000000000000000000002")]
- Expect(storageNode2).To(Equal(mockStorageBlock2))
- Expect(ok).To(BeTrue())
+ Expect(iplds.StateNodes[0].StateLeafKey).To(Equal(common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")))
+ Expect(iplds.StateNodes[0].Type).To(Equal(statediff.Leaf))
+ Expect(iplds.StateNodes[0].IPLD).To(Equal(ipfs.BlockModel{
+ Data: mockStateBlock.RawData(),
+ CID: mockStateBlock.Cid().String(),
+ }))
+ Expect(len(iplds.StorageNodes)).To(Equal(2))
+ for _, storage := range iplds.StorageNodes {
+ Expect(storage.Type).To(Equal(statediff.Leaf))
+ Expect(storage.StateLeafKey).To(Equal(common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")))
+ if bytes.Equal(storage.StorageLeafKey.Bytes(), common.HexToHash("0000000000000000000000000000000000000000000000000000000000000001").Bytes()) {
+ Expect(storage.IPLD).To(Equal(ipfs.BlockModel{
+ Data: mockStorageBlock1.RawData(),
+ CID: mockStorageBlock1.Cid().String(),
+ }))
+ }
+ if bytes.Equal(storage.StorageLeafKey.Bytes(), common.HexToHash("0000000000000000000000000000000000000000000000000000000000000002").Bytes()) {
+ Expect(storage.IPLD).To(Equal(ipfs.BlockModel{
+ Data: mockStorageBlock2.RawData(),
+ CID: mockStorageBlock2.Cid().String(),
+ }))
+ }
+ }
})
})
})
diff --git a/pkg/super_node/eth/mocks/converter.go b/pkg/super_node/eth/mocks/converter.go
index 09326998..eea84595 100644
--- a/pkg/super_node/eth/mocks/converter.go
+++ b/pkg/super_node/eth/mocks/converter.go
@@ -29,12 +29,12 @@ import (
// PayloadConverter is the underlying struct for the Converter interface
type PayloadConverter struct {
PassedStatediffPayload statediff.Payload
- ReturnIPLDPayload eth.IPLDPayload
+ ReturnIPLDPayload eth.ConvertedPayload
ReturnErr error
}
// Convert method is used to convert a geth statediff.Payload to an IPLDPayload
-func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.StreamedIPLDs, error) {
+func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.ConvertedData, error) {
stateDiffPayload, ok := payload.(statediff.Payload)
if !ok {
return nil, fmt.Errorf("convert expected payload type %T got %T", statediff.Payload{}, payload)
@@ -46,13 +46,13 @@ func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.Streame
// IterativePayloadConverter is the underlying struct for the Converter interface
type IterativePayloadConverter struct {
PassedStatediffPayload []statediff.Payload
- ReturnIPLDPayload []eth.IPLDPayload
+ ReturnIPLDPayload []eth.ConvertedPayload
ReturnErr error
iteration int
}
// Convert converts a geth statediff.Payload into a ConvertedPayload
-func (pc *IterativePayloadConverter) Convert(payload shared.RawChainData) (shared.StreamedIPLDs, error) {
+func (pc *IterativePayloadConverter) Convert(payload shared.RawChainData) (shared.ConvertedData, error) {
stateDiffPayload, ok := payload.(statediff.Payload)
if !ok {
return nil, fmt.Errorf("convert expected payload type %T got %T", statediff.Payload{}, payload)
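
As with the single-shot mock, the iterative converter records what it was passed and replays canned eth.ConvertedPayload values in call order. A hedged usage sketch, assuming the exported fields shown above and the Convert signature from this diff:

package example

import (
	"github.com/ethereum/go-ethereum/statediff"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
)

// convertTwice drives the iterative mock through two calls; each call returns
// the next canned payload in ReturnIPLDPayload.
func convertTwice(p1, p2 statediff.Payload, out1, out2 eth.ConvertedPayload) error {
	conv := &mocks.IterativePayloadConverter{
		ReturnIPLDPayload: []eth.ConvertedPayload{out1, out2},
	}
	if _, err := conv.Convert(p1); err != nil {
		return err
	}
	_, err := conv.Convert(p2)
	return err
}
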
diff --git a/pkg/super_node/eth/mocks/publisher.go b/pkg/super_node/eth/mocks/publisher.go
index 14887938..a33e1211 100644
--- a/pkg/super_node/eth/mocks/publisher.go
+++ b/pkg/super_node/eth/mocks/publisher.go
@@ -26,16 +26,16 @@ import (
// IPLDPublisher is the underlying struct for the Publisher interface
type IPLDPublisher struct {
- PassedIPLDPayload eth.IPLDPayload
+ PassedIPLDPayload eth.ConvertedPayload
ReturnCIDPayload *eth.CIDPayload
ReturnErr error
}
// Publish publishes a ConvertedPayload to IPFS and returns the corresponding CIDPayload
-func (pub *IPLDPublisher) Publish(payload shared.StreamedIPLDs) (shared.CIDsForIndexing, error) {
- ipldPayload, ok := payload.(eth.IPLDPayload)
+func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForIndexing, error) {
+ ipldPayload, ok := payload.(eth.ConvertedPayload)
if !ok {
- return nil, fmt.Errorf("publish expected payload type %T got %T", &eth.IPLDPayload{}, payload)
+ return nil, fmt.Errorf("publish expected payload type %T got %T", &eth.ConvertedPayload{}, payload)
}
pub.PassedIPLDPayload = ipldPayload
return pub.ReturnCIDPayload, pub.ReturnErr
@@ -43,17 +43,17 @@ func (pub *IPLDPublisher) Publish(payload shared.StreamedIPLDs) (shared.CIDsForI
// IterativeIPLDPublisher is the underlying struct for the Publisher interface; used in testing
type IterativeIPLDPublisher struct {
- PassedIPLDPayload []eth.IPLDPayload
+ PassedIPLDPayload []eth.ConvertedPayload
ReturnCIDPayload []*eth.CIDPayload
ReturnErr error
iteration int
}
// Publish publishes a ConvertedPayload to IPFS and returns the corresponding CIDPayload
-func (pub *IterativeIPLDPublisher) Publish(payload shared.StreamedIPLDs) (shared.CIDsForIndexing, error) {
- ipldPayload, ok := payload.(eth.IPLDPayload)
+func (pub *IterativeIPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForIndexing, error) {
+ ipldPayload, ok := payload.(eth.ConvertedPayload)
if !ok {
- return nil, fmt.Errorf("publish expected payload type %T got %T", &eth.IPLDPayload{}, payload)
+ return nil, fmt.Errorf("publish expected payload type %T got %T", &eth.ConvertedPayload{}, payload)
}
pub.PassedIPLDPayload = append(pub.PassedIPLDPayload, ipldPayload)
if len(pub.ReturnCIDPayload) < pub.iteration+1 {
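
The iteration guard visible above, returning an error once ReturnCIDPayload is exhausted rather than panicking on an out-of-range index, is the core of the iterative-mock pattern. In isolation, as a minimal self-contained sketch:

package example

import "errors"

// iterativeStub replays canned return values in call order and fails loudly
// when a test makes more calls than it provisioned values for.
type iterativeStub struct {
	returns   []string
	iteration int
}

func (s *iterativeStub) next() (string, error) {
	if len(s.returns) < s.iteration+1 {
		return "", errors.New("iterative stub: no return value provisioned for this call")
	}
	out := s.returns[s.iteration]
	s.iteration++
	return out, nil
}
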
diff --git a/pkg/super_node/eth/mocks/test_data.go b/pkg/super_node/eth/mocks/test_data.go
index f9398cf2..dfefcc71 100644
--- a/pkg/super_node/eth/mocks/test_data.go
+++ b/pkg/super_node/eth/mocks/test_data.go
@@ -21,7 +21,6 @@ import (
"crypto/elliptic"
"crypto/rand"
"math/big"
- rand2 "math/rand"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
@@ -30,9 +29,13 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff"
+ "github.com/ethereum/go-ethereum/statediff/testhelpers"
"github.com/ipfs/go-block-format"
+ "github.com/multiformats/go-multihash"
log "github.com/sirupsen/logrus"
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs"
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
eth2 "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)
@@ -47,6 +50,8 @@ var (
Root: common.HexToHash("0x0"),
TxHash: common.HexToHash("0x0"),
ReceiptHash: common.HexToHash("0x0"),
+ Difficulty: big.NewInt(5000000),
+ Extra: []byte{},
}
MockTransactions, MockReceipts, senderAddr = createTransactionsAndReceipts()
ReceiptsRlp, _ = rlp.EncodeToBytes(MockReceipts)
@@ -59,7 +64,23 @@ var (
mockTopic12 = common.HexToHash("0x06")
mockTopic21 = common.HexToHash("0x05")
mockTopic22 = common.HexToHash("0x07")
- MockTrxMeta = []eth.TxModel{
+ MockLog1 = &types.Log{
+ Topics: []common.Hash{mockTopic11, mockTopic12},
+ Data: []byte{},
+ }
+ MockLog2 = &types.Log{
+ Topics: []common.Hash{mockTopic21, mockTopic22},
+ Data: []byte{},
+ }
+ HeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, MockHeaderRlp, multihash.KECCAK_256)
+ Trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(0), multihash.KECCAK_256)
+ Trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(1), multihash.KECCAK_256)
+ Rct1CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(0), multihash.KECCAK_256)
+ Rct2CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(1), multihash.KECCAK_256)
+ State1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, ContractLeafNode, multihash.KECCAK_256)
+ State2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, AccountLeafNode, multihash.KECCAK_256)
+ StorageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, StorageLeafNode, multihash.KECCAK_256)
+ MockTrxMeta = []eth.TxModel{
{
CID: "", // This is empty until we go to publish to ipfs
Src: senderAddr.Hex(),
@@ -77,14 +98,14 @@ var (
}
MockTrxMetaPostPublsh = []eth.TxModel{
{
- CID: "mockTrxCID1", // This is empty until we go to publish to ipfs
+ CID: Trx1CID.String(), // Expected CID once published to ipfs
Src: senderAddr.Hex(),
Dst: Address.String(),
Index: 0,
TxHash: MockTransactions[0].Hash().String(),
},
{
- CID: "mockTrxCID2",
+ CID: Trx2CID.String(),
Src: senderAddr.Hex(),
Dst: AnotherAddress.String(),
Index: 1,
@@ -115,7 +136,7 @@ var (
}
MockRctMetaPostPublish = []eth.ReceiptModel{
{
- CID: "mockRctCID1",
+ CID: Rct1CID.String(),
Topic0s: []string{
mockTopic11.String(),
},
@@ -125,7 +146,7 @@ var (
Contract: Address.String(),
},
{
- CID: "mockRctCID2",
+ CID: Rct2CID.String(),
Topic0s: []string{
mockTopic21.String(),
},
@@ -137,51 +158,72 @@ var (
}
// statediff data
- CodeHash = common.Hex2Bytes("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
- NonceValue = rand2.Uint64()
- anotherNonceValue = rand2.Uint64()
- BalanceValue = rand2.Int63()
- anotherBalanceValue = rand2.Int63()
- ContractRoot = common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
- StoragePath = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes()
- StorageKey = common.HexToHash("0000000000000000000000000000000000000000000000000000000000000001").Bytes()
- StorageValue = common.Hex2Bytes("0x03")
- storage = []statediff.StorageDiff{{
- Key: StorageKey,
- Value: StorageValue,
- Path: StoragePath,
- Proof: [][]byte{},
- Leaf: true,
- }}
- emptyStorage = make([]statediff.StorageDiff, 0)
- ContractLeafKey = crypto.Keccak256Hash(Address.Bytes())
- AnotherContractLeafKey = crypto.Keccak256Hash(AnotherAddress.Bytes())
- testAccount = state.Account{
- Nonce: NonceValue,
- Balance: big.NewInt(BalanceValue),
- Root: ContractRoot,
- CodeHash: CodeHash,
- }
- anotherTestAccount = state.Account{
- Nonce: anotherNonceValue,
- Balance: big.NewInt(anotherBalanceValue),
- Root: common.HexToHash("0x"),
- CodeHash: nil,
- }
- ValueBytes, _ = rlp.EncodeToBytes(testAccount)
- AnotherValueBytes, _ = rlp.EncodeToBytes(anotherTestAccount)
- CreatedAccountDiffs = []statediff.AccountDiff{
+ storageLocation = common.HexToHash("0")
+ StorageLeafKey = crypto.Keccak256Hash(storageLocation[:]).Bytes()
+ StorageValue = common.Hex2Bytes("01")
+ StoragePartialPath = common.Hex2Bytes("20290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
+ StorageLeafNode, _ = rlp.EncodeToBytes([]interface{}{
+ StoragePartialPath,
+ StorageValue,
+ })
+
+ nonce1 = uint64(1)
+ contractRoot = "0x821e2556a290c86405f8160a2d662042a431ba456b9db265c79bb837c04be5f0"
+ contractCodeHash = common.HexToHash("0x753f98a8d4328b15636e46f66f2cb4bc860100aa17967cc145fcd17d1d4710ea")
+ contractPathHash = crypto.Keccak256Hash([]byte{'\x06'})
+ ContractAddress = common.HexToAddress("0x703c4b2bD70c169f5717101CaeE543299Fc946C7")
+ ContractLeafKey = testhelpers.AddressToLeafKey(ContractAddress)
+ ContractAccount, _ = rlp.EncodeToBytes(state.Account{
+ Nonce: nonce1,
+ Balance: big.NewInt(0),
+ CodeHash: contractCodeHash.Bytes(),
+ Root: common.HexToHash(contractRoot),
+ })
+ ContractPartialPath = common.Hex2Bytes("3114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45")
+ ContractLeafNode, _ = rlp.EncodeToBytes([]interface{}{
+ ContractPartialPath,
+ ContractAccount,
+ })
+
+ nonce0 = uint64(0)
+ accountRoot = "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
+ accountCodeHash = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
+ accountPathHash = crypto.Keccak256Hash([]byte{'\x0c'})
+ AccountAddresss = common.HexToAddress("0x0D3ab14BBaD3D99F4203bd7a11aCB94882050E7e")
+ AccountLeafKey = testhelpers.Account2LeafKey
+ Account, _ = rlp.EncodeToBytes(state.Account{
+ Nonce: nonce0,
+ Balance: big.NewInt(1000),
+ CodeHash: accountCodeHash.Bytes(),
+ Root: common.HexToHash(accountRoot),
+ })
+ AccountPartialPath = common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45")
+ AccountLeafNode, _ = rlp.EncodeToBytes([]interface{}{
+ AccountPartialPath,
+ Account,
+ })
+
+ CreatedAccountDiffs = []statediff.AccountDiff{
{
- Key: ContractLeafKey.Bytes(),
- Value: ValueBytes,
- Storage: storage,
- Leaf: true,
+ Path: []byte{'\x06'},
+ NodeType: statediff.Leaf,
+ LeafKey: ContractLeafKey,
+ NodeValue: ContractLeafNode,
+ Storage: []statediff.StorageDiff{
+ {
+ Path: []byte{},
+ NodeType: statediff.Leaf,
+ LeafKey: StorageLeafKey,
+ NodeValue: StorageLeafNode,
+ },
+ },
},
{
- Key: AnotherContractLeafKey.Bytes(),
- Value: AnotherValueBytes,
- Storage: emptyStorage,
- Leaf: true,
+ Path: []byte{'\x0c'},
+ NodeType: statediff.Leaf,
+ LeafKey: AccountLeafKey,
+ NodeValue: AccountLeafNode,
+ Storage: []statediff.StorageDiff{},
},
}
@@ -193,34 +235,39 @@ var (
MockStateDiffBytes, _ = rlp.EncodeToBytes(MockStateDiff)
MockStateNodes = []eth.TrieNode{
{
- Key: ContractLeafKey,
- Value: ValueBytes,
- Leaf: true,
+ LeafKey: common.BytesToHash(ContractLeafKey),
+ Path: []byte{'\x06'},
+ Value: ContractLeafNode,
+ Type: statediff.Leaf,
},
{
- Key: AnotherContractLeafKey,
- Value: AnotherValueBytes,
- Leaf: true,
+ LeafKey: common.BytesToHash(AccountLeafKey),
+ Path: []byte{'\x0c'},
+ Value: AccountLeafNode,
+ Type: statediff.Leaf,
},
}
MockStateMetaPostPublish = []eth.StateNodeModel{
{
- CID: "mockStateCID1",
- Leaf: true,
- StateKey: ContractLeafKey.String(),
+ CID: State1CID.String(),
+ Path: []byte{'\x06'},
+ NodeType: 2,
+ StateKey: common.BytesToHash(ContractLeafKey).Hex(),
},
{
- CID: "mockStateCID2",
- Leaf: true,
- StateKey: AnotherContractLeafKey.String(),
+ CID: State2CID.String(),
+ Path: []byte{'\x0c'},
+ NodeType: 2,
+ StateKey: common.BytesToHash(AccountLeafKey).Hex(),
},
}
MockStorageNodes = map[common.Hash][]eth.TrieNode{
- ContractLeafKey: {
+ contractPathHash: {
{
- Key: common.BytesToHash(StorageKey),
- Value: StorageValue,
- Leaf: true,
+ LeafKey: common.BytesToHash(StorageLeafKey),
+ Value: StorageLeafNode,
+ Type: statediff.Leaf,
+ Path: []byte{},
},
},
}
@@ -230,11 +277,11 @@ var (
BlockRlp: MockBlockRlp,
StateDiffRlp: MockStateDiffBytes,
ReceiptsRlp: ReceiptsRlp,
- TotalDifficulty: big.NewInt(1337),
+ TotalDifficulty: MockBlock.Difficulty(),
}
- MockIPLDPayload = eth.IPLDPayload{
- TotalDifficulty: big.NewInt(1337),
+ MockConvertedPayload = eth.ConvertedPayload{
+ TotalDifficulty: MockBlock.Difficulty(),
Block: MockBlock,
Receipts: MockReceipts,
TxMetaData: MockTrxMeta,
@@ -247,9 +294,16 @@ var (
HeaderCID: eth2.HeaderModel{
BlockHash: MockBlock.Hash().String(),
BlockNumber: MockBlock.Number().String(),
- CID: "mockHeaderCID",
+ CID: HeaderCID.String(),
ParentHash: MockBlock.ParentHash().String(),
- TotalDifficulty: "1337",
+ TotalDifficulty: MockBlock.Difficulty().String(),
+ Reward: "5000000000000000000",
+ StateRoot: MockBlock.Root().String(),
+ RctRoot: MockBlock.ReceiptHash().String(),
+ TxRoot: MockBlock.TxHash().String(),
+ UncleRoot: MockBlock.UncleHash().String(),
+ Bloom: MockBlock.Bloom().Bytes(),
+ Timestamp: MockBlock.Time(),
},
UncleCIDs: []eth2.UncleModel{},
TransactionCIDs: MockTrxMetaPostPublsh,
@@ -259,26 +313,46 @@ var (
},
StateNodeCIDs: MockStateMetaPostPublish,
StorageNodeCIDs: map[common.Hash][]eth.StorageNodeModel{
- ContractLeafKey: {
+ contractPathHash: {
{
- CID: "mockStorageCID",
- StorageKey: "0x0000000000000000000000000000000000000000000000000000000000000001",
- Leaf: true,
+ CID: StorageCID.String(),
+ Path: []byte{},
+ StorageKey: common.BytesToHash(StorageLeafKey).Hex(),
+ NodeType: 2,
},
},
},
+ StateAccounts: map[common.Hash]eth.StateAccountModel{
+ contractPathHash: {
+ Balance: big.NewInt(0).String(),
+ Nonce: nonce1,
+ CodeHash: contractCodeHash.Bytes(),
+ StorageRoot: common.HexToHash(contractRoot).String(),
+ },
+ accountPathHash: {
+ Balance: big.NewInt(1000).String(),
+ Nonce: nonce0,
+ CodeHash: accountCodeHash.Bytes(),
+ StorageRoot: common.HexToHash(accountRoot).String(),
+ },
+ },
}
MockCIDWrapper = &eth.CIDWrapper{
BlockNumber: big.NewInt(1),
- Headers: []eth2.HeaderModel{
- {
- BlockNumber: "1",
- BlockHash: MockBlock.Hash().String(),
- ParentHash: "0x0000000000000000000000000000000000000000000000000000000000000000",
- CID: "mockHeaderCID",
- TotalDifficulty: "1337",
- },
+ Header: eth2.HeaderModel{
+ BlockNumber: "1",
+ BlockHash: MockBlock.Hash().String(),
+ ParentHash: "0x0000000000000000000000000000000000000000000000000000000000000000",
+ CID: HeaderCID.String(),
+ TotalDifficulty: MockBlock.Difficulty().String(),
+ Reward: "5000000000000000000",
+ StateRoot: MockBlock.Root().String(),
+ RctRoot: MockBlock.ReceiptHash().String(),
+ TxRoot: MockBlock.TxHash().String(),
+ UncleRoot: MockBlock.UncleHash().String(),
+ Bloom: MockBlock.Bloom().Bytes(),
+ Timestamp: MockBlock.Time(),
},
Transactions: MockTrxMetaPostPublsh,
Receipts: MockRctMetaPostPublish,
@@ -286,51 +360,80 @@ var (
StateNodes: MockStateMetaPostPublish,
StorageNodes: []eth.StorageNodeWithStateKeyModel{
{
- CID: "mockStorageCID",
- Leaf: true,
- StateKey: ContractLeafKey.Hex(),
- StorageKey: "0x0000000000000000000000000000000000000000000000000000000000000001",
+ Path: []byte{},
+ CID: StorageCID.String(),
+ NodeType: 2,
+ StateKey: common.BytesToHash(ContractLeafKey).Hex(),
+ StorageKey: common.BytesToHash(StorageLeafKey).Hex(),
},
},
}
- MockIPLDWrapper = &eth.IPLDWrapper{
+ HeaderIPLD, _ = blocks.NewBlockWithCid(MockHeaderRlp, HeaderCID)
+ Trx1IPLD, _ = blocks.NewBlockWithCid(MockTransactions.GetRlp(0), Trx1CID)
+ Trx2IPLD, _ = blocks.NewBlockWithCid(MockTransactions.GetRlp(1), Trx2CID)
+ Rct1IPLD, _ = blocks.NewBlockWithCid(MockReceipts.GetRlp(0), Rct1CID)
+ Rct2IPLD, _ = blocks.NewBlockWithCid(MockReceipts.GetRlp(1), Rct2CID)
+ State1IPLD, _ = blocks.NewBlockWithCid(ContractLeafNode, State1CID)
+ State2IPLD, _ = blocks.NewBlockWithCid(AccountLeafNode, State2CID)
+ StorageIPLD, _ = blocks.NewBlockWithCid(StorageLeafNode, StorageCID)
+
+ MockIPLDs = eth.IPLDs{
BlockNumber: big.NewInt(1),
- Headers: []blocks.Block{
- blocks.NewBlock(MockHeaderRlp),
+ Header: ipfs.BlockModel{
+ Data: HeaderIPLD.RawData(),
+ CID: HeaderIPLD.Cid().String(),
},
- Transactions: []blocks.Block{
- blocks.NewBlock(MockTransactions.GetRlp(0)),
- blocks.NewBlock(MockTransactions.GetRlp(1)),
- },
- Receipts: []blocks.Block{
- blocks.NewBlock(MockReceipts.GetRlp(0)),
- blocks.NewBlock(MockReceipts.GetRlp(1)),
- },
- StateNodes: map[common.Hash]blocks.Block{
- ContractLeafKey: blocks.NewBlock(ValueBytes),
- AnotherContractLeafKey: blocks.NewBlock(AnotherValueBytes),
- },
- StorageNodes: map[common.Hash]map[common.Hash]blocks.Block{
- ContractLeafKey: {
- common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"): blocks.NewBlock(StorageValue),
+ Transactions: []ipfs.BlockModel{
+ {
+ Data: Trx1IPLD.RawData(),
+ CID: Trx1IPLD.Cid().String(),
+ },
+ {
+ Data: Trx2IPLD.RawData(),
+ CID: Trx2IPLD.Cid().String(),
},
},
- }
-
- MockSeedNodePayload = eth2.StreamResponse{
- BlockNumber: big.NewInt(1),
- HeadersRlp: [][]byte{MockHeaderRlp},
- UnclesRlp: [][]byte{},
- TransactionsRlp: [][]byte{MockTransactions.GetRlp(0), MockTransactions.GetRlp(1)},
- ReceiptsRlp: [][]byte{MockTransactions.GetRlp(0), MockTransactions.GetRlp(1)},
- StateNodesRlp: map[common.Hash][]byte{
- ContractLeafKey: ValueBytes,
- AnotherContractLeafKey: AnotherValueBytes,
+ Receipts: []ipfs.BlockModel{
+ {
+ Data: Rct1IPLD.RawData(),
+ CID: Rct1IPLD.Cid().String(),
+ },
+ {
+ Data: Rct2IPLD.RawData(),
+ CID: Rct2IPLD.Cid().String(),
+ },
},
- StorageNodesRlp: map[common.Hash]map[common.Hash][]byte{
- ContractLeafKey: {
- common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"): StorageValue,
+ StateNodes: []eth2.StateNode{
+ {
+ StateLeafKey: common.BytesToHash(ContractLeafKey),
+ Type: statediff.Leaf,
+ IPLD: ipfs.BlockModel{
+ Data: State1IPLD.RawData(),
+ CID: State1IPLD.Cid().String(),
+ },
+ Path: []byte{'\x06'},
+ },
+ {
+ StateLeafKey: common.BytesToHash(AccountLeafKey),
+ Type: statediff.Leaf,
+ IPLD: ipfs.BlockModel{
+ Data: State2IPLD.RawData(),
+ CID: State2IPLD.Cid().String(),
+ },
+ Path: []byte{'\x0c'},
+ },
+ },
+ StorageNodes: []eth2.StorageNode{
+ {
+ StateLeafKey: common.BytesToHash(ContractLeafKey),
+ StorageLeafKey: common.BytesToHash(StorageLeafKey),
+ Type: statediff.Leaf,
+ IPLD: ipfs.BlockModel{
+ Data: StorageIPLD.RawData(),
+ CID: StorageIPLD.Cid().String(),
+ },
+ Path: []byte{},
},
},
}
@@ -339,8 +442,8 @@ var (
// createTransactionsAndReceipts is a helper function to generate signed mock transactions and mock receipts with mock logs
func createTransactionsAndReceipts() (types.Transactions, types.Receipts, common.Address) {
// make transactions
- trx1 := types.NewTransaction(0, Address, big.NewInt(1000), 50, big.NewInt(100), nil)
- trx2 := types.NewTransaction(1, AnotherAddress, big.NewInt(2000), 100, big.NewInt(200), nil)
+ trx1 := types.NewTransaction(0, Address, big.NewInt(1000), 50, big.NewInt(100), []byte{})
+ trx2 := types.NewTransaction(1, AnotherAddress, big.NewInt(2000), 100, big.NewInt(200), []byte{})
transactionSigner := types.MakeSigner(params.MainnetChainConfig, BlockNumber)
mockCurve := elliptic.P256()
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
@@ -361,16 +464,10 @@ func createTransactionsAndReceipts() (types.Transactions, types.Receipts, common
}
// make receipts
mockReceipt1 := types.NewReceipt(common.HexToHash("0x0").Bytes(), false, 50)
- mockLog1 := &types.Log{
- Topics: []common.Hash{mockTopic11, mockTopic12},
- }
- mockReceipt1.Logs = []*types.Log{mockLog1}
+ mockReceipt1.Logs = []*types.Log{MockLog1}
mockReceipt1.TxHash = signedTrx1.Hash()
mockReceipt2 := types.NewReceipt(common.HexToHash("0x1").Bytes(), false, 100)
- mockLog2 := &types.Log{
- Topics: []common.Hash{mockTopic21, mockTopic22},
- }
- mockReceipt2.Logs = []*types.Log{mockLog2}
+ mockReceipt2.Logs = []*types.Log{MockLog2}
mockReceipt2.TxHash = signedTrx2.Hash()
return types.Transactions{signedTrx1, signedTrx2}, types.Receipts{mockReceipt1, mockReceipt2}, senderAddr
}
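
The fixture changes above replace hand-written placeholder CIDs ("mockTrxCID1" and friends) with CIDs computed from the same RLP the tests publish, via ipld.RawdataToCid with a Keccak-256 multihash, and pair each byte payload with its CID through blocks.NewBlockWithCid. A sketch of the underlying go-cid mechanics, assuming RawdataToCid is a thin wrapper over a cid.Prefix (the codec constants such as ipld.MEthHeader are project-specific):

package example

import (
	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

// buildBlock hashes raw RLP with Keccak-256 under the given IPLD codec and
// wraps the bytes in a block whose CID matches that hash, mirroring the
// RawdataToCid + NewBlockWithCid pairing in the fixtures above.
func buildBlock(codec uint64, raw []byte) (blocks.Block, error) {
	prefix := cid.Prefix{
		Version:  1,
		Codec:    codec,
		MhType:   mh.KECCAK_256,
		MhLength: -1, // default digest length for the hash function
	}
	c, err := prefix.Sum(raw)
	if err != nil {
		return nil, err
	}
	return blocks.NewBlockWithCid(raw, c)
}
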
diff --git a/pkg/super_node/eth/models.go b/pkg/super_node/eth/models.go
index 91ee4d3c..b2a84b44 100644
--- a/pkg/super_node/eth/models.go
+++ b/pkg/super_node/eth/models.go
@@ -27,6 +27,13 @@ type HeaderModel struct {
CID string `db:"cid"`
TotalDifficulty string `db:"td"`
NodeID int64 `db:"node_id"`
+ Reward string `db:"reward"`
+ StateRoot string `db:"state_root"`
+ UncleRoot string `db:"uncle_root"`
+ TxRoot string `db:"tx_root"`
+ RctRoot string `db:"receipt_root"`
+ Bloom []byte `db:"bloom"`
+ Timestamp uint64 `db:"timestamp"`
}
// UncleModel is the db model for eth.uncle_cids
@@ -36,6 +43,7 @@ type UncleModel struct {
BlockHash string `db:"block_hash"`
ParentHash string `db:"parent_hash"`
CID string `db:"cid"`
+ Reward string `db:"reward"`
}
// TxModel is the db model for eth.transaction_cids
@@ -65,8 +73,9 @@ type ReceiptModel struct {
type StateNodeModel struct {
ID int64 `db:"id"`
HeaderID int64 `db:"header_id"`
- StateKey string `db:"state_key"`
- Leaf bool `db:"leaf"`
+ Path []byte `db:"state_path"`
+ StateKey string `db:"state_leaf_key"`
+ NodeType int `db:"node_type"`
CID string `db:"cid"`
}
@@ -74,8 +83,9 @@ type StateNodeModel struct {
type StorageNodeModel struct {
ID int64 `db:"id"`
StateID int64 `db:"state_id"`
- StorageKey string `db:"storage_key"`
- Leaf bool `db:"leaf"`
+ Path []byte `db:"storage_path"`
+ StorageKey string `db:"storage_leaf_key"`
+ NodeType int `db:"node_type"`
CID string `db:"cid"`
}
@@ -83,8 +93,19 @@ type StorageNodeModel struct {
type StorageNodeWithStateKeyModel struct {
ID int64 `db:"id"`
StateID int64 `db:"state_id"`
- StateKey string `db:"state_key"`
- StorageKey string `db:"storage_key"`
- Leaf bool `db:"leaf"`
+ Path []byte `db:"storage_path"`
+ StateKey string `db:"state_leaf_key"`
+ StorageKey string `db:"storage_leaf_key"`
+ NodeType int `db:"node_type"`
CID string `db:"cid"`
}
+
+// StateAccountModel is a db model for an eth state account (decoded value of state leaf node)
+type StateAccountModel struct {
+ ID int64 `db:"id"`
+ StateID int64 `db:"state_id"`
+ Balance string `db:"balance"`
+ Nonce uint64 `db:"nonce"`
+ CodeHash []byte `db:"code_hash"`
+ StorageRoot string `db:"storage_root"`
+}
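
StateAccountModel carries the decoded account fields so state leaves can be queried without re-decoding RLP. A hedged sketch of how such a row might be written with sqlx; the column names follow the db tags above, while the eth.state_accounts table name is an assumption not shown in this diff:

package example

import (
	"github.com/jmoiron/sqlx"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)

// insertStateAccount persists one decoded state account against its parent
// state_cids row; the table name is assumed, the columns mirror the db tags.
func insertStateAccount(tx *sqlx.Tx, stateID int64, acct eth.StateAccountModel) error {
	_, err := tx.Exec(`INSERT INTO eth.state_accounts (state_id, balance, nonce, code_hash, storage_root)
		VALUES ($1, $2, $3, $4, $5)`,
		stateID, acct.Balance, acct.Nonce, acct.CodeHash, acct.StorageRoot)
	return err
}
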
diff --git a/pkg/super_node/eth/publisher.go b/pkg/super_node/eth/publisher.go
index f584893d..0d09c403 100644
--- a/pkg/super_node/eth/publisher.go
+++ b/pkg/super_node/eth/publisher.go
@@ -17,24 +17,32 @@
package eth
import (
- "errors"
"fmt"
- "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/statediff"
+
+ common2 "github.com/vulcanize/vulcanizedb/pkg/eth/converters/common"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/dag_putters"
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
// IPLDPublisher satisfies the IPLDPublisher for ethereum
type IPLDPublisher struct {
- HeaderPutter shared.DagPutter
- TransactionPutter shared.DagPutter
- ReceiptPutter shared.DagPutter
- StatePutter shared.DagPutter
- StoragePutter shared.DagPutter
+ HeaderPutter shared.DagPutter
+ TransactionPutter shared.DagPutter
+ TransactionTriePutter shared.DagPutter
+ ReceiptPutter shared.DagPutter
+ ReceiptTriePutter shared.DagPutter
+ StatePutter shared.DagPutter
+ StoragePutter shared.DagPutter
}
// NewIPLDPublisher creates a pointer to a new Publisher which satisfies the IPLDPublisher interface
@@ -44,61 +52,79 @@ func NewIPLDPublisher(ipfsPath string) (*IPLDPublisher, error) {
return nil, err
}
return &IPLDPublisher{
- HeaderPutter: dag_putters.NewEthBlockHeaderDagPutter(node),
- TransactionPutter: dag_putters.NewEthTxsDagPutter(node),
- ReceiptPutter: dag_putters.NewEthReceiptDagPutter(node),
- StatePutter: dag_putters.NewEthStateDagPutter(node),
- StoragePutter: dag_putters.NewEthStorageDagPutter(node),
+ HeaderPutter: dag_putters.NewEthBlockHeaderDagPutter(node),
+ TransactionPutter: dag_putters.NewEthTxsDagPutter(node),
+ TransactionTriePutter: dag_putters.NewEthTxTrieDagPutter(node),
+ ReceiptPutter: dag_putters.NewEthReceiptDagPutter(node),
+ ReceiptTriePutter: dag_putters.NewEthRctTrieDagPutter(node),
+ StatePutter: dag_putters.NewEthStateDagPutter(node),
+ StoragePutter: dag_putters.NewEthStorageDagPutter(node),
}, nil
}
// Publish publishes a ConvertedPayload to IPFS and returns the corresponding CIDPayload
-func (pub *IPLDPublisher) Publish(payload shared.StreamedIPLDs) (shared.CIDsForIndexing, error) {
- ipldPayload, ok := payload.(IPLDPayload)
+func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForIndexing, error) {
+ ipldPayload, ok := payload.(ConvertedPayload)
if !ok {
- return nil, fmt.Errorf("eth publisher expected payload type %T got %T", IPLDPayload{}, payload)
+ return nil, fmt.Errorf("eth publisher expected payload type %T got %T", ConvertedPayload{}, payload)
}
- // Process and publish headers
- headerCid, err := pub.publishHeader(ipldPayload.Block.Header())
+ // Generate the nodes for publishing
+ headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, err := ipld.FromBlockAndReceipts(ipldPayload.Block, ipldPayload.Receipts)
if err != nil {
return nil, err
}
+
+ // Process and publish headers
+ headerCid, err := pub.publishHeader(headerNode)
+ if err != nil {
+ return nil, err
+ }
+ reward := common2.CalcEthBlockReward(ipldPayload.Block.Header(), ipldPayload.Block.Uncles(), ipldPayload.Block.Transactions(), ipldPayload.Receipts)
header := HeaderModel{
CID: headerCid,
ParentHash: ipldPayload.Block.ParentHash().String(),
BlockNumber: ipldPayload.Block.Number().String(),
BlockHash: ipldPayload.Block.Hash().String(),
TotalDifficulty: ipldPayload.TotalDifficulty.String(),
+ Reward: reward.String(),
+ Bloom: ipldPayload.Block.Bloom().Bytes(),
+ StateRoot: ipldPayload.Block.Root().String(),
+ RctRoot: ipldPayload.Block.ReceiptHash().String(),
+ TxRoot: ipldPayload.Block.TxHash().String(),
+ UncleRoot: ipldPayload.Block.UncleHash().String(),
+ Timestamp: ipldPayload.Block.Time(),
}
// Process and publish uncles
- uncleCids := make([]UncleModel, 0, len(ipldPayload.Block.Uncles()))
- for _, uncle := range ipldPayload.Block.Uncles() {
+ uncleCids := make([]UncleModel, len(uncleNodes))
+ for i, uncle := range uncleNodes {
uncleCid, err := pub.publishHeader(uncle)
if err != nil {
return nil, err
}
- uncleCids = append(uncleCids, UncleModel{
+ uncleReward := common2.CalcUncleMinerReward(ipldPayload.Block.Number().Int64(), uncle.Number.Int64())
+ uncleCids[i] = UncleModel{
CID: uncleCid,
ParentHash: uncle.ParentHash.String(),
BlockHash: uncle.Hash().String(),
- })
+ Reward: uncleReward.String(),
+ }
}
// Process and publish transactions
- transactionCids, err := pub.publishTransactions(ipldPayload.Block.Body().Transactions, ipldPayload.TxMetaData)
+ transactionCids, err := pub.publishTransactions(txNodes, txTrieNodes, ipldPayload.TxMetaData)
if err != nil {
return nil, err
}
// Process and publish receipts
- receiptsCids, err := pub.publishReceipts(ipldPayload.Receipts, ipldPayload.ReceiptMetaData)
+ receiptsCids, err := pub.publishReceipts(rctNodes, rctTrieNodes, ipldPayload.ReceiptMetaData)
if err != nil {
return nil, err
}
// Process and publish state leafs
- stateNodeCids, err := pub.publishStateNodes(ipldPayload.StateNodes)
+ stateNodeCids, stateAccounts, err := pub.publishStateNodes(ipldPayload.StateNodes)
if err != nil {
return nil, err
}
@@ -117,28 +143,27 @@ func (pub *IPLDPublisher) Publish(payload shared.StreamedIPLDs) (shared.CIDsForI
ReceiptCIDs: receiptsCids,
StateNodeCIDs: stateNodeCids,
StorageNodeCIDs: storageNodeCids,
+ StateAccounts: stateAccounts,
}, nil
}
-func (pub *IPLDPublisher) publishHeader(header *types.Header) (string, error) {
- cids, err := pub.HeaderPutter.DagPut(header)
- if err != nil {
- return "", err
- }
- return cids[0], nil
+func (pub *IPLDPublisher) generateBlockNodes(body *types.Block, receipts types.Receipts) (*ipld.EthHeader,
+ []*ipld.EthHeader, []*ipld.EthTx, []*ipld.EthTxTrie, []*ipld.EthReceipt, []*ipld.EthRctTrie, error) {
+ return ipld.FromBlockAndReceipts(body, receipts)
}
-func (pub *IPLDPublisher) publishTransactions(transactions types.Transactions, trxMeta []TxModel) ([]TxModel, error) {
- transactionCids, err := pub.TransactionPutter.DagPut(transactions)
- if err != nil {
- return nil, err
- }
- if len(transactionCids) != len(trxMeta) {
- return nil, errors.New("expected one CID for each transaction")
- }
- mappedTrxCids := make([]TxModel, len(transactionCids))
- for i, cid := range transactionCids {
- mappedTrxCids[i] = TxModel{
+func (pub *IPLDPublisher) publishHeader(header *ipld.EthHeader) (string, error) {
+ return pub.HeaderPutter.DagPut(header)
+}
+
+func (pub *IPLDPublisher) publishTransactions(transactions []*ipld.EthTx, txTrie []*ipld.EthTxTrie, trxMeta []TxModel) ([]TxModel, error) {
+ trxCids := make([]TxModel, len(transactions))
+ for i, tx := range transactions {
+ cid, err := pub.TransactionPutter.DagPut(tx)
+ if err != nil {
+ return nil, err
+ }
+ trxCids[i] = TxModel{
CID: cid,
Index: trxMeta[i].Index,
TxHash: trxMeta[i].TxHash,
@@ -146,22 +171,24 @@ func (pub *IPLDPublisher) publishTransactions(transactions types.Transactions, t
Dst: trxMeta[i].Dst,
}
}
- return mappedTrxCids, nil
+ for _, txNode := range txTrie {
+ // We don't do anything with the tx trie cids at the moment
+ if _, err := pub.TransactionTriePutter.DagPut(txNode); err != nil {
+ return nil, err
+ }
+ }
+ return trxCids, nil
}
-func (pub *IPLDPublisher) publishReceipts(receipts types.Receipts, receiptMeta []ReceiptModel) (map[common.Hash]ReceiptModel, error) {
- receiptsCids, err := pub.ReceiptPutter.DagPut(receipts)
- if err != nil {
- return nil, err
- }
- if len(receiptsCids) != len(receipts) {
- return nil, errors.New("expected one CID for each receipt")
- }
- // Map receipt cids to their transaction hashes
- mappedRctCids := make(map[common.Hash]ReceiptModel, len(receiptsCids))
+func (pub *IPLDPublisher) publishReceipts(receipts []*ipld.EthReceipt, receiptTrie []*ipld.EthRctTrie, receiptMeta []ReceiptModel) (map[common.Hash]ReceiptModel, error) {
+ rctCids := make(map[common.Hash]ReceiptModel)
for i, rct := range receipts {
- mappedRctCids[rct.TxHash] = ReceiptModel{
- CID: receiptsCids[i],
+ cid, err := pub.ReceiptPutter.DagPut(rct)
+ if err != nil {
+ return nil, err
+ }
+ rctCids[rct.TxHash] = ReceiptModel{
+ CID: cid,
Contract: receiptMeta[i].Contract,
Topic0s: receiptMeta[i].Topic0s,
Topic1s: receiptMeta[i].Topic1s,
@@ -169,39 +196,78 @@ func (pub *IPLDPublisher) publishReceipts(receipts types.Receipts, receiptMeta [
Topic3s: receiptMeta[i].Topic3s,
}
}
- return mappedRctCids, nil
-}
-
-func (pub *IPLDPublisher) publishStateNodes(stateNodes []TrieNode) ([]StateNodeModel, error) {
- stateNodeCids := make([]StateNodeModel, 0, len(stateNodes))
- for _, node := range stateNodes {
- cids, err := pub.StatePutter.DagPut(node.Value)
- if err != nil {
+ for _, rctNode := range receiptTrie {
+ // We don't do anything with the rct trie cids at the moment
+ if _, err := pub.ReceiptTriePutter.DagPut(rctNode); err != nil {
return nil, err
}
- stateNodeCids = append(stateNodeCids, StateNodeModel{
- StateKey: node.Key.String(),
- CID: cids[0],
- Leaf: node.Leaf,
- })
}
- return stateNodeCids, nil
+ return rctCids, nil
+}
+
+func (pub *IPLDPublisher) publishStateNodes(stateNodes []TrieNode) ([]StateNodeModel, map[common.Hash]StateAccountModel, error) {
+ stateNodeCids := make([]StateNodeModel, 0, len(stateNodes))
+ stateAccounts := make(map[common.Hash]StateAccountModel)
+ for _, stateNode := range stateNodes {
+ node, err := ipld.FromStateTrieRLP(stateNode.Value)
+ if err != nil {
+ return nil, nil, err
+ }
+ cid, err := pub.StatePutter.DagPut(node)
+ if err != nil {
+ return nil, nil, err
+ }
+ stateNodeCids = append(stateNodeCids, StateNodeModel{
+ Path: stateNode.Path,
+ StateKey: stateNode.LeafKey.String(),
+ CID: cid,
+ NodeType: ResolveFromNodeType(stateNode.Type),
+ })
+ // If we have a leaf, decode the account to extract additional metadata for indexing
+ if stateNode.Type == statediff.Leaf {
+ var i []interface{}
+ if err := rlp.DecodeBytes(stateNode.Value, &i); err != nil {
+ return nil, nil, err
+ }
+ if len(i) != 2 {
+ return nil, nil, fmt.Errorf("IPLDPublisher expected state leaf node rlp to decode into two elements")
+ }
+ var account state.Account
+ if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil {
+ return nil, nil, err
+ }
+ // Map state account to the state path hash
+ statePathHash := crypto.Keccak256Hash(stateNode.Path)
+ stateAccounts[statePathHash] = StateAccountModel{
+ Balance: account.Balance.String(),
+ Nonce: account.Nonce,
+ CodeHash: account.CodeHash,
+ StorageRoot: account.Root.String(),
+ }
+ }
+ }
+ return stateNodeCids, stateAccounts, nil
}
func (pub *IPLDPublisher) publishStorageNodes(storageNodes map[common.Hash][]TrieNode) (map[common.Hash][]StorageNodeModel, error) {
storageLeafCids := make(map[common.Hash][]StorageNodeModel)
- for addrKey, storageTrie := range storageNodes {
- storageLeafCids[addrKey] = make([]StorageNodeModel, 0, len(storageTrie))
- for _, node := range storageTrie {
- cids, err := pub.StoragePutter.DagPut(node.Value)
+ for pathHash, storageTrie := range storageNodes {
+ storageLeafCids[pathHash] = make([]StorageNodeModel, 0, len(storageTrie))
+ for _, storageNode := range storageTrie {
+ node, err := ipld.FromStorageTrieRLP(storageNode.Value)
if err != nil {
return nil, err
}
- // Map storage node cids to their state key hashes
- storageLeafCids[addrKey] = append(storageLeafCids[addrKey], StorageNodeModel{
- StorageKey: node.Key.Hex(),
- CID: cids[0],
- Leaf: node.Leaf,
+ cid, err := pub.StoragePutter.DagPut(node)
+ if err != nil {
+ return nil, err
+ }
+ // Map storage node cids to the state path hash
+ storageLeafCids[pathHash] = append(storageLeafCids[pathHash], StorageNodeModel{
+ Path: storageNode.Path,
+ StorageKey: storageNode.LeafKey.Hex(),
+ CID: cid,
+ NodeType: ResolveFromNodeType(storageNode.Type),
})
}
}
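
End to end, the rewritten publisher derives every IPLD node up front with ipld.FromBlockAndReceipts, dag-puts each node individually (one CID per call, replacing the old batch DagPut that returned a CID slice), and threads the block and uncle rewards plus the decoded state accounts into the models for indexing. A minimal caller sketch, assuming an initialized IPFS repository at ipfsPath:

package example

import (
	"fmt"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)

// publishOne pushes a single converted payload into IPFS and returns the
// CIDPayload the indexer consumes.
func publishOne(ipfsPath string, payload eth.ConvertedPayload) (*eth.CIDPayload, error) {
	pub, err := eth.NewIPLDPublisher(ipfsPath)
	if err != nil {
		return nil, err
	}
	cids, err := pub.Publish(payload)
	if err != nil {
		return nil, err
	}
	cidPayload, ok := cids.(*eth.CIDPayload)
	if !ok {
		return nil, fmt.Errorf("expected *eth.CIDPayload, got %T", cids)
	}
	return cidPayload, nil
}
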
diff --git a/pkg/super_node/eth/publisher_test.go b/pkg/super_node/eth/publisher_test.go
index e7292f49..62c517de 100644
--- a/pkg/super_node/eth/publisher_test.go
+++ b/pkg/super_node/eth/publisher_test.go
@@ -27,48 +27,63 @@ import (
)
var (
- mockHeaderDagPutter *mocks2.DagPutter
- mockTrxDagPutter *mocks2.DagPutter
- mockRctDagPutter *mocks2.DagPutter
+ mockHeaderDagPutter *mocks2.MappedDagPutter
+ mockTrxDagPutter *mocks2.MappedDagPutter
+ mockTrxTrieDagPutter *mocks2.DagPutter
+ mockRctDagPutter *mocks2.MappedDagPutter
+ mockRctTrieDagPutter *mocks2.DagPutter
mockStateDagPutter *mocks2.MappedDagPutter
- mockStorageDagPutter *mocks2.DagPutter
+ mockStorageDagPutter *mocks2.MappedDagPutter
)
var _ = Describe("Publisher", func() {
BeforeEach(func() {
- mockHeaderDagPutter = new(mocks2.DagPutter)
- mockTrxDagPutter = new(mocks2.DagPutter)
- mockRctDagPutter = new(mocks2.DagPutter)
+ mockHeaderDagPutter = new(mocks2.MappedDagPutter)
+ mockTrxDagPutter = new(mocks2.MappedDagPutter)
+ mockTrxTrieDagPutter = new(mocks2.DagPutter)
+ mockRctDagPutter = new(mocks2.MappedDagPutter)
+ mockRctTrieDagPutter = new(mocks2.DagPutter)
mockStateDagPutter = new(mocks2.MappedDagPutter)
- mockStorageDagPutter = new(mocks2.DagPutter)
+ mockStorageDagPutter = new(mocks2.MappedDagPutter)
})
Describe("Publish", func() {
It("Publishes the passed IPLDPayload objects to IPFS and returns a CIDPayload for indexing", func() {
- mockHeaderDagPutter.CIDsToReturn = []string{"mockHeaderCID"}
- mockTrxDagPutter.CIDsToReturn = []string{"mockTrxCID1", "mockTrxCID2"}
- mockRctDagPutter.CIDsToReturn = []string{"mockRctCID1", "mockRctCID2"}
- val1 := common.BytesToHash(mocks.MockIPLDPayload.StateNodes[0].Value)
- val2 := common.BytesToHash(mocks.MockIPLDPayload.StateNodes[1].Value)
- mockStateDagPutter.CIDsToReturn = map[common.Hash][]string{
- val1: {"mockStateCID1"},
- val2: {"mockStateCID2"},
+ mockHeaderDagPutter.CIDsToReturn = map[common.Hash]string{
+ common.BytesToHash(mocks.HeaderIPLD.RawData()): mocks.HeaderCID.String(),
+ }
+ mockTrxDagPutter.CIDsToReturn = map[common.Hash]string{
+ common.BytesToHash(mocks.Trx1IPLD.RawData()): mocks.Trx1CID.String(),
+ common.BytesToHash(mocks.Trx2IPLD.RawData()): mocks.Trx2CID.String(),
+ }
+ mockRctDagPutter.CIDsToReturn = map[common.Hash]string{
+ common.BytesToHash(mocks.Rct1IPLD.RawData()): mocks.Rct1CID.String(),
+ common.BytesToHash(mocks.Rct2IPLD.RawData()): mocks.Rct2CID.String(),
+ }
+ mockStateDagPutter.CIDsToReturn = map[common.Hash]string{
+ common.BytesToHash(mocks.State1IPLD.RawData()): mocks.State1CID.String(),
+ common.BytesToHash(mocks.State2IPLD.RawData()): mocks.State2CID.String(),
+ }
+ mockStorageDagPutter.CIDsToReturn = map[common.Hash]string{
+ common.BytesToHash(mocks.StorageIPLD.RawData()): mocks.StorageCID.String(),
}
- mockStorageDagPutter.CIDsToReturn = []string{"mockStorageCID"}
publisher := eth.IPLDPublisher{
- HeaderPutter: mockHeaderDagPutter,
- TransactionPutter: mockTrxDagPutter,
- ReceiptPutter: mockRctDagPutter,
- StatePutter: mockStateDagPutter,
- StoragePutter: mockStorageDagPutter,
+ HeaderPutter: mockHeaderDagPutter,
+ TransactionPutter: mockTrxDagPutter,
+ TransactionTriePutter: mockTrxTrieDagPutter,
+ ReceiptPutter: mockRctDagPutter,
+ ReceiptTriePutter: mockRctTrieDagPutter,
+ StatePutter: mockStateDagPutter,
+ StoragePutter: mockStorageDagPutter,
}
- payload, err := publisher.Publish(mocks.MockIPLDPayload)
+ payload, err := publisher.Publish(mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
cidPayload, ok := payload.(*eth.CIDPayload)
Expect(ok).To(BeTrue())
- Expect(cidPayload.HeaderCID.TotalDifficulty).To(Equal(mocks.MockIPLDPayload.TotalDifficulty.String()))
+ Expect(cidPayload.HeaderCID.TotalDifficulty).To(Equal(mocks.MockConvertedPayload.TotalDifficulty.String()))
Expect(cidPayload.HeaderCID.BlockNumber).To(Equal(mocks.MockCIDPayload.HeaderCID.BlockNumber))
Expect(cidPayload.HeaderCID.BlockHash).To(Equal(mocks.MockCIDPayload.HeaderCID.BlockHash))
+ Expect(cidPayload.HeaderCID.Reward).To(Equal(mocks.MockCIDPayload.HeaderCID.Reward))
Expect(cidPayload.UncleCIDs).To(Equal(mocks.MockCIDPayload.UncleCIDs))
Expect(cidPayload.HeaderCID).To(Equal(mocks.MockCIDPayload.HeaderCID))
Expect(len(cidPayload.TransactionCIDs)).To(Equal(2))
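
The test now keys every expected CID by common.BytesToHash of the node's raw bytes, so each mock putter can return the right CID regardless of call order. The MappedDagPutter itself is not shown in this diff; a plausible stand-in, under the assumption that dag-puttable nodes expose a RawData() []byte method:

package example

import (
	"errors"

	"github.com/ethereum/go-ethereum/common"
)

// MappedDagPutter is a hypothetical reconstruction: it maps the hash of a
// node's raw bytes to the CID the test expects for that node.
type MappedDagPutter struct {
	CIDsToReturn map[common.Hash]string
}

func (m *MappedDagPutter) DagPut(node interface{}) (string, error) {
	raw, ok := node.(interface{ RawData() []byte })
	if !ok {
		return "", errors.New("node does not expose RawData")
	}
	cid, ok := m.CIDsToReturn[common.BytesToHash(raw.RawData())]
	if !ok {
		return "", errors.New("no CID mapped for this node")
	}
	return cid, nil
}
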
diff --git a/pkg/super_node/eth/resolver.go b/pkg/super_node/eth/resolver.go
deleted file mode 100644
index acc038ff..00000000
--- a/pkg/super_node/eth/resolver.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-
-package eth
-
-import (
- "fmt"
-
- "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ipfs/go-block-format"
-)
-
-// IPLDResolver satisfies the IPLDResolver interface for ethereum
-type IPLDResolver struct{}
-
-// NewIPLDResolver returns a pointer to an IPLDResolver which satisfies the IPLDResolver interface
-func NewIPLDResolver() *IPLDResolver {
- return &IPLDResolver{}
-}
-
-// Resolve is the exported method for resolving all of the ETH IPLDs packaged in an IpfsBlockWrapper
-func (eir *IPLDResolver) Resolve(iplds shared.FetchedIPLDs) (shared.ServerResponse, error) {
- ipfsBlocks, ok := iplds.(*IPLDWrapper)
- if !ok {
- return StreamResponse{}, fmt.Errorf("eth resolver expected iplds type %T got %T", &IPLDWrapper{}, iplds)
- }
- return StreamResponse{
- BlockNumber: ipfsBlocks.BlockNumber,
- HeadersRlp: eir.resolve(ipfsBlocks.Headers),
- UnclesRlp: eir.resolve(ipfsBlocks.Uncles),
- TransactionsRlp: eir.resolve(ipfsBlocks.Transactions),
- ReceiptsRlp: eir.resolve(ipfsBlocks.Receipts),
- StateNodesRlp: eir.resolveState(ipfsBlocks.StateNodes),
- StorageNodesRlp: eir.resolveStorage(ipfsBlocks.StorageNodes),
- }, nil
-}
-
-func (eir *IPLDResolver) resolve(iplds []blocks.Block) [][]byte {
- rlps := make([][]byte, 0, len(iplds))
- for _, ipld := range iplds {
- rlps = append(rlps, ipld.RawData())
- }
- return rlps
-}
-
-func (eir *IPLDResolver) resolveState(iplds map[common.Hash]blocks.Block) map[common.Hash][]byte {
- stateNodes := make(map[common.Hash][]byte, len(iplds))
- for key, ipld := range iplds {
- stateNodes[key] = ipld.RawData()
- }
- return stateNodes
-}
-
-func (eir *IPLDResolver) resolveStorage(iplds map[common.Hash]map[common.Hash]blocks.Block) map[common.Hash]map[common.Hash][]byte {
- storageNodes := make(map[common.Hash]map[common.Hash][]byte)
- for stateKey, storageIPLDs := range iplds {
- storageNodes[stateKey] = make(map[common.Hash][]byte)
- for storageKey, storageVal := range storageIPLDs {
- storageNodes[stateKey][storageKey] = storageVal.RawData()
- }
- }
- return storageNodes
-}
diff --git a/pkg/super_node/eth/resolver_test.go b/pkg/super_node/eth/resolver_test.go
deleted file mode 100644
index 66115aca..00000000
--- a/pkg/super_node/eth/resolver_test.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-
-package eth_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
- "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
-
- "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
- "github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
-)
-
-var (
- resolver *eth.IPLDResolver
-)
-
-var _ = Describe("Resolver", func() {
- Describe("ResolveIPLDs", func() {
- BeforeEach(func() {
- resolver = eth.NewIPLDResolver()
- })
- It("Resolves IPLD data to their correct geth data types and packages them to send to requesting transformers", func() {
- payload, err := resolver.Resolve(mocks.MockIPLDWrapper)
- Expect(err).ToNot(HaveOccurred())
- superNodePayload, ok := payload.(eth.StreamResponse)
- Expect(ok).To(BeTrue())
- Expect(superNodePayload.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
- Expect(superNodePayload.HeadersRlp).To(Equal(mocks.MockSeedNodePayload.HeadersRlp))
- Expect(superNodePayload.UnclesRlp).To(Equal(mocks.MockSeedNodePayload.UnclesRlp))
- Expect(len(superNodePayload.TransactionsRlp)).To(Equal(2))
- Expect(shared.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
- Expect(shared.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
- Expect(len(superNodePayload.ReceiptsRlp)).To(Equal(2))
- Expect(shared.ListContainsBytes(superNodePayload.ReceiptsRlp, mocks.MockReceipts.GetRlp(0))).To(BeTrue())
- Expect(shared.ListContainsBytes(superNodePayload.ReceiptsRlp, mocks.MockReceipts.GetRlp(1))).To(BeTrue())
- Expect(len(superNodePayload.StateNodesRlp)).To(Equal(2))
- Expect(superNodePayload.StorageNodesRlp).To(Equal(mocks.MockSeedNodePayload.StorageNodesRlp))
- })
- })
-})
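
Both resolver files can go because their only job, flattening fetched blocks.Block values into maps of RLP bytes, is subsumed by the new response shape: fetch results already carry raw bytes and CID together in an ipfs.BlockModel. The whole resolve step collapses to the conversion pattern used throughout the new fixtures:

package example

import (
	blocks "github.com/ipfs/go-block-format"

	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)

// toBlockModel pairs a fetched block's raw bytes with its CID string, the
// shape the deleted resolver used to reconstruct indirectly.
func toBlockModel(b blocks.Block) ipfs.BlockModel {
	return ipfs.BlockModel{
		Data: b.RawData(),
		CID:  b.Cid().String(),
	}
}
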
diff --git a/pkg/super_node/eth/retriever.go b/pkg/super_node/eth/retriever.go
index d58baf0d..125b0769 100644
--- a/pkg/super_node/eth/retriever.go
+++ b/pkg/super_node/eth/retriever.go
@@ -57,7 +57,7 @@ func (ecr *CIDRetriever) RetrieveLastBlockNumber() (int64, error) {
}
// Retrieve is used to retrieve all of the CIDs which conform to the passed StreamFilters
-func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumber int64) (shared.CIDsForFetching, bool, error) {
+func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumber int64) ([]shared.CIDsForFetching, bool, error) {
streamFilter, ok := filter.(*SubscriptionSettings)
if !ok {
return nil, true, fmt.Errorf("eth retriever expected filter type %T got %T", &SubscriptionSettings{}, filter)
@@ -68,21 +68,26 @@ func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumbe
return nil, true, err
}
- cw := new(CIDWrapper)
- cw.BlockNumber = big.NewInt(blockNumber)
- // Retrieve cached header CIDs
- if !streamFilter.HeaderFilter.Off {
- cw.Headers, err = ecr.RetrieveHeaderCIDs(tx, blockNumber)
- if err != nil {
- if err := tx.Rollback(); err != nil {
- log.Error(err)
- }
- log.Error("header cid retrieval error")
- return nil, true, err
+ // Retrieve cached header CIDs at this block height
+ headers, err := ecr.RetrieveHeaderCIDs(tx, blockNumber)
+ if err != nil {
+ if err := tx.Rollback(); err != nil {
+ log.Error(err)
}
- if streamFilter.HeaderFilter.Uncles {
- for _, headerCID := range cw.Headers {
- uncleCIDs, err := ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID.ID)
+ log.Error("header cid retrieval error")
+ return nil, true, err
+ }
+ cws := make([]shared.CIDsForFetching, len(headers))
+ empty := true
+ for i, header := range headers {
+ cw := new(CIDWrapper)
+ cw.BlockNumber = big.NewInt(blockNumber)
+ if !streamFilter.HeaderFilter.Off {
+ cw.Header = header
+ empty = false
+ if streamFilter.HeaderFilter.Uncles {
+ // Retrieve uncle cids for this header id
+ uncleCIDs, err := ecr.RetrieveUncleCIDsByHeaderID(tx, header.ID)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
@@ -90,66 +95,73 @@ func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumbe
log.Error("uncle cid retrieval error")
return nil, true, err
}
- cw.Uncles = append(cw.Uncles, uncleCIDs...)
+ cw.Uncles = uncleCIDs
}
}
- }
- // Retrieve cached trx CIDs
- if !streamFilter.TxFilter.Off {
- cw.Transactions, err = ecr.RetrieveTxCIDs(tx, streamFilter.TxFilter, blockNumber)
- if err != nil {
- if err := tx.Rollback(); err != nil {
- log.Error(err)
+ // Retrieve cached trx CIDs
+ if !streamFilter.TxFilter.Off {
+ cw.Transactions, err = ecr.RetrieveTxCIDs(tx, streamFilter.TxFilter, header.ID)
+ if err != nil {
+ if err := tx.Rollback(); err != nil {
+ log.Error(err)
+ }
+ log.Error("transaction cid retrieval error")
+ return nil, true, err
}
- log.Error("transaction cid retrieval error")
- return nil, true, err
- }
- }
- trxIds := make([]int64, 0, len(cw.Transactions))
- for _, tx := range cw.Transactions {
- trxIds = append(trxIds, tx.ID)
- }
- // Retrieve cached receipt CIDs
- if !streamFilter.ReceiptFilter.Off {
- cw.Receipts, err = ecr.RetrieveRctCIDs(tx, streamFilter.ReceiptFilter, blockNumber, nil, trxIds)
- if err != nil {
- if err := tx.Rollback(); err != nil {
- log.Error(err)
+ if len(cw.Transactions) > 0 {
+ empty = false
}
- log.Error("receipt cid retrieval error")
- return nil, true, err
}
- }
- // Retrieve cached state CIDs
- if !streamFilter.StateFilter.Off {
- cw.StateNodes, err = ecr.RetrieveStateCIDs(tx, streamFilter.StateFilter, blockNumber)
- if err != nil {
- if err := tx.Rollback(); err != nil {
- log.Error(err)
+ trxIds := make([]int64, len(cw.Transactions))
+ for j, tx := range cw.Transactions {
+ trxIds[j] = tx.ID
+ }
+ // Retrieve cached receipt CIDs
+ if !streamFilter.ReceiptFilter.Off {
+ cw.Receipts, err = ecr.RetrieveRctCIDsByHeaderID(tx, streamFilter.ReceiptFilter, header.ID, trxIds)
+ if err != nil {
+ if err := tx.Rollback(); err != nil {
+ log.Error(err)
+ }
+ log.Error("receipt cid retrieval error")
+ return nil, true, err
}
- log.Error("state cid retrieval error")
- return nil, true, err
- }
- }
- // Retrieve cached storage CIDs
- if !streamFilter.StorageFilter.Off {
- cw.StorageNodes, err = ecr.RetrieveStorageCIDs(tx, streamFilter.StorageFilter, blockNumber)
- if err != nil {
- if err := tx.Rollback(); err != nil {
- log.Error(err)
+ if len(cw.Receipts) > 0 {
+ empty = false
}
- log.Error("storage cid retrieval error")
- return nil, true, err
}
+ // Retrieve cached state CIDs
+ if !streamFilter.StateFilter.Off {
+ cw.StateNodes, err = ecr.RetrieveStateCIDs(tx, streamFilter.StateFilter, header.ID)
+ if err != nil {
+ if err := tx.Rollback(); err != nil {
+ log.Error(err)
+ }
+ log.Error("state cid retrieval error")
+ return nil, true, err
+ }
+ if len(cw.StateNodes) > 0 {
+ empty = false
+ }
+ }
+ // Retrieve cached storage CIDs
+ if !streamFilter.StorageFilter.Off {
+ cw.StorageNodes, err = ecr.RetrieveStorageCIDs(tx, streamFilter.StorageFilter, header.ID)
+ if err != nil {
+ if err := tx.Rollback(); err != nil {
+ log.Error(err)
+ }
+ log.Error("storage cid retrieval error")
+ return nil, true, err
+ }
+ if len(cw.StorageNodes) > 0 {
+ empty = false
+ }
+ }
+ cws[i] = cw
}
- return cw, empty(cw), tx.Commit()
-}
-func empty(cidWrapper *CIDWrapper) bool {
- if len(cidWrapper.Transactions) > 0 || len(cidWrapper.Headers) > 0 || len(cidWrapper.Uncles) > 0 || len(cidWrapper.Receipts) > 0 || len(cidWrapper.StateNodes) > 0 || len(cidWrapper.StorageNodes) > 0 {
- return false
- }
- return true
+ return cws, empty, tx.Commit()
}
// RetrieveHeaderCIDs retrieves and returns all of the header cids at the provided block height
@@ -172,8 +184,8 @@ func (ecr *CIDRetriever) RetrieveUncleCIDsByHeaderID(tx *sqlx.Tx, headerID int64
// RetrieveTxCIDs retrieves and returns all of the trx cids for the provided header ID that conform to the provided filter parameters
// also returns the ids for the returned transaction cids
-func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, blockNumber int64) ([]TxModel, error) {
- log.Debug("retrieving transaction cids for block ", blockNumber)
+func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID int64) ([]TxModel, error) {
+ log.Debug("retrieving transaction cids for header id ", headerID)
args := make([]interface{}, 0, 3)
results := make([]TxModel, 0)
id := 1
@@ -181,8 +193,8 @@ func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, blockNum
transaction_cids.tx_hash, transaction_cids.cid,
transaction_cids.dst, transaction_cids.src, transaction_cids.index
FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
- WHERE header_cids.block_number = $%d`, id)
- args = append(args, blockNumber)
+ WHERE header_cids.id = $%d`, id)
+ args = append(args, headerID)
id++
if len(txFilter.Dst) > 0 {
pgStr += fmt.Sprintf(` AND transaction_cids.dst = ANY($%d::VARCHAR(66)[])`, id)
@@ -193,38 +205,31 @@ func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, blockNum
pgStr += fmt.Sprintf(` AND transaction_cids.src = ANY($%d::VARCHAR(66)[])`, id)
args = append(args, pq.Array(txFilter.Src))
}
+ pgStr += ` ORDER BY transaction_cids.index`
return results, tx.Select(&results, pgStr, args...)
}
-// RetrieveRctCIDs retrieves and returns all of the rct cids at the provided blockheight that conform to the provided
+// RetrieveRctCIDsByHeaderID retrieves and returns all of the rct cids at the provided header ID that conform to the provided
// filter parameters and correspond to the provided tx ids
-func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash, trxIds []int64) ([]ReceiptModel, error) {
- log.Debug("retrieving receipt cids for block ", blockNumber)
- id := 1
+func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter ReceiptFilter, headerID int64, trxIds []int64) ([]ReceiptModel, error) {
+ log.Debug("retrieving receipt cids for header id ", headerID)
args := make([]interface{}, 0, 4)
pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid,
receipt_cids.contract, receipt_cids.topic0s, receipt_cids.topic1s,
receipt_cids.topic2s, receipt_cids.topic3s
FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.id
- AND transaction_cids.header_id = header_cids.id`
- if blockNumber > 0 {
- pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)
- args = append(args, blockNumber)
- id++
- }
- if blockHash != nil {
- pgStr += fmt.Sprintf(` AND header_cids.block_hash = $%d`, id)
- args = append(args, blockHash.String())
- id++
- }
+ AND transaction_cids.header_id = header_cids.id
+ AND header_cids.id = $1`
+ id := 2
+ args = append(args, headerID)
if len(rctFilter.Contracts) > 0 {
// Filter on contract addresses if there are any
pgStr += fmt.Sprintf(` AND ((receipt_cids.contract = ANY($%d::VARCHAR(66)[])`, id)
args = append(args, pq.Array(rctFilter.Contracts))
id++
// Filter on topics if there are any
- if len(rctFilter.Topics) > 0 {
+ if hasTopics(rctFilter.Topics) {
pgStr += " AND ("
first := true
for i, topicSet := range rctFilter.Topics {
@@ -250,7 +255,7 @@ func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, b
pgStr += ")"
} else { // If there are no contract addresses to filter on
// Filter on topics if there are any
- if len(rctFilter.Topics) > 0 {
+ if hasTopics(rctFilter.Topics) {
pgStr += " AND (("
first := true
for i, topicSet := range rctFilter.Topics {
@@ -279,63 +284,161 @@ func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, b
args = append(args, pq.Array(trxIds))
}
}
+ pgStr += ` ORDER BY transaction_cids.index`
receiptCids := make([]ReceiptModel, 0)
return receiptCids, tx.Select(&receiptCids, pgStr, args...)
}
-// RetrieveStateCIDs retrieves and returns all of the state node cids at the provided blockheight that conform to the provided filter parameters
-func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter, blockNumber int64) ([]StateNodeModel, error) {
- log.Debug("retrieving state cids for block ", blockNumber)
+// RetrieveRctCIDs retrieves and returns all of the rct cids at the provided block height or block hash that conform to the provided
+// filter parameters and correspond to the provided tx ids
+func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash, trxIds []int64) ([]ReceiptModel, error) {
+ log.Debug("retrieving receipt cids for block ", blockNumber)
+ args := make([]interface{}, 0, 5)
+ pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid,
+ receipt_cids.contract, receipt_cids.topic0s, receipt_cids.topic1s,
+ receipt_cids.topic2s, receipt_cids.topic3s
+ FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
+ WHERE receipt_cids.tx_id = transaction_cids.id
+ AND transaction_cids.header_id = header_cids.id`
+ id := 1
+ if blockNumber > 0 {
+ pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)
+ args = append(args, blockNumber)
+ id++
+ }
+ if blockHash != nil {
+ pgStr += fmt.Sprintf(` AND header_cids.block_hash = $%d`, id)
+ args = append(args, blockHash.String())
+ id++
+ }
+ if len(rctFilter.Contracts) > 0 {
+ // Filter on contract addresses if there are any
+ pgStr += fmt.Sprintf(` AND ((receipt_cids.contract = ANY($%d::VARCHAR(66)[])`, id)
+ args = append(args, pq.Array(rctFilter.Contracts))
+ id++
+ // Filter on topics if there are any
+ if hasTopics(rctFilter.Topics) {
+ pgStr += " AND ("
+ first := true
+ for i, topicSet := range rctFilter.Topics {
+ if i < 4 && len(topicSet) > 0 {
+ if first {
+ pgStr += fmt.Sprintf(`receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
+ first = false
+ } else {
+ pgStr += fmt.Sprintf(` AND receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
+ }
+ args = append(args, pq.Array(topicSet))
+ id++
+ }
+ }
+ pgStr += ")"
+ }
+ pgStr += ")"
+ // Filter on txIDs if there are any and we are matching txs
+ if rctFilter.MatchTxs && len(trxIds) > 0 {
+ pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
+ args = append(args, pq.Array(trxIds))
+ }
+ pgStr += ")"
+ } else { // If there are no contract addresses to filter on
+ // Filter on topics if there are any
+ if hasTopics(rctFilter.Topics) {
+ pgStr += " AND (("
+ first := true
+ for i, topicSet := range rctFilter.Topics {
+ if i < 4 && len(topicSet) > 0 {
+ if first {
+ pgStr += fmt.Sprintf(`receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
+ first = false
+ } else {
+ pgStr += fmt.Sprintf(` AND receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
+ }
+ args = append(args, pq.Array(topicSet))
+ id++
+ }
+ }
+ pgStr += ")"
+ // Filter on txIDs if there are any and we are matching txs
+ if rctFilter.MatchTxs && len(trxIds) > 0 {
+ pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
+ args = append(args, pq.Array(trxIds))
+ }
+ pgStr += ")"
+ } else if rctFilter.MatchTxs && len(trxIds) > 0 {
+ // If there are no contract addresses or topics to filter on,
+ // Filter on txIDs if there are any and we are matching txs
+ pgStr += fmt.Sprintf(` AND receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
+ args = append(args, pq.Array(trxIds))
+ }
+ }
+ pgStr += ` ORDER BY transaction_cids.index`
+ receiptCids := make([]ReceiptModel, 0)
+ return receiptCids, tx.Select(&receiptCids, pgStr, args...)
+}
+
+func hasTopics(topics [][]string) bool {
+ for _, topicSet := range topics {
+ if len(topicSet) > 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// RetrieveStateCIDs retrieves and returns all of the state node cids at the provided header ID that conform to the provided filter parameters
+func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter, headerID int64) ([]StateNodeModel, error) {
+ log.Debug("retrieving state cids for header id ", headerID)
args := make([]interface{}, 0, 2)
pgStr := `SELECT state_cids.id, state_cids.header_id,
- state_cids.state_key, state_cids.leaf, state_cids.cid
+ state_cids.state_leaf_key, state_cids.node_type, state_cids.cid, state_cids.state_path
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
- WHERE header_cids.block_number = $1`
- args = append(args, blockNumber)
+ WHERE header_cids.id = $1`
+ args = append(args, headerID)
addrLen := len(stateFilter.Addresses)
if addrLen > 0 {
keys := make([]string, addrLen)
for i, addr := range stateFilter.Addresses {
keys[i] = crypto.Keccak256Hash(common.HexToAddress(addr).Bytes()).String()
}
- pgStr += ` AND state_cids.state_key = ANY($2::VARCHAR(66)[])`
+ pgStr += ` AND state_cids.state_leaf_key = ANY($2::VARCHAR(66)[])`
args = append(args, pq.Array(keys))
}
if !stateFilter.IntermediateNodes {
- pgStr += ` AND state_cids.leaf = TRUE`
+ pgStr += ` AND state_cids.node_type = 2`
}
stateNodeCIDs := make([]StateNodeModel, 0)
return stateNodeCIDs, tx.Select(&stateNodeCIDs, pgStr, args...)
}
-// RetrieveStorageCIDs retrieves and returns all of the storage node cids at the provided blockheight that conform to the provided filter parameters
-func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageFilter, blockNumber int64) ([]StorageNodeWithStateKeyModel, error) {
- log.Debug("retrieving storage cids for block ", blockNumber)
+// RetrieveStorageCIDs retrieves and returns all of the storage node cids at the provided header id that conform to the provided filter parameters
+func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageFilter, headerID int64) ([]StorageNodeWithStateKeyModel, error) {
+ log.Debug("retrieving storage cids for header id ", headerID)
args := make([]interface{}, 0, 3)
- id := 1
- pgStr := fmt.Sprintf(`SELECT storage_cids.id, storage_cids.state_id, storage_cids.storage_key,
- storage_cids.leaf, storage_cids.cid, state_cids.state_key FROM eth.storage_cids, eth.state_cids, eth.header_cids
+ pgStr := `SELECT storage_cids.id, storage_cids.state_id, storage_cids.storage_leaf_key,
+ storage_cids.node_type, storage_cids.cid, storage_cids.storage_path, state_cids.state_leaf_key
+ FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE storage_cids.state_id = state_cids.id
AND state_cids.header_id = header_cids.id
- AND header_cids.block_number = $%d`, id)
- args = append(args, blockNumber)
- id++
+ AND header_cids.id = $1`
+ args = append(args, headerID)
+ id := 2
addrLen := len(storageFilter.Addresses)
if addrLen > 0 {
keys := make([]string, addrLen)
for i, addr := range storageFilter.Addresses {
keys[i] = crypto.Keccak256Hash(common.HexToAddress(addr).Bytes()).String()
}
- pgStr += fmt.Sprintf(` AND state_cids.state_key = ANY($%d::VARCHAR(66)[])`, id)
+ pgStr += fmt.Sprintf(` AND state_cids.state_leaf_key = ANY($%d::VARCHAR(66)[])`, id)
args = append(args, pq.Array(keys))
id++
}
if len(storageFilter.StorageKeys) > 0 {
- pgStr += fmt.Sprintf(` AND storage_cids.storage_key = ANY($%d::VARCHAR(66)[])`, id)
+ pgStr += fmt.Sprintf(` AND storage_cids.storage_leaf_key = ANY($%d::VARCHAR(66)[])`, id)
args = append(args, pq.Array(storageFilter.StorageKeys))
}
if !storageFilter.IntermediateNodes {
- pgStr += ` AND storage_cids.leaf = TRUE`
+ pgStr += ` AND storage_cids.node_type = 2`
}
storageNodeCIDs := make([]StorageNodeWithStateKeyModel, 0)
return storageNodeCIDs, tx.Select(&storageNodeCIDs, pgStr, args...)
@@ -474,7 +577,8 @@ func (ecr *CIDRetriever) RetrieveHeaderCIDByHash(tx *sqlx.Tx, blockHash common.H
func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID int64) ([]TxModel, error) {
log.Debug("retrieving tx cids for block id ", headerID)
pgStr := `SELECT * FROM eth.transaction_cids
- WHERE header_id = $1`
+ WHERE header_id = $1
+ ORDER BY index`
var txCIDs []TxModel
return txCIDs, tx.Select(&txCIDs, pgStr, headerID)
}
@@ -482,8 +586,13 @@ func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID int64) (
// RetrieveReceiptCIDsByTxIDs retrieves receipt CIDs by their associated tx IDs
func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txIDs []int64) ([]ReceiptModel, error) {
log.Debugf("retrieving receipt cids for tx ids %v", txIDs)
- pgStr := `SELECT * FROM eth.receipt_cids
- WHERE tx_id = ANY($1::INTEGER[])`
+ pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid,
+ receipt_cids.contract, receipt_cids.topic0s, receipt_cids.topic1s,
+ receipt_cids.topic2s, receipt_cids.topic3s
+ FROM eth.receipt_cids, eth.transaction_cids
+ WHERE tx_id = ANY($1::INTEGER[])
+ AND receipt_cids.tx_id = transaction_cids.id
+ ORDER BY transaction_cids.index`
var rctCIDs []ReceiptModel
return rctCIDs, tx.Select(&rctCIDs, pgStr, pq.Array(txIDs))
}
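
Reviewer note: the dynamic SQL above keeps a running placeholder counter so the bind args stay aligned no matter which optional filters fire, and it matches topics with the Postgres && array-overlap operator (a receipt passes when any of its stored topicNs appears in the subscriber's set for that position). A minimal standalone sketch of the placeholder pattern, with hypothetical table and column names:

package main

import "fmt"

// buildQuery mirrors the incremental-placeholder pattern in RetrieveRctCIDs:
// each optional condition appends a "$n" placeholder and bumps n, so the args
// slice lines up with the placeholders regardless of which filters are set.
func buildQuery(blockNumber int64, contracts []string) (string, []interface{}) {
	pgStr := `SELECT cid FROM receipt_cids WHERE TRUE`
	args := make([]interface{}, 0, 2)
	id := 1
	if blockNumber > 0 {
		pgStr += fmt.Sprintf(` AND block_number = $%d`, id)
		args = append(args, blockNumber)
		id++
	}
	if len(contracts) > 0 {
		// the real code wraps the slice in pq.Array for the driver
		pgStr += fmt.Sprintf(` AND contract = ANY($%d::VARCHAR(66)[])`, id)
		args = append(args, contracts)
	}
	return pgStr, args
}

func main() {
	q, args := buildQuery(42, []string{"0xabc"})
	fmt.Println(q)
	fmt.Println(args...)
}
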
diff --git a/pkg/super_node/eth/retriever_test.go b/pkg/super_node/eth/retriever_test.go
index b010208f..d2de0e46 100644
--- a/pkg/super_node/eth/retriever_test.go
+++ b/pkg/super_node/eth/retriever_test.go
@@ -19,6 +19,8 @@ package eth_test
import (
"math/big"
+ "github.com/ethereum/go-ethereum/common"
+
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -196,7 +198,7 @@ var (
Off: true,
},
StateFilter: eth.StateFilter{
- Addresses: []string{mocks.Address.Hex()},
+ Addresses: []string{mocks.AccountAddresss.Hex()},
},
StorageFilter: eth.StorageFilter{
Off: true,
@@ -230,14 +232,14 @@ var _ = Describe("Retriever", func() {
cids, empty, err := retriever.Retrieve(openFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
- cidWrapper, ok := cids.(*eth.CIDWrapper)
+ Expect(len(cids)).To(Equal(1))
+ cidWrapper, ok := cids[0].(*eth.CIDWrapper)
Expect(ok).To(BeTrue())
Expect(cidWrapper.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
- Expect(len(cidWrapper.Headers)).To(Equal(1))
- expectedHeaderCIDs := mocks.MockCIDWrapper.Headers
- expectedHeaderCIDs[0].ID = cidWrapper.Headers[0].ID
- expectedHeaderCIDs[0].NodeID = cidWrapper.Headers[0].NodeID
- Expect(cidWrapper.Headers).To(Equal(expectedHeaderCIDs))
+ expectedHeaderCID := mocks.MockCIDWrapper.Header
+ expectedHeaderCID.ID = cidWrapper.Header.ID
+ expectedHeaderCID.NodeID = cidWrapper.Header.NodeID
+ Expect(cidWrapper.Header).To(Equal(expectedHeaderCID))
Expect(len(cidWrapper.Transactions)).To(Equal(2))
Expect(eth.TxModelsContainsCID(cidWrapper.Transactions, mocks.MockCIDWrapper.Transactions[0].CID)).To(BeTrue())
Expect(eth.TxModelsContainsCID(cidWrapper.Transactions, mocks.MockCIDWrapper.Transactions[1].CID)).To(BeTrue())
@@ -246,13 +248,15 @@ var _ = Describe("Retriever", func() {
Expect(eth.ReceiptModelsContainsCID(cidWrapper.Receipts, mocks.MockCIDWrapper.Receipts[1].CID)).To(BeTrue())
Expect(len(cidWrapper.StateNodes)).To(Equal(2))
for _, stateNode := range cidWrapper.StateNodes {
- if stateNode.CID == "mockStateCID1" {
- Expect(stateNode.StateKey).To(Equal(mocks.ContractLeafKey.Hex()))
- Expect(stateNode.Leaf).To(Equal(true))
+ if stateNode.CID == mocks.State1CID.String() {
+ Expect(stateNode.StateKey).To(Equal(common.BytesToHash(mocks.ContractLeafKey).Hex()))
+ Expect(stateNode.NodeType).To(Equal(2))
+ Expect(stateNode.Path).To(Equal([]byte{'\x06'}))
}
- if stateNode.CID == "mockStateCID2" {
- Expect(stateNode.StateKey).To(Equal(mocks.AnotherContractLeafKey.Hex()))
- Expect(stateNode.Leaf).To(Equal(true))
+ if stateNode.CID == mocks.State2CID.String() {
+ Expect(stateNode.StateKey).To(Equal(common.BytesToHash(mocks.AccountLeafKey).Hex()))
+ Expect(stateNode.NodeType).To(Equal(2))
+ Expect(stateNode.Path).To(Equal([]byte{'\x0c'}))
}
}
Expect(len(cidWrapper.StorageNodes)).To(Equal(1))
@@ -266,10 +270,11 @@ var _ = Describe("Retriever", func() {
cids1, empty, err := retriever.Retrieve(rctContractFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
- cidWrapper1, ok := cids1.(*eth.CIDWrapper)
+ Expect(len(cids1)).To(Equal(1))
+ cidWrapper1, ok := cids1[0].(*eth.CIDWrapper)
Expect(ok).To(BeTrue())
Expect(cidWrapper1.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
- Expect(len(cidWrapper1.Headers)).To(Equal(0))
+ Expect(cidWrapper1.Header).To(Equal(eth.HeaderModel{}))
Expect(len(cidWrapper1.Transactions)).To(Equal(0))
Expect(len(cidWrapper1.StateNodes)).To(Equal(0))
Expect(len(cidWrapper1.StorageNodes)).To(Equal(0))
@@ -282,10 +287,11 @@ var _ = Describe("Retriever", func() {
cids2, empty, err := retriever.Retrieve(rctTopicsFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
- cidWrapper2, ok := cids2.(*eth.CIDWrapper)
+ Expect(len(cids2)).To(Equal(1))
+ cidWrapper2, ok := cids2[0].(*eth.CIDWrapper)
Expect(ok).To(BeTrue())
Expect(cidWrapper2.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
- Expect(len(cidWrapper2.Headers)).To(Equal(0))
+ Expect(cidWrapper2.Header).To(Equal(eth.HeaderModel{}))
Expect(len(cidWrapper2.Transactions)).To(Equal(0))
Expect(len(cidWrapper2.StateNodes)).To(Equal(0))
Expect(len(cidWrapper2.StorageNodes)).To(Equal(0))
@@ -298,10 +304,11 @@ var _ = Describe("Retriever", func() {
cids3, empty, err := retriever.Retrieve(rctTopicsAndContractFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
- cidWrapper3, ok := cids3.(*eth.CIDWrapper)
+ Expect(len(cids3)).To(Equal(1))
+ cidWrapper3, ok := cids3[0].(*eth.CIDWrapper)
Expect(ok).To(BeTrue())
Expect(cidWrapper3.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
- Expect(len(cidWrapper3.Headers)).To(Equal(0))
+ Expect(cidWrapper3.Header).To(Equal(eth.HeaderModel{}))
Expect(len(cidWrapper3.Transactions)).To(Equal(0))
Expect(len(cidWrapper3.StateNodes)).To(Equal(0))
Expect(len(cidWrapper3.StorageNodes)).To(Equal(0))
@@ -314,10 +321,11 @@ var _ = Describe("Retriever", func() {
cids4, empty, err := retriever.Retrieve(rctContractsAndTopicFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
- cidWrapper4, ok := cids4.(*eth.CIDWrapper)
+ Expect(len(cids4)).To(Equal(1))
+ cidWrapper4, ok := cids4[0].(*eth.CIDWrapper)
Expect(ok).To(BeTrue())
Expect(cidWrapper4.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
- Expect(len(cidWrapper4.Headers)).To(Equal(0))
+ Expect(cidWrapper4.Header).To(Equal(eth.HeaderModel{}))
Expect(len(cidWrapper4.Transactions)).To(Equal(0))
Expect(len(cidWrapper4.StateNodes)).To(Equal(0))
Expect(len(cidWrapper4.StorageNodes)).To(Equal(0))
@@ -330,26 +338,28 @@ var _ = Describe("Retriever", func() {
cids5, empty, err := retriever.Retrieve(rctsForAllCollectedTrxs, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
- cidWrapper5, ok := cids5.(*eth.CIDWrapper)
+ Expect(len(cids5)).To(Equal(1))
+ cidWrapper5, ok := cids5[0].(*eth.CIDWrapper)
Expect(ok).To(BeTrue())
Expect(cidWrapper5.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
- Expect(len(cidWrapper5.Headers)).To(Equal(0))
+ Expect(cidWrapper5.Header).To(Equal(eth.HeaderModel{}))
Expect(len(cidWrapper5.Transactions)).To(Equal(2))
- Expect(eth.TxModelsContainsCID(cidWrapper5.Transactions, "mockTrxCID1")).To(BeTrue())
- Expect(eth.TxModelsContainsCID(cidWrapper5.Transactions, "mockTrxCID2")).To(BeTrue())
+ Expect(eth.TxModelsContainsCID(cidWrapper5.Transactions, mocks.Trx1CID.String())).To(BeTrue())
+ Expect(eth.TxModelsContainsCID(cidWrapper5.Transactions, mocks.Trx2CID.String())).To(BeTrue())
Expect(len(cidWrapper5.StateNodes)).To(Equal(0))
Expect(len(cidWrapper5.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper5.Receipts)).To(Equal(2))
- Expect(eth.ReceiptModelsContainsCID(cidWrapper5.Receipts, "mockRctCID1")).To(BeTrue())
- Expect(eth.ReceiptModelsContainsCID(cidWrapper5.Receipts, "mockRctCID2")).To(BeTrue())
+ Expect(eth.ReceiptModelsContainsCID(cidWrapper5.Receipts, mocks.Rct1CID.String())).To(BeTrue())
+ Expect(eth.ReceiptModelsContainsCID(cidWrapper5.Receipts, mocks.Rct2CID.String())).To(BeTrue())
cids6, empty, err := retriever.Retrieve(rctsForSelectCollectedTrxs, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
- cidWrapper6, ok := cids6.(*eth.CIDWrapper)
+ Expect(len(cids6)).To(Equal(1))
+ cidWrapper6, ok := cids6[0].(*eth.CIDWrapper)
Expect(ok).To(BeTrue())
Expect(cidWrapper6.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
- Expect(len(cidWrapper6.Headers)).To(Equal(0))
+ Expect(cidWrapper6.Header).To(Equal(eth.HeaderModel{}))
Expect(len(cidWrapper6.Transactions)).To(Equal(1))
expectedTxCID := mocks.MockCIDWrapper.Transactions[1]
expectedTxCID.ID = cidWrapper6.Transactions[0].ID
@@ -366,10 +376,11 @@ var _ = Describe("Retriever", func() {
cids7, empty, err := retriever.Retrieve(stateFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
- cidWrapper7, ok := cids7.(*eth.CIDWrapper)
+ Expect(len(cids7)).To(Equal(1))
+ cidWrapper7, ok := cids7[0].(*eth.CIDWrapper)
Expect(ok).To(BeTrue())
Expect(cidWrapper7.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
- Expect(len(cidWrapper7.Headers)).To(Equal(0))
+ Expect(cidWrapper7.Header).To(Equal(eth.HeaderModel{}))
Expect(len(cidWrapper7.Transactions)).To(Equal(0))
Expect(len(cidWrapper7.Receipts)).To(Equal(0))
Expect(len(cidWrapper7.StorageNodes)).To(Equal(0))
@@ -377,9 +388,10 @@ var _ = Describe("Retriever", func() {
Expect(cidWrapper7.StateNodes[0]).To(Equal(eth.StateNodeModel{
ID: cidWrapper7.StateNodes[0].ID,
HeaderID: cidWrapper7.StateNodes[0].HeaderID,
- Leaf: true,
- StateKey: mocks.ContractLeafKey.Hex(),
- CID: "mockStateCID1",
+ NodeType: 2,
+ StateKey: common.BytesToHash(mocks.AccountLeafKey).Hex(),
+ CID: mocks.State2CID.String(),
+ Path: []byte{'\x0c'},
}))
_, empty, err = retriever.Retrieve(rctTopicsAndContractFilterFail, 1)
diff --git a/pkg/super_node/eth/subscription_config.go b/pkg/super_node/eth/subscription_config.go
index f9c77496..a6d563e6 100644
--- a/pkg/super_node/eth/subscription_config.go
+++ b/pkg/super_node/eth/subscription_config.go
@@ -17,7 +17,6 @@
package eth
import (
- "errors"
"math/big"
"github.com/spf13/viper"
@@ -53,7 +52,8 @@ type TxFilter struct {
// ReceiptFilter contains filter settings for receipts
type ReceiptFilter struct {
- Off bool
+ Off bool
+ // TODO: change this so that we filter for receipts first and we always return the corresponding transaction
MatchTxs bool // turn on to retrieve receipts that pair with retrieved transactions
Contracts []string
Topics [][]string
@@ -70,7 +70,7 @@ type StateFilter struct {
type StorageFilter struct {
Off bool
Addresses []string
- StorageKeys []string
+ StorageKeys []string // these need to be the hashed storage keys themselves, not the slot positions
IntermediateNodes bool
}
@@ -96,13 +96,12 @@ func NewEthSubscriptionConfig() (*SubscriptionSettings, error) {
Src: viper.GetStringSlice("superNode.ethSubscription.txFilter.src"),
Dst: viper.GetStringSlice("superNode.ethSubscription.txFilter.dst"),
}
- // Below defaults to false and one slice of length 0
- // Which means we get all receipts by default
- t := viper.Get("superNode.ethSubscription.receiptFilter.topics")
- topics, ok := t.([][]string)
- if !ok {
- return nil, errors.New("superNode.ethSubscription.receiptFilter.topics needs to be a slice of string slices")
- }
+ // By default all of the topic slices will be empty => match on any/all topics
+ topics := make([][]string, 4)
+ topics[0] = viper.GetStringSlice("superNode.ethSubscription.receiptFilter.topic0s")
+ topics[1] = viper.GetStringSlice("superNode.ethSubscription.receiptFilter.topic1s")
+ topics[2] = viper.GetStringSlice("superNode.ethSubscription.receiptFilter.topic2s")
+ topics[3] = viper.GetStringSlice("superNode.ethSubscription.receiptFilter.topic3s")
sc.ReceiptFilter = ReceiptFilter{
Off: viper.GetBool("superNode.ethSubscription.receiptFilter.off"),
MatchTxs: viper.GetBool("superNode.ethSubscription.receiptFilter.matchTxs"),
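
With the topics now read as four positional string slices, the subscription toml expresses them as separate keys. A hypothetical excerpt matching the viper keys above (the topic0 value is the well-known ERC-20 Transfer event signature, shown purely as an example; empty slices match any/all topics at that position):

[superNode.ethSubscription.receiptFilter]
    off = false
    matchTxs = false
    topic0s = ["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"]
    topic1s = []
    topic2s = []
    topic3s = []
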
diff --git a/pkg/super_node/eth/types.go b/pkg/super_node/eth/types.go
index e30d8ce4..03244814 100644
--- a/pkg/super_node/eth/types.go
+++ b/pkg/super_node/eth/types.go
@@ -17,18 +17,20 @@
package eth
import (
- "encoding/json"
"math/big"
+ "github.com/ethereum/go-ethereum/statediff"
+
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ipfs/go-block-format"
)
-// IPLDPayload is a custom type which packages raw ETH data for publishing to IPFS and filtering to subscribers
+// ConvertedPayload is a custom type which packages raw ETH data for publishing to IPFS and filtering to subscribers
// Returned by PayloadConverter
// Passed to IPLDPublisher and ResponseFilterer
-type IPLDPayload struct {
+type ConvertedPayload struct {
TotalDifficulty *big.Int
Block *types.Block
TxMetaData []TxModel
@@ -38,11 +40,17 @@ type IPLDPayload struct {
StorageNodes map[common.Hash][]TrieNode
}
+// Height satisfies the StreamedIPLDs interface
+func (i ConvertedPayload) Height() int64 {
+ return i.Block.Number().Int64()
+}
+
// Trie struct used to flag node as leaf or not
type TrieNode struct {
- Key common.Hash
- Value []byte
- Leaf bool
+ Path []byte
+ LeafKey common.Hash
+ Value []byte
+ Type statediff.NodeType
}
// CIDPayload is a struct to hold all the CIDs and their associated meta data for indexing in Postgres
@@ -54,6 +62,7 @@ type CIDPayload struct {
TransactionCIDs []TxModel
ReceiptCIDs map[common.Hash]ReceiptModel
StateNodeCIDs []StateNodeModel
+ StateAccounts map[common.Hash]StateAccountModel
StorageNodeCIDs map[common.Hash][]StorageNodeModel
}
@@ -62,7 +71,7 @@ type CIDPayload struct {
// Passed to IPLDFetcher
type CIDWrapper struct {
BlockNumber *big.Int
- Headers []HeaderModel
+ Header HeaderModel
Uncles []UncleModel
Transactions []TxModel
Receipts []ReceiptModel
@@ -70,49 +79,35 @@ type CIDWrapper struct {
StorageNodes []StorageNodeWithStateKeyModel
}
-// IPLDWrapper is used to package raw IPLD block data fetched from IPFS
-// Returned by IPLDFetcher
-// Passed to IPLDResolver
-type IPLDWrapper struct {
- BlockNumber *big.Int
- Headers []blocks.Block
- Uncles []blocks.Block
- Transactions []blocks.Block
- Receipts []blocks.Block
- StateNodes map[common.Hash]blocks.Block
- StorageNodes map[common.Hash]map[common.Hash]blocks.Block
+// IPLDs is used to package raw IPLD block data fetched from IPFS and returned by the server
+// Returned by IPLDFetcher and ResponseFilterer
+type IPLDs struct {
+ BlockNumber *big.Int
+ TotalDifficulty *big.Int
+ Header ipfs.BlockModel
+ Uncles []ipfs.BlockModel
+ Transactions []ipfs.BlockModel
+ Receipts []ipfs.BlockModel
+ StateNodes []StateNode
+ StorageNodes []StorageNode
}
-// StreamResponse holds the data streamed from the super node eth service to the requesting clients
-// Returned by IPLDResolver and ResponseFilterer
-// Passed to client subscriptions
-type StreamResponse struct {
- BlockNumber *big.Int `json:"blockNumber"`
- HeadersRlp [][]byte `json:"headersRlp"`
- UnclesRlp [][]byte `json:"unclesRlp"`
- TransactionsRlp [][]byte `json:"transactionsRlp"`
- ReceiptsRlp [][]byte `json:"receiptsRlp"`
- StateNodesRlp map[common.Hash][]byte `json:"stateNodesRlp"`
- StorageNodesRlp map[common.Hash]map[common.Hash][]byte `json:"storageNodesRlp"`
-
- encoded []byte
- err error
+// Height satisfies the StreamedIPLDs interface
+func (i IPLDs) Height() int64 {
+ return i.BlockNumber.Int64()
}
-func (sr *StreamResponse) ensureEncoded() {
- if sr.encoded == nil && sr.err == nil {
- sr.encoded, sr.err = json.Marshal(sr)
- }
+type StateNode struct {
+ Type statediff.NodeType
+ StateLeafKey common.Hash
+ Path []byte
+ IPLD ipfs.BlockModel
}
-// Length to implement Encoder interface for StateDiff
-func (sr *StreamResponse) Length() int {
- sr.ensureEncoded()
- return len(sr.encoded)
-}
-
-// Encode to implement Encoder interface for StateDiff
-func (sr *StreamResponse) Encode() ([]byte, error) {
- sr.ensureEncoded()
- return sr.encoded, sr.err
+type StorageNode struct {
+ Type statediff.NodeType
+ StateLeafKey common.Hash
+ StorageLeafKey common.Hash
+ Path []byte
+ IPLD ipfs.BlockModel
}
diff --git a/pkg/super_node/helpers.go b/pkg/super_node/helpers.go
index 50f80afd..4a4b077b 100644
--- a/pkg/super_node/helpers.go
+++ b/pkg/super_node/helpers.go
@@ -21,7 +21,7 @@ import log "github.com/sirupsen/logrus"
func sendNonBlockingErr(sub Subscription, err error) {
log.Error(err)
select {
- case sub.PayloadChan <- SubscriptionPayload{nil, err.Error()}:
+ case sub.PayloadChan <- SubscriptionPayload{Data: nil, Err: err.Error(), Flag: EmptyFlag}:
default:
log.Infof("unable to send error to subscription %s", sub.ID)
}
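
For context, the select-with-default used here is Go's standard non-blocking send: a slow or departed subscriber can never stall the service loop. A standalone illustration:

package main

import "fmt"

func main() {
	// Unbuffered channel with no receiver: the send in the first case can
	// never proceed, so select falls through to default instead of blocking.
	sub := make(chan string)
	select {
	case sub <- "error payload":
		fmt.Println("delivered")
	default:
		fmt.Println("dropped; subscription channel has no receiver")
	}
}
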
diff --git a/pkg/super_node/resync/config.go b/pkg/super_node/resync/config.go
new file mode 100644
index 00000000..04c5b118
--- /dev/null
+++ b/pkg/super_node/resync/config.go
@@ -0,0 +1,123 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+package resync
+
+import (
+ "fmt"
+
+ "github.com/spf13/viper"
+
+ "github.com/vulcanize/vulcanizedb/pkg/config"
+ "github.com/vulcanize/vulcanizedb/pkg/eth/core"
+ "github.com/vulcanize/vulcanizedb/pkg/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+ "github.com/vulcanize/vulcanizedb/utils"
+)
+
+// Env variables
+const (
+ RESYNC_CHAIN = "RESYNC_CHAIN"
+ RESYNC_START = "RESYNC_START"
+ RESYNC_STOP = "RESYNC_STOP"
+ RESYNC_BATCH_SIZE = "RESYNC_BATCH_SIZE"
+ RESYNC_BATCH_NUMBER = "RESYNC_BATCH_NUMBER"
+ RESYNC_CLEAR_OLD_CACHE = "RESYNC_CLEAR_OLD_CACHE"
+ RESYNC_TYPE = "RESYNC_TYPE"
+)
+
+// Config holds the parameters needed to perform a resync
+type Config struct {
+ Chain shared.ChainType // The chain type to resync (e.g. Ethereum, Bitcoin)
+ ResyncType shared.DataType // The type of data to resync
+ ClearOldCache bool // Resync will first clear all the data within the range
+
+ // DB info
+ DB *postgres.DB
+ DBConfig config.Database
+ IPFSPath string
+
+ HTTPClient interface{} // Note this client is expected to support the retrieval of the specified data type(s)
+ NodeInfo core.Node // Info for the associated node
+ Ranges [][2]uint64 // The block height ranges to resync
+ BatchSize uint64 // BatchSize for the resync http calls (client has to support batch sizing)
+ BatchNumber uint64 // Number of goroutines used to fetch data concurrently
+
+ Quit chan bool // Channel for shutting down
+}
+
+// NewReSyncConfig fills and returns a resync config from toml parameters
+func NewReSyncConfig() (*Config, error) {
+ c := new(Config)
+ var err error
+
+ viper.BindEnv("resync.start", RESYNC_START)
+ viper.BindEnv("resync.stop", RESYNC_STOP)
+ viper.BindEnv("resync.clearOldCache", RESYNC_CLEAR_OLD_CACHE)
+ viper.BindEnv("resync.type", RESYNC_TYPE)
+ viper.BindEnv("resync.chain", RESYNC_CHAIN)
+ viper.BindEnv("ethereum.httpPath", shared.ETH_HTTP_PATH)
+ viper.BindEnv("bitcoin.httpPath", shared.BTC_HTTP_PATH)
+ viper.BindEnv("resync.batchSize", RESYNC_BATCH_SIZE)
+ viper.BindEnv("resync.batchNumber", RESYNC_BATCH_NUMBER)
+
+ start := uint64(viper.GetInt64("resync.start"))
+ stop := uint64(viper.GetInt64("resync.stop"))
+ c.Ranges = [][2]uint64{{start, stop}}
+ c.ClearOldCache = viper.GetBool("resync.clearOldCache")
+
+ c.IPFSPath, err = shared.GetIPFSPath()
+ if err != nil {
+ return nil, err
+ }
+ resyncType := viper.GetString("resync.type")
+ c.ResyncType, err = shared.GenerateResyncTypeFromString(resyncType)
+ if err != nil {
+ return nil, err
+ }
+ chain := viper.GetString("resync.chain")
+ c.Chain, err = shared.NewChainType(chain)
+ if err != nil {
+ return nil, err
+ }
+ if ok, err := shared.SupportedResyncType(c.ResyncType, c.Chain); !ok {
+ if err != nil {
+ return nil, err
+ }
+ return nil, fmt.Errorf("chain type %s does not support data type %s", c.Chain.String(), c.ResyncType.String())
+ }
+
+ switch c.Chain {
+ case shared.Ethereum:
+ ethHTTP := viper.GetString("ethereum.httpPath")
+ c.NodeInfo, c.HTTPClient, err = shared.GetEthNodeAndClient(fmt.Sprintf("http://%s", ethHTTP))
+ if err != nil {
+ return nil, err
+ }
+ case shared.Bitcoin:
+ btcHTTP := viper.GetString("bitcoin.httpPath")
+ c.NodeInfo, c.HTTPClient = shared.GetBtcNodeAndClient(btcHTTP)
+ }
+
+ c.DBConfig.Init()
+ db := utils.LoadPostgres(c.DBConfig, c.NodeInfo)
+ c.DB = &db
+
+ c.Quit = make(chan bool)
+ c.BatchSize = uint64(viper.GetInt64("resync.batchSize"))
+ c.BatchNumber = uint64(viper.GetInt64("resync.batchNumber"))
+ return c, nil
+}
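
A hypothetical resync section of the .toml, matching the viper keys bound above (the chain and type values must satisfy NewChainType and GenerateResyncTypeFromString; those shown are illustrative only). Note that GetEthNodeAndClient is handed "http://" plus the configured path, so httpPath is a bare host:port:

[resync]
    chain = "ethereum"
    type = "state"
    start = 0
    stop = 1000
    batchSize = 5
    batchNumber = 50
    clearOldCache = true

[ethereum]
    httpPath = "127.0.0.1:8545"
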
diff --git a/pkg/super_node/resync/service.go b/pkg/super_node/resync/service.go
new file mode 100644
index 00000000..18dacb21
--- /dev/null
+++ b/pkg/super_node/resync/service.go
@@ -0,0 +1,198 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package resync
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+)
+
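+// Resync is the interface for the resync service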
+type Resync interface {
+ Resync() error
+}
+
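+// Service is the underlying struct for the resync service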
+type Service struct {
+ // Interface for converting payloads into IPLD object payloads
+ Converter shared.PayloadConverter
+ // Interface for publishing the IPLD payloads to IPFS
+ Publisher shared.IPLDPublisher
+ // Interface for indexing the CIDs of the published IPLDs in Postgres
+ Indexer shared.CIDIndexer
+ // Interface for searching and retrieving CIDs from Postgres index
+ Retriever shared.CIDRetriever
+ // Interface for fetching payloads at historical blocks over HTTP
+ Fetcher shared.PayloadFetcher
+ // Interface for cleaning out data before resyncing (if clearOldCache is on)
+ Cleaner shared.Cleaner
+ // Size of batch fetches
+ BatchSize uint64
+ // Number of goroutines
+ BatchNumber int64
+ // Channel for receiving quit signal
+ QuitChan chan bool
+ // Chain type
+ chain shared.ChainType
+ // Resync data type
+ data shared.DataType
+ // Resync ranges
+ ranges [][2]uint64
+ // Flag to turn on or off old cache destruction
+ clearOldCache bool
+}
+
+// NewResyncService creates and returns a resync service from the provided settings
+func NewResyncService(settings *Config) (Resync, error) {
+ publisher, err := super_node.NewIPLDPublisher(settings.Chain, settings.IPFSPath)
+ if err != nil {
+ return nil, err
+ }
+ indexer, err := super_node.NewCIDIndexer(settings.Chain, settings.DB)
+ if err != nil {
+ return nil, err
+ }
+ converter, err := super_node.NewPayloadConverter(settings.Chain)
+ if err != nil {
+ return nil, err
+ }
+ retriever, err := super_node.NewCIDRetriever(settings.Chain, settings.DB)
+ if err != nil {
+ return nil, err
+ }
+ fetcher, err := super_node.NewPaylaodFetcher(settings.Chain, settings.HTTPClient)
+ if err != nil {
+ return nil, err
+ }
+ cleaner, err := super_node.NewCleaner(settings.Chain, settings.DB)
+ if err != nil {
+ return nil, err
+ }
+ batchSize := settings.BatchSize
+ if batchSize == 0 {
+ batchSize = super_node.DefaultMaxBatchSize
+ }
+ batchNumber := int64(settings.BatchNumber)
+ if batchNumber == 0 {
+ batchNumber = super_node.DefaultMaxBatchNumber
+ }
+ return &Service{
+ Indexer: indexer,
+ Converter: converter,
+ Publisher: publisher,
+ Retriever: retriever,
+ Fetcher: fetcher,
+ Cleaner: cleaner,
+ BatchSize: batchSize,
+ BatchNumber: batchNumber,
+ QuitChan: settings.Quit,
+ chain: settings.Chain,
+ ranges: settings.Ranges,
+ data: settings.ResyncType,
+ clearOldCache: settings.ClearOldCache,
+ }, nil
+}
+
+func (rs *Service) Resync() error {
+ if rs.clearOldCache {
+ logrus.Infof("cleaning out old data from Postgres")
+ if err := rs.Cleaner.Clean(rs.ranges, rs.data); err != nil {
+ return fmt.Errorf("%s %s data resync cleaning error: %v", rs.chain.String(), rs.data.String(), err)
+ }
+ }
+ for _, rng := range rs.ranges {
+ if err := rs.resync(rng[0], rng[1]); err != nil {
+ return fmt.Errorf("%s %s data resync initialization error: %v", rs.chain.String(), rs.data.String(), err)
+ }
+ }
+ return nil
+}
+
+func (rs *Service) resync(startingBlock, endingBlock uint64) error {
+ logrus.Infof("resyncing %s data from %d to %d", rs.chain.String(), startingBlock, endingBlock)
+ if endingBlock < startingBlock {
+ return fmt.Errorf("%s resync range ending block number needs to be greater than the starting block number", rs.chain.String())
+ }
+ // break the range up into bins of smaller ranges
+ blockRangeBins, err := utils.GetBlockHeightBins(startingBlock, endingBlock, rs.BatchSize)
+ if err != nil {
+ return err
+ }
+ // int64 for atomic incrementing and decrementing to track the number of active processing goroutines we have
+ var activeCount int64
+ // channel for processing goroutines to signal when they are done
+ processingDone := make(chan bool)
+ forwardDone := make(chan bool)
+
+ // for each block range bin spin up a goroutine to batch fetch and process chain data for that range
+ go func() {
+ for _, blockHeights := range blockRangeBins {
+ // if we have reached our limit of active goroutines
+ // wait for one to finish before starting the next
+ if atomic.AddInt64(&activeCount, 1) > rs.BatchNumber {
+ // this blocks until a process signals it has finished
+ <-forwardDone
+ }
+ go func(blockHeights []uint64) {
+ payloads, err := rs.Fetcher.FetchAt(blockHeights)
+ if err != nil {
+ logrus.Errorf("%s resync fetcher error: %s", rs.chain.String(), err.Error())
+ }
+ for _, payload := range payloads {
+ ipldPayload, err := rs.Converter.Convert(payload)
+ if err != nil {
+ logrus.Errorf("%s resync converter error: %s", rs.chain.String(), err.Error())
+ continue
+ }
+ cidPayload, err := rs.Publisher.Publish(ipldPayload)
+ if err != nil {
+ logrus.Errorf("%s resync publisher error: %s", rs.chain.String(), err.Error())
+ continue
+ }
+ if err := rs.Indexer.Index(cidPayload); err != nil {
+ logrus.Errorf("%s resync indexer error: %s", rs.chain.String(), err.Error())
+ }
+ }
+ // when this goroutine is done, send out a signal
+ logrus.Infof("finished %s resync section from %d to %d", rs.chain.String(), blockHeights[0], blockHeights[len(blockHeights)-1])
+ processingDone <- true
+ }(blockHeights)
+ }
+ }()
+
+ // listen on the processingDone chan and
+ // keep track of the number of processing goroutines that have finished
+ // when they have all finished, return
+ goroutinesFinished := 0
+ for {
+ select {
+ case <-processingDone:
+ atomic.AddInt64(&activeCount, -1)
+ select {
+ // if we are waiting for a process to finish, signal that one has
+ case forwardDone <- true:
+ default:
+ }
+ goroutinesFinished++
+ if goroutinesFinished >= len(blockRangeBins) {
+ return nil
+ }
+ }
+ }
+}
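
Reviewer note: the activeCount / forwardDone / processingDone interplay above caps the number of in-flight fetch goroutines at BatchNumber. A minimal standalone sketch of the same throttling pattern, using only the standard library:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	const batchNumber = 2 // cap on concurrently processing bins
	bins := [][]uint64{{0, 1}, {2, 3}, {4, 5}, {6, 7}}

	var activeCount int64
	processingDone := make(chan bool)
	forwardDone := make(chan bool)

	go func() {
		for _, bin := range bins {
			// once the cap is reached, block until a worker signals completion
			if atomic.AddInt64(&activeCount, 1) > batchNumber {
				<-forwardDone
			}
			go func(bin []uint64) {
				fmt.Println("processing bin", bin)
				processingDone <- true
			}(bin)
		}
	}()

	for finished := 0; finished < len(bins); finished++ {
		<-processingDone
		atomic.AddInt64(&activeCount, -1)
		select {
		case forwardDone <- true: // unblock a waiting spawn, if any
		default:
		}
	}
}
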
diff --git a/pkg/super_node/service.go b/pkg/super_node/service.go
index 943cc275..79eb0270 100644
--- a/pkg/super_node/service.go
+++ b/pkg/super_node/service.go
@@ -43,16 +43,18 @@ const (
type SuperNode interface {
// APIs(), Protocols(), Start() and Stop()
node.Service
- // Main event loop for syncAndPublish processes
- SyncAndPublish(wg *sync.WaitGroup, forwardPayloadChan chan<- shared.StreamedIPLDs) error
- // Main event loop for handling client pub-sub
- ScreenAndServe(wg *sync.WaitGroup, screenAndServePayload <-chan shared.StreamedIPLDs)
- // Method to subscribe to receive state diff processing output
+ // Data processing event loop
+ ProcessData(wg *sync.WaitGroup, forwardPayloadChan chan<- shared.ConvertedData) error
+ // Pub-Sub handling event loop
+ FilterAndServe(wg *sync.WaitGroup, screenAndServePayload <-chan shared.ConvertedData)
+ // Method to subscribe to the service
Subscribe(id rpc.ID, sub chan<- SubscriptionPayload, quitChan chan<- bool, params shared.SubscriptionSettings)
- // Method to unsubscribe from state diff processing
+ // Method to unsubscribe from the service
Unsubscribe(id rpc.ID)
- // Method to access the node info for this service
- Node() core.Node
+ // Method to access the node info for the service
+ Node() *core.Node
+ // Method to access chain type
+ Chain() shared.ChainType
}
// Service is the underlying struct for the super node
@@ -73,8 +75,6 @@ type Service struct {
IPLDFetcher shared.IPLDFetcher
// Interface for searching and retrieving CIDs from Postgres index
Retriever shared.CIDRetriever
- // Interface for resolving IPLDs to their data types
- Resolver shared.IPLDResolver
// Chan the processor uses to subscribe to payloads from the Streamer
PayloadChan chan shared.RawChainData
// Used to signal shutdown of the service
@@ -84,7 +84,7 @@ type Service struct {
// A mapping of subscription params hash to the corresponding subscription params
SubscriptionTypes map[common.Hash]shared.SubscriptionSettings
// Info for the Geth node that this super node is working with
- NodeInfo core.Node
+ NodeInfo *core.Node
// Number of publishAndIndex workers
WorkerPoolSize int
// chain type for this service
@@ -96,7 +96,7 @@ type Service struct {
}
// NewSuperNode creates a new super_node.Interface using an underlying super_node.Service struct
-func NewSuperNode(settings *shared.SuperNodeConfig) (SuperNode, error) {
+func NewSuperNode(settings *Config) (SuperNode, error) {
sn := new(Service)
var err error
// If we are syncing, initialize the needed interfaces
@@ -132,16 +132,12 @@ func NewSuperNode(settings *shared.SuperNodeConfig) (SuperNode, error) {
if err != nil {
return nil, err
}
- sn.Resolver, err = NewIPLDResolver(settings.Chain)
- if err != nil {
- return nil, err
- }
}
sn.QuitChan = settings.Quit
sn.Subscriptions = make(map[common.Hash]map[rpc.ID]Subscription)
sn.SubscriptionTypes = make(map[common.Hash]shared.SubscriptionSettings)
sn.WorkerPoolSize = settings.Workers
- sn.NodeInfo = settings.NodeInfo
+ sn.NodeInfo = &settings.NodeInfo
sn.ipfsPath = settings.IPFSPath
sn.chain = settings.Chain
sn.db = settings.DB
@@ -155,6 +151,7 @@ func (sap *Service) Protocols() []p2p.Protocol {
// APIs returns the RPC descriptors the super node service offers
func (sap *Service) APIs() []rpc.API {
+ infoAPI := NewInfoAPI()
apis := []rpc.API{
{
Namespace: APIName,
@@ -162,6 +159,24 @@ func (sap *Service) APIs() []rpc.API {
Service: NewPublicSuperNodeAPI(sap),
Public: true,
},
+ {
+ Namespace: "rpc",
+ Version: APIVersion,
+ Service: infoAPI,
+ Public: true,
+ },
+ {
+ Namespace: "net",
+ Version: APIVersion,
+ Service: infoAPI,
+ Public: true,
+ },
+ {
+ Namespace: "admin",
+ Version: APIVersion,
+ Service: infoAPI,
+ Public: true,
+ },
}
chainAPI, err := NewPublicAPI(sap.chain, sap.db, sap.ipfsPath)
if err != nil {
@@ -171,10 +186,11 @@ func (sap *Service) APIs() []rpc.API {
return append(apis, chainAPI)
}
-// SyncAndPublish is the backend processing loop which streams data, converts it to iplds, publishes them to ipfs, and indexes their cids
-// This continues on no matter if or how many subscribers there are, it then forwards the data to the ScreenAndServe() loop
-// which filters and sends relevant data to client subscriptions, if there are any
-func (sap *Service) SyncAndPublish(wg *sync.WaitGroup, screenAndServePayload chan<- shared.StreamedIPLDs) error {
+// ProcessData streams incoming raw chain data and converts it for further processing
+// It forwards the converted data to the publishAndIndex process(es) it spins up
+// It also forwards the converted data to a FilterAndServe process if there is one listening on the passed screenAndServePayload channel
+// This continues regardless of whether or how many subscribers there are
+func (sap *Service) ProcessData(wg *sync.WaitGroup, screenAndServePayload chan<- shared.ConvertedData) error {
sub, err := sap.Streamer.Stream(sap.PayloadChan)
if err != nil {
return err
@@ -182,7 +198,7 @@ func (sap *Service) SyncAndPublish(wg *sync.WaitGroup, screenAndServePayload cha
wg.Add(1)
// Channels for forwarding data to the publishAndIndex workers
- publishAndIndexPayload := make(chan shared.StreamedIPLDs, PayloadChanBufferSize)
+ publishAndIndexPayload := make(chan shared.ConvertedData, PayloadChanBufferSize)
// publishAndIndex worker pool to handle publishing and indexing concurrently, while
// limiting the number of Postgres connections we can possibly open so as to prevent error
for i := 0; i < sap.WorkerPoolSize; i++ {
@@ -194,9 +210,10 @@ func (sap *Service) SyncAndPublish(wg *sync.WaitGroup, screenAndServePayload cha
case payload := <-sap.PayloadChan:
ipldPayload, err := sap.Converter.Convert(payload)
if err != nil {
- log.Error(err)
+ log.Errorf("super node conversion error for chain %s: %v", sap.chain.String(), err)
continue
}
+ log.Infof("processing %s data streamed at head height %d", sap.chain.String(), ipldPayload.Height())
// If we have a ScreenAndServe process running, forward the iplds to it
select {
case screenAndServePayload <- ipldPayload:
@@ -205,88 +222,105 @@ func (sap *Service) SyncAndPublish(wg *sync.WaitGroup, screenAndServePayload cha
// Forward the payload to the publishAndIndex workers
publishAndIndexPayload <- ipldPayload
case err := <-sub.Err():
- log.Error(err)
+ log.Errorf("super node subscription error for chain %s: %v", sap.chain.String(), err)
case <-sap.QuitChan:
- log.Info("quiting SyncAndPublish process")
+ log.Infof("quiting %s SyncAndPublish process", sap.chain.String())
wg.Done()
return
}
}
}()
- log.Info("syncAndPublish goroutine successfully spun up")
+ log.Infof("%s ProcessData goroutine successfully spun up", sap.chain.String())
return nil
}
-func (sap *Service) publishAndIndex(id int, publishAndIndexPayload <-chan shared.StreamedIPLDs) {
+// publishAndIndex is spun up by ProcessData and receives converted chain data from that process
+// it publishes this data to IPFS and indexes the resulting CIDs with useful metadata in Postgres
+func (sap *Service) publishAndIndex(id int, publishAndIndexPayload <-chan shared.ConvertedData) {
go func() {
for {
select {
case payload := <-publishAndIndexPayload:
cidPayload, err := sap.Publisher.Publish(payload)
if err != nil {
- log.Errorf("worker %d error: %v", id, err)
+ log.Errorf("super node publishAndIndex worker %d error for chain %s: %v", id, sap.chain.String(), err)
continue
}
if err := sap.Indexer.Index(cidPayload); err != nil {
- log.Errorf("worker %d error: %v", id, err)
+ log.Errorf("super node publishAndIndex worker %d error for chain %s: %v", id, sap.chain.String(), err)
}
}
}
}()
- log.Info("publishAndIndex goroutine successfully spun up")
+ log.Debugf("%s publishAndIndex goroutine successfully spun up", sap.chain.String())
}
-// ScreenAndServe is the loop used to screen data streamed from the state diffing eth node
-// and send the appropriate portions of it to a requesting client subscription, according to their subscription configuration
-func (sap *Service) ScreenAndServe(wg *sync.WaitGroup, screenAndServePayload <-chan shared.StreamedIPLDs) {
+// FilterAndServe listens for incoming converted data on the screenAndServePayload channel from the ProcessData process
+// It filters and sends this data to any subscribers to the service
+// This process can be stood up alone, without a screenAndServePayload channel attached to a ProcessData process,
+// and it will hang on the WaitGroup indefinitely, allowing the Service to serve historical data requests only
+func (sap *Service) FilterAndServe(wg *sync.WaitGroup, screenAndServePayload <-chan shared.ConvertedData) {
wg.Add(1)
go func() {
for {
select {
case payload := <-screenAndServePayload:
- sap.sendResponse(payload)
+ sap.filterAndServe(payload)
case <-sap.QuitChan:
- log.Info("quiting ScreenAndServe process")
+ log.Infof("quiting %s ScreenAndServe process", sap.chain.String())
wg.Done()
return
}
}
}()
- log.Info("screenAndServe goroutine successfully spun up")
+ log.Infof("%s FilterAndServe goroutine successfully spun up", sap.chain.String())
}
-func (sap *Service) sendResponse(payload shared.StreamedIPLDs) {
+// filterAndServe filters the payload according to each subscription type and sends to the subscriptions
+func (sap *Service) filterAndServe(payload shared.ConvertedData) {
+ log.Debugf("Sending %s payload to subscriptions", sap.chain.String())
sap.Lock()
for ty, subs := range sap.Subscriptions {
// Retrieve the subscription parameters for this subscription type
subConfig, ok := sap.SubscriptionTypes[ty]
if !ok {
- log.Errorf("subscription configuration for subscription type %s not available", ty.Hex())
+ log.Errorf("super node %s subscription configuration for subscription type %s not available", sap.chain.String(), ty.Hex())
+ sap.closeType(ty)
+ continue
+ }
+ if subConfig.EndingBlock().Int64() > 0 && subConfig.EndingBlock().Int64() < payload.Height() {
+ // We are past the ending block for this subscription type;
+ // close it, and continue to the next
sap.closeType(ty)
continue
}
response, err := sap.Filterer.Filter(subConfig, payload)
if err != nil {
- log.Error(err)
+ log.Errorf("super node filtering error for chain %s: %v", sap.chain.String(), err)
sap.closeType(ty)
continue
}
+ responseRLP, err := rlp.EncodeToBytes(response)
+ if err != nil {
+ log.Errorf("super node rlp encoding error for chain %s: %v", sap.chain.String(), err)
+ continue
+ }
for id, sub := range subs {
select {
- case sub.PayloadChan <- SubscriptionPayload{response, ""}:
- log.Infof("sending super node payload to subscription %s", id)
+ case sub.PayloadChan <- SubscriptionPayload{Data: responseRLP, Err: "", Flag: EmptyFlag, Height: response.Height()}:
+ log.Debugf("sending super node %s payload to subscription %s", sap.chain.String(), id)
default:
- log.Infof("unable to send payload to subscription %s; channel has no receiver", id)
+ log.Infof("unable to send %s payload to subscription %s; channel has no receiver", sap.chain.String(), id)
}
}
}
sap.Unlock()
}
-// Subscribe is used by the API to subscribe to the service loop
+// Subscribe is used by the API to remotely subscribe to the service loop
// The params must be rlp serializable and satisfy the SubscriptionSettings() interface
func (sap *Service) Subscribe(id rpc.ID, sub chan<- SubscriptionPayload, quitChan chan<- bool, params shared.SubscriptionSettings) {
- log.Info("Subscribing to the super node service")
+ log.Infof("New %s subscription %s", sap.chain.String(), id)
subscription := Subscription{
ID: id,
PayloadChan: sub,
@@ -297,7 +331,7 @@ func (sap *Service) Subscribe(id rpc.ID, sub chan<- SubscriptionPayload, quitCha
sendNonBlockingQuit(subscription)
return
}
- // Subscription type is defined as the hash of the subscription settings
+ // Subscription type is defined as the hash of the rlp-serialized subscription settings
by, err := rlp.EncodeToBytes(params)
if err != nil {
sendNonBlockingErr(subscription, err)
@@ -305,15 +339,6 @@ func (sap *Service) Subscribe(id rpc.ID, sub chan<- SubscriptionPayload, quitCha
return
}
subscriptionType := crypto.Keccak256Hash(by)
- // If the subscription requests a backfill, use the Postgres index to lookup and retrieve historical data
- // Otherwise we only filter new data as it is streamed in from the state diffing geth node
- if params.HistoricalData() || params.HistoricalDataOnly() {
- if err := sap.backFill(subscription, id, params); err != nil {
- sendNonBlockingErr(subscription, err)
- sendNonBlockingQuit(subscription)
- return
- }
- }
if !params.HistoricalDataOnly() {
// Add subscriber
sap.Lock()
@@ -324,10 +349,20 @@ func (sap *Service) Subscribe(id rpc.ID, sub chan<- SubscriptionPayload, quitCha
sap.SubscriptionTypes[subscriptionType] = params
sap.Unlock()
}
+ // If the subscription requests a backfill, use the Postgres index to lookup and retrieve historical data
+ // Otherwise we only filter new data as it is streamed in from the state diffing geth node
+ if params.HistoricalData() || params.HistoricalDataOnly() {
+ if err := sap.sendHistoricalData(subscription, id, params); err != nil {
+ sendNonBlockingErr(subscription, fmt.Errorf("super node subscriber backfill error for chain %s: %v", sap.chain.String(), err))
+ sendNonBlockingQuit(subscription)
+ return
+ }
+ }
}
-func (sap *Service) backFill(sub Subscription, id rpc.ID, params shared.SubscriptionSettings) error {
- log.Debug("sending historical data for subscriber", id)
+// sendHistoricalData sends historical data to the requesting subscription
+func (sap *Service) sendHistoricalData(sub Subscription, id rpc.ID, params shared.SubscriptionSettings) error {
+ log.Infof("Sending %s historical data to subscription %s", sap.chain.String(), id)
// Retrieve cached CIDs relevant to this subscriber
var endingBlock int64
var startingBlock int64
@@ -346,42 +381,51 @@ func (sap *Service) backFill(sub Subscription, id rpc.ID, params shared.Subscrip
if endingBlock > params.EndingBlock().Int64() && params.EndingBlock().Int64() > 0 && params.EndingBlock().Int64() > startingBlock {
endingBlock = params.EndingBlock().Int64()
}
- log.Debug("historical data starting block:", params.StartingBlock())
- log.Debug("histocial data ending block:", endingBlock)
+ log.Debugf("%s historical data starting block: %d", sap.chain.String(), params.StartingBlock().Int64())
+ log.Debugf("%s historical data ending block: %d", sap.chain.String(), endingBlock)
go func() {
for i := startingBlock; i <= endingBlock; i++ {
- cidWrapper, empty, err := sap.Retriever.Retrieve(params, i)
+ cidWrappers, empty, err := sap.Retriever.Retrieve(params, i)
if err != nil {
- sendNonBlockingErr(sub, fmt.Errorf("CID Retrieval error at block %d\r%s", i, err.Error()))
+ sendNonBlockingErr(sub, fmt.Errorf("super node %s CID Retrieval error at block %d\r%s", sap.chain.String(), i, err.Error()))
continue
}
if empty {
continue
}
- blocksWrapper, err := sap.IPLDFetcher.Fetch(cidWrapper)
- if err != nil {
- sendNonBlockingErr(sub, fmt.Errorf("IPLD Fetching error at block %d\r%s", i, err.Error()))
- continue
- }
- backFillIplds, err := sap.Resolver.Resolve(blocksWrapper)
- if err != nil {
- sendNonBlockingErr(sub, fmt.Errorf("IPLD Resolving error at block %d\r%s", i, err.Error()))
- continue
- }
- select {
- case sub.PayloadChan <- SubscriptionPayload{backFillIplds, ""}:
- log.Infof("sending super node historical data payload to subscription %s", id)
- default:
- log.Infof("unable to send back-fill payload to subscription %s; channel has no receiver", id)
+ for _, cids := range cidWrappers {
+ response, err := sap.IPLDFetcher.Fetch(cids)
+ if err != nil {
+ sendNonBlockingErr(sub, fmt.Errorf("super node %s IPLD Fetching error at block %d\r%s", sap.chain.String(), i, err.Error()))
+ continue
+ }
+ responseRLP, err := rlp.EncodeToBytes(response)
+ if err != nil {
+ log.Error(err)
+ continue
+ }
+ select {
+ case sub.PayloadChan <- SubscriptionPayload{Data: responseRLP, Err: "", Flag: EmptyFlag, Height: response.Height()}:
+ log.Debugf("sending super node historical data payload to %s subscription %s", sap.chain.String(), id)
+ default:
+ log.Infof("unable to send back-fill payload to %s subscription %s; channel has no receiver", sap.chain.String(), id)
+ }
}
}
+ // when we are done backfilling, send an empty payload with the BackFillCompleteFlag to signal completion
+ select {
+ case sub.PayloadChan <- SubscriptionPayload{Data: nil, Err: "", Flag: BackFillCompleteFlag}:
+ log.Debugf("sending backfill completion notice to %s subscription %s", sap.chain.String(), id)
+ default:
+ log.Infof("unable to send backfill completion notice to %s subscription %s", sap.chain.String(), id)
+ }
}()
return nil
}
-// Unsubscribe is used to unsubscribe to the StateDiffingService loop
+// Unsubscribe is used by the API to remotely unsubscribe from the super node service loop
func (sap *Service) Unsubscribe(id rpc.ID) {
- log.Info("Unsubscribing from the super node service")
+ log.Infof("Unsubscribing %s from the %s super node service", id, sap.chain.String())
sap.Lock()
for ty := range sap.Subscriptions {
delete(sap.Subscriptions[ty], id)
@@ -395,20 +439,22 @@ func (sap *Service) Unsubscribe(id rpc.ID) {
}
// Start is used to begin the service
+// This is mostly just to satisfy the node.Service interface
func (sap *Service) Start(*p2p.Server) error {
- log.Info("Starting super node service")
+ log.Infof("Starting %s super node service", sap.chain.String())
wg := new(sync.WaitGroup)
- payloadChan := make(chan shared.StreamedIPLDs, PayloadChanBufferSize)
- if err := sap.SyncAndPublish(wg, payloadChan); err != nil {
+ payloadChan := make(chan shared.ConvertedData, PayloadChanBufferSize)
+ if err := sap.ProcessData(wg, payloadChan); err != nil {
return err
}
- sap.ScreenAndServe(wg, payloadChan)
+ sap.FilterAndServe(wg, payloadChan)
return nil
}
// Stop is used to close down the service
+// This is mostly just to satisfy the node.Service interface
func (sap *Service) Stop() error {
- log.Info("Stopping super node service")
+ log.Infof("Stopping %s super node service", sap.chain.String())
sap.Lock()
close(sap.QuitChan)
sap.close()
@@ -417,13 +463,19 @@ func (sap *Service) Stop() error {
}
// Node returns the node info for this service
-func (sap *Service) Node() core.Node {
+func (sap *Service) Node() *core.Node {
return sap.NodeInfo
}
+// Chain returns the chain type for this service
+func (sap *Service) Chain() shared.ChainType {
+ return sap.chain
+}
+
// close is used to close all listening subscriptions
// close needs to be called with subscription access locked
func (sap *Service) close() {
+ log.Infof("Closing all %s subscriptions", sap.chain.String())
for subType, subs := range sap.Subscriptions {
for _, sub := range subs {
sendNonBlockingQuit(sub)
@@ -436,6 +488,7 @@ func (sap *Service) close() {
// closeType is used to close all subscriptions of given type
// closeType needs to be called with subscription access locked
func (sap *Service) closeType(subType common.Hash) {
+ log.Infof("Closing all %s subscriptions of type %s", sap.chain.String(), subType.String())
subs := sap.Subscriptions[subType]
for _, sub := range subs {
sendNonBlockingQuit(sub)
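
Since both the live path (filterAndServe) and the backfill path (sendHistoricalData) now rlp-encode responses before sending, subscribers receive opaque bytes plus a Flag and Height. A hedged client-side sketch for an eth-chain subscription, assuming the SubscriptionPayload shape used above and that the eth filter response decodes as eth.IPLDs:

package example

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"

	"github.com/vulcanize/vulcanizedb/pkg/super_node"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)

// handlePayload decodes a single SubscriptionPayload received off the
// subscription channel. The Flag check mirrors the BackFillCompleteFlag
// notice sent at the end of sendHistoricalData.
func handlePayload(payload super_node.SubscriptionPayload) error {
	if payload.Err != "" {
		return errors.New(payload.Err)
	}
	if payload.Flag == super_node.BackFillCompleteFlag {
		fmt.Println("backfill finished; only new head data from here on")
		return nil
	}
	var iplds eth.IPLDs
	if err := rlp.DecodeBytes(payload.Data, &iplds); err != nil {
		return err
	}
	fmt.Printf("received eth payload at height %d\n", iplds.Height())
	return nil
}
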
diff --git a/pkg/super_node/service_test.go b/pkg/super_node/service_test.go
index c4568ea6..eb1b5c5e 100644
--- a/pkg/super_node/service_test.go
+++ b/pkg/super_node/service_test.go
@@ -51,7 +51,7 @@ var _ = Describe("Service", func() {
ReturnErr: nil,
}
mockConverter := &mocks.PayloadConverter{
- ReturnIPLDPayload: mocks.MockIPLDPayload,
+ ReturnIPLDPayload: mocks.MockConvertedPayload,
ReturnErr: nil,
}
processor := &super_node.Service{
@@ -63,7 +63,7 @@ var _ = Describe("Service", func() {
QuitChan: quitChan,
WorkerPoolSize: 1,
}
- err := processor.SyncAndPublish(wg, nil)
+ err := processor.ProcessData(wg, nil)
Expect(err).ToNot(HaveOccurred())
time.Sleep(2 * time.Second)
quitChan <- true
@@ -71,7 +71,7 @@ var _ = Describe("Service", func() {
Expect(mockConverter.PassedStatediffPayload).To(Equal(mocks.MockStateDiffPayload))
Expect(len(mockCidIndexer.PassedCIDPayload)).To(Equal(1))
Expect(mockCidIndexer.PassedCIDPayload[0]).To(Equal(mocks.MockCIDPayload))
- Expect(mockPublisher.PassedIPLDPayload).To(Equal(mocks.MockIPLDPayload))
+ Expect(mockPublisher.PassedIPLDPayload).To(Equal(mocks.MockConvertedPayload))
Expect(mockStreamer.PassedPayloadChan).To(Equal(payloadChan))
})
})
diff --git a/pkg/super_node/shared/chain_type.go b/pkg/super_node/shared/chain_type.go
index 90faf946..83445192 100644
--- a/pkg/super_node/shared/chain_type.go
+++ b/pkg/super_node/shared/chain_type.go
@@ -25,7 +25,7 @@ import (
type ChainType int
const (
- Unknown ChainType = iota
+ UnknownChain ChainType = iota
Ethereum
Bitcoin
Omni
@@ -66,6 +66,6 @@ func NewChainType(name string) (ChainType, error) {
case "omni":
return Omni, nil
default:
- return Unknown, errors.New("invalid name for chain")
+ return UnknownChain, errors.New("invalid name for chain")
}
}
diff --git a/pkg/super_node/shared/config.go b/pkg/super_node/shared/config.go
deleted file mode 100644
index 9b50a4a7..00000000
--- a/pkg/super_node/shared/config.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-
-package shared
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "time"
-
- "github.com/btcsuite/btcd/rpcclient"
- "github.com/ethereum/go-ethereum/ethclient"
- "github.com/ethereum/go-ethereum/rpc"
- "github.com/spf13/viper"
-
- "github.com/vulcanize/vulcanizedb/pkg/config"
- "github.com/vulcanize/vulcanizedb/pkg/eth"
- "github.com/vulcanize/vulcanizedb/pkg/eth/client"
- vRpc "github.com/vulcanize/vulcanizedb/pkg/eth/converters/rpc"
- "github.com/vulcanize/vulcanizedb/pkg/eth/core"
- "github.com/vulcanize/vulcanizedb/pkg/eth/node"
- "github.com/vulcanize/vulcanizedb/pkg/postgres"
- "github.com/vulcanize/vulcanizedb/utils"
-)
-
-// SuperNodeConfig struct
-type SuperNodeConfig struct {
- // Ubiquitous fields
- Chain ChainType
- IPFSPath string
- DB *postgres.DB
- DBConfig config.Database
- Quit chan bool
- // Server fields
- Serve bool
- WSEndpoint string
- HTTPEndpoint string
- IPCEndpoint string
- // Sync params
- Sync bool
- Workers int
- WSClient interface{}
- NodeInfo core.Node
- // Backfiller params
- BackFill bool
- HTTPClient interface{}
- Frequency time.Duration
- BatchSize uint64
-}
-
-// NewSuperNodeConfigs is used to initialize multiple SuperNode configs from a single config .toml file
-// Separate chain supernode instances need to be ran in the same process in order to avoid lock contention on the ipfs repository
-func NewSuperNodeConfigs() ([]*SuperNodeConfig, error) {
- chains := viper.GetStringSlice("superNode.chains")
- configs := make([]*SuperNodeConfig, len(chains))
- var err error
- ipfsPath := viper.GetString("superNode.ipfsPath")
- if ipfsPath == "" {
- home, err := os.UserHomeDir()
- if err != nil {
- return nil, err
- }
- ipfsPath = filepath.Join(home, ".ipfs")
- }
- for i, chain := range chains {
- sn := new(SuperNodeConfig)
- sn.Chain, err = NewChainType(chain)
- if err != nil {
- return nil, err
- }
- sn.DBConfig = config.Database{
- Name: viper.GetString(fmt.Sprintf("superNode.%s.database.name", chain)),
- Hostname: viper.GetString(fmt.Sprintf("superNode.%s.database.hostname", chain)),
- Port: viper.GetInt(fmt.Sprintf("superNode.%s.database.port", chain)),
- User: viper.GetString(fmt.Sprintf("superNode.%s.database.user", chain)),
- Password: viper.GetString(fmt.Sprintf("superNode.%s.database.password", chain)),
- }
- sn.IPFSPath = ipfsPath
- sn.Serve = viper.GetBool(fmt.Sprintf("superNode.%s.server.on", chain))
- sn.Sync = viper.GetBool(fmt.Sprintf("superNode.%s.sync.on", chain))
- if sn.Sync {
- workers := viper.GetInt("superNode.sync.workers")
- if workers < 1 {
- workers = 1
- }
- sn.Workers = workers
- switch sn.Chain {
- case Ethereum:
- sn.NodeInfo, sn.WSClient, err = getEthNodeAndClient(viper.GetString("superNode.ethereum.sync.wsPath"))
- case Bitcoin:
- sn.NodeInfo = core.Node{
- ID: viper.GetString("superNode.bitcoin.node.nodeID"),
- ClientName: viper.GetString("superNode.bitcoin.node.clientName"),
- GenesisBlock: viper.GetString("superNode.bitcoin.node.genesisBlock"),
- NetworkID: viper.GetString("superNode.bitcoin.node.networkID"),
- }
- // For bitcoin we load in node info from the config because there is no RPC endpoint to retrieve this from the node
- sn.WSClient = &rpcclient.ConnConfig{
- Host: viper.GetString("superNode.bitcoin.sync.wsPath"),
- HTTPPostMode: true, // Bitcoin core only supports HTTP POST mode
- DisableTLS: true, // Bitcoin core does not provide TLS by default
- Pass: viper.GetString("superNode.bitcoin.sync.pass"),
- User: viper.GetString("superNode.bitcoin.sync.user"),
- }
- }
- }
- if sn.Serve {
- wsPath := viper.GetString(fmt.Sprintf("superNode.%s.server.wsPath", chain))
- if wsPath == "" {
- wsPath = "ws://127.0.0.1:8546"
- }
- sn.WSEndpoint = wsPath
- ipcPath := viper.GetString(fmt.Sprintf("superNode.%s.server.ipcPath", chain))
- if ipcPath == "" {
- home, err := os.UserHomeDir()
- if err != nil {
- return nil, err
- }
- ipcPath = filepath.Join(home, ".vulcanize/vulcanize.ipc")
- }
- sn.IPCEndpoint = ipcPath
- httpPath := viper.GetString(fmt.Sprintf("superNode.%s.server.httpPath", chain))
- if httpPath == "" {
- httpPath = "http://127.0.0.1:8545"
- }
- sn.HTTPEndpoint = httpPath
- }
- db := utils.LoadPostgres(sn.DBConfig, sn.NodeInfo)
- sn.DB = &db
- sn.Quit = make(chan bool)
- if viper.GetBool(fmt.Sprintf("superNode.%s.backFill.on", chain)) {
- if err := sn.BackFillFields(chain); err != nil {
- return nil, err
- }
- }
- configs[i] = sn
- }
- return configs, err
-}
-
-// BackFillFields is used to fill in the BackFill fields of the config
-func (sn *SuperNodeConfig) BackFillFields(chain string) error {
- sn.BackFill = true
- var httpClient interface{}
- var err error
- switch sn.Chain {
- case Ethereum:
- _, httpClient, err = getEthNodeAndClient(viper.GetString("superNode.ethereum.backFill.httpPath"))
- if err != nil {
- return err
- }
- case Bitcoin:
- httpClient = &rpcclient.ConnConfig{
- Host: viper.GetString("superNode.bitcoin.backFill.httpPath"),
- HTTPPostMode: true, // Bitcoin core only supports HTTP POST mode
- DisableTLS: true, // Bitcoin core does not provide TLS by default
- Pass: viper.GetString("superNode.bitcoin.backFill.pass"),
- User: viper.GetString("superNode.bitcoin.backFill.user"),
- }
- }
- sn.HTTPClient = httpClient
- freq := viper.GetInt(fmt.Sprintf("superNode.%s.backFill.frequency", chain))
- var frequency time.Duration
- if freq <= 0 {
- frequency = time.Second * 30
- } else {
- frequency = time.Second * time.Duration(freq)
- }
- sn.Frequency = frequency
- sn.BatchSize = uint64(viper.GetInt64(fmt.Sprintf("superNode.%s.backFill.batchSize", chain)))
- return nil
-}
-
-func getEthNodeAndClient(path string) (core.Node, interface{}, error) {
- rawRPCClient, err := rpc.Dial(path)
- if err != nil {
- return core.Node{}, nil, err
- }
- rpcClient := client.NewRPCClient(rawRPCClient, path)
- ethClient := ethclient.NewClient(rawRPCClient)
- vdbEthClient := client.NewEthClient(ethClient)
- vdbNode := node.MakeNode(rpcClient)
- transactionConverter := vRpc.NewRPCTransactionConverter(ethClient)
- blockChain := eth.NewBlockChain(vdbEthClient, rpcClient, vdbNode, transactionConverter)
- return blockChain.Node(), rpcClient, nil
-}
diff --git a/pkg/super_node/shared/data_type.go b/pkg/super_node/shared/data_type.go
new file mode 100644
index 00000000..8112988a
--- /dev/null
+++ b/pkg/super_node/shared/data_type.go
@@ -0,0 +1,144 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+package shared
+
+import (
+ "fmt"
+ "strings"
+)
+
+// DataType is an enum to loosely represent the type of chain data
+type DataType int
+
+const (
+ UnknownDataType DataType = iota - 1
+ Full
+ Headers
+ Uncles
+ Transactions
+ Receipts
+ State
+ Storage
+)
+
+// String resolves a DataType enum to its string representation
+func (r DataType) String() string {
+ switch r {
+ case Full:
+ return "full"
+ case Headers:
+ return "headers"
+ case Uncles:
+ return "uncles"
+ case Transactions:
+ return "transactions"
+ case Receipts:
+ return "receipts"
+ case State:
+ return "state"
+ case Storage:
+ return "storage"
+ default:
+ return "unknown"
+ }
+}
+
+// GenerateResyncTypeFromString converts a resync type string flag into its DataType enum value
+func GenerateResyncTypeFromString(str string) (DataType, error) {
+ switch strings.ToLower(str) {
+ case "full", "f":
+ return Full, nil
+ case "headers", "header", "h":
+ return Headers, nil
+ case "uncles", "u":
+ return Uncles, nil
+ case "transactions", "transaction", "trxs", "txs", "trx", "tx", "t":
+ return Transactions, nil
+ case "receipts", "receipt", "rcts", "rct", "r":
+ return Receipts, nil
+ case "state":
+ return State, nil
+ case "storage":
+ return Storage, nil
+ default:
+ return UnknownDataType, fmt.Errorf("unrecognized resync type: %s", str)
+ }
+}
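+
+// Illustrative usage (not part of this change): parsing a resync-type flag
+// value and failing fast on an unrecognized input.
+//
+//   resyncType, err := shared.GenerateResyncTypeFromString(viper.GetString("resync.type"))
+//   if err != nil {
+//       return err // e.g. "unrecognized resync type: foo"
+//   }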
+
+// SupportedResyncType returns whether the given data type can be resynced for the given chain
+func SupportedResyncType(d DataType, c ChainType) (bool, error) {
+ switch c {
+ case Ethereum:
+ switch d {
+ case Full, Headers, Uncles, Transactions, Receipts, State, Storage:
+ return true, nil
+ default:
+ return false, nil
+ }
+ case Bitcoin:
+ switch d {
+ case Full, Headers, Transactions:
+ return true, nil
+ default:
+ return false, nil
+ }
+ case Omni:
+ // no resync types are supported for omni yet
+ return false, nil
+ default:
+ return false, fmt.Errorf("unrecognized chain type %s", c.String())
+ }
+}
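+
+// Illustrative guard (assumed call site): checking support before kicking off a resync.
+//
+//   ok, err := shared.SupportedResyncType(shared.Receipts, shared.Bitcoin)
+//   if err != nil {
+//       return err
+//   }
+//   if !ok { // bitcoin carries no receipts, so this branch is taken
+//       return fmt.Errorf("%s does not support %s resync", shared.Bitcoin.String(), shared.Receipts.String())
+//   }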
diff --git a/pkg/super_node/shared/env.go b/pkg/super_node/shared/env.go
new file mode 100644
index 00000000..fbd66dd4
--- /dev/null
+++ b/pkg/super_node/shared/env.go
@@ -0,0 +1,96 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+package shared
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/ethereum/go-ethereum/rpc"
+
+ "github.com/btcsuite/btcd/rpcclient"
+ "github.com/spf13/viper"
+ "github.com/vulcanize/vulcanizedb/pkg/eth/client"
+ "github.com/vulcanize/vulcanizedb/pkg/eth/core"
+ "github.com/vulcanize/vulcanizedb/pkg/eth/node"
+)
+
+// Env variables
+const (
+ IPFS_PATH = "IPFS_PATH"
+
+ ETH_WS_PATH = "ETH_WS_PATH"
+ ETH_HTTP_PATH = "ETH_HTTP_PATH"
+
+ BTC_WS_PATH = "BTC_WS_PATH"
+ BTC_HTTP_PATH = "BTC_HTTP_PATH"
+ BTC_NODE_PASSWORD = "BTC_NODE_PASSWORD"
+ BTC_NODE_USER = "BTC_NODE_USER"
+ BTC_NODE_ID = "BTC_NODE_ID"
+ BTC_CLIENT_NAME = "BTC_CLIENT_NAME"
+ BTC_GENESIS_BLOCK = "BTC_GENESIS_BLOCK"
+ BTC_NETWORK_ID = "BTC_NETWORK_ID"
+)
+
+// GetEthNodeAndClient returns eth node info and client from path url
+func GetEthNodeAndClient(path string) (core.Node, core.RPCClient, error) {
+ rawRPCClient, err := rpc.Dial(path)
+ if err != nil {
+ return core.Node{}, nil, err
+ }
+ rpcClient := client.NewRPCClient(rawRPCClient, path)
+ vdbNode := node.MakeNode(rpcClient)
+ return vdbNode, rpcClient, nil
+}
+
+// GetIPFSPath returns the ipfs path from the config or env variable
+func GetIPFSPath() (string, error) {
+ viper.BindEnv("ipfs.path", IPFS_PATH)
+ ipfsPath := viper.GetString("ipfs.path")
+ if ipfsPath == "" {
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return "", err
+ }
+ ipfsPath = filepath.Join(home, ".ipfs")
+ }
+ return ipfsPath, nil
+}
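+
+// Usage sketch (illustrative): the path comes from the ipfs.path config key
+// (or the IPFS_PATH env variable bound to it), falling back to ~/.ipfs.
+//
+//   ipfsPath, err := shared.GetIPFSPath()
+//   if err != nil {
+//       return err
+//   }
+//   logrus.Infof("using ipfs repository at %s", ipfsPath)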
+
+// GetBtcNodeAndClient returns btc node info from path url
+func GetBtcNodeAndClient(path string) (core.Node, *rpcclient.ConnConfig) {
+ viper.BindEnv("bitcoin.nodeID", BTC_NODE_ID)
+ viper.BindEnv("bitcoin.clientName", BTC_CLIENT_NAME)
+ viper.BindEnv("bitcoin.genesisBlock", BTC_GENESIS_BLOCK)
+ viper.BindEnv("bitcoin.networkID", BTC_NETWORK_ID)
+ viper.BindEnv("bitcoin.pass", BTC_NODE_PASSWORD)
+ viper.BindEnv("bitcoin.user", BTC_NODE_USER)
+
+ // For bitcoin we load in node info from the config because there is no RPC endpoint to retrieve this from the node
+ return core.Node{
+ ID: viper.GetString("bitcoin.nodeID"),
+ ClientName: viper.GetString("bitcoin.clientName"),
+ GenesisBlock: viper.GetString("bitcoin.genesisBlock"),
+ NetworkID: viper.GetString("bitcoin.networkID"),
+ }, &rpcclient.ConnConfig{
+ Host: path,
+ HTTPPostMode: true, // Bitcoin core only supports HTTP POST mode
+ DisableTLS: true, // Bitcoin core does not provide TLS by default
+ Pass: viper.GetString("bitcoin.pass"),
+ User: viper.GetString("bitcoin.user"),
+ }
+}
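+
+// Usage sketch (illustrative host shown): the returned ConnConfig can be passed
+// directly to rpcclient.New to open a bitcoin RPC connection.
+//
+//   nodeInfo, connCfg := shared.GetBtcNodeAndClient("127.0.0.1:8332")
+//   btcClient, err := rpcclient.New(connCfg, nil)
+//   if err != nil {
+//       return err
+//   }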
diff --git a/pkg/super_node/shared/functions.go b/pkg/super_node/shared/functions.go
index dc941440..efb1cd64 100644
--- a/pkg/super_node/shared/functions.go
+++ b/pkg/super_node/shared/functions.go
@@ -18,6 +18,9 @@ package shared
import (
"bytes"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs"
)
// ListContainsString used to check if a list of strings contains a particular string
@@ -30,10 +33,10 @@ func ListContainsString(sss []string, s string) bool {
return false
}
-// ListContainsBytes used to check if a list of byte arrays contains a particular byte array
-func ListContainsBytes(bbb [][]byte, b []byte) bool {
- for _, by := range bbb {
- if bytes.Equal(by, b) {
+// IPLDsContainBytes used to check if a list of IPLD block models contains a particular byte slice
+func IPLDsContainBytes(iplds []ipfs.BlockModel, b []byte) bool {
+ for _, ipld := range iplds {
+ if bytes.Equal(ipld.Data, b) {
return true
}
}
@@ -49,3 +52,11 @@ func ListContainsGap(gapList []Gap, gap Gap) bool {
}
return false
}
+
+// HandleNullAddr converts a nil address pointer to the zero-valued hex address string
+func HandleNullAddr(to *common.Address) string {
+ if to == nil {
+ // zero address for transactions without a recipient (contract creations)
+ return "0x0000000000000000000000000000000000000000"
+ }
+ return to.Hex()
+}
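+
+// For example (illustrative): contract-creation transactions have no recipient,
+// so tx.To() returns nil and the zero address string is recorded instead.
+//
+//   dst := shared.HandleNullAddr(tx.To())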
diff --git a/pkg/super_node/shared/intefaces.go b/pkg/super_node/shared/intefaces.go
index 2974b7b1..39393919 100644
--- a/pkg/super_node/shared/intefaces.go
+++ b/pkg/super_node/shared/intefaces.go
@@ -18,6 +18,8 @@ package shared
import (
"math/big"
+
+ node "github.com/ipfs/go-ipld-format"
)
// PayloadStreamer streams chain-specific payloads to the provided channel
@@ -32,12 +34,12 @@ type PayloadFetcher interface {
// PayloadConverter converts chain-specific payloads into IPLD payloads for publishing
type PayloadConverter interface {
- Convert(payload RawChainData) (StreamedIPLDs, error)
+ Convert(payload RawChainData) (ConvertedData, error)
}
// IPLDPublisher publishes IPLD payloads and returns a CID payload for indexing
type IPLDPublisher interface {
- Publish(payload StreamedIPLDs) (CIDsForIndexing, error)
+ Publish(payload ConvertedData) (CIDsForIndexing, error)
}
// CIDIndexer indexes a CID payload in Postgres
@@ -47,12 +49,12 @@ type CIDIndexer interface {
// ResponseFilterer applies a filter to an IPLD payload to return a subscription response packet
type ResponseFilterer interface {
- Filter(filter SubscriptionSettings, payload StreamedIPLDs) (response ServerResponse, err error)
+ Filter(filter SubscriptionSettings, payload ConvertedData) (response IPLDs, err error)
}
// CIDRetriever retrieves cids according to a provided filter and returns a CID wrapper
type CIDRetriever interface {
- Retrieve(filter SubscriptionSettings, blockNumber int64) (CIDsForFetching, bool, error)
+ Retrieve(filter SubscriptionSettings, blockNumber int64) ([]CIDsForFetching, bool, error)
RetrieveFirstBlockNumber() (int64, error)
RetrieveLastBlockNumber() (int64, error)
RetrieveGapsInData() ([]Gap, error)
@@ -60,12 +62,7 @@ type CIDRetriever interface {
// IPLDFetcher uses a CID wrapper to fetch an IPLD wrapper
type IPLDFetcher interface {
- Fetch(cids CIDsForFetching) (FetchedIPLDs, error)
-}
-
-// IPLDResolver resolves an IPLD wrapper into chain-specific payloads
-type IPLDResolver interface {
- Resolve(iplds FetchedIPLDs) (ServerResponse, error)
+ Fetch(cids CIDsForFetching) (IPLDs, error)
}
// ClientSubscription is a general interface for chain data subscriptions
@@ -76,7 +73,12 @@ type ClientSubscription interface {
// DagPutter is a general interface for a dag putter
type DagPutter interface {
- DagPut(raw interface{}) ([]string, error)
+ DagPut(n node.Node) (string, error)
+}
+
+// Cleaner is for cleaning out data from the cache within the given ranges
+type Cleaner interface {
+ Clean(rngs [][2]uint64, t DataType) error
}
// SubscriptionSettings is the interface every subscription filter type needs to satisfy, no matter the chain
diff --git a/pkg/super_node/shared/mocks/fetcher.go b/pkg/super_node/shared/mocks/payload_fetcher.go
similarity index 90%
rename from pkg/super_node/shared/mocks/fetcher.go
rename to pkg/super_node/shared/mocks/payload_fetcher.go
index e9d6cbfa..589a50ee 100644
--- a/pkg/super_node/shared/mocks/fetcher.go
+++ b/pkg/super_node/shared/mocks/payload_fetcher.go
@@ -23,8 +23,8 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
-// IPLDFetcher mock for tests
-type IPLDFetcher struct {
+// PayloadFetcher mock for tests
+type PayloadFetcher struct {
PayloadsToReturn map[uint64]shared.RawChainData
FetchErrs map[uint64]error
CalledAtBlockHeights [][]uint64
@@ -32,7 +32,7 @@ type IPLDFetcher struct {
}
// FetchAt mock method
-func (fetcher *IPLDFetcher) FetchAt(blockHeights []uint64) ([]shared.RawChainData, error) {
+func (fetcher *PayloadFetcher) FetchAt(blockHeights []uint64) ([]shared.RawChainData, error) {
if fetcher.PayloadsToReturn == nil {
return nil, errors.New("mock StateDiffFetcher needs to be initialized with payloads to return")
}
diff --git a/pkg/super_node/shared/mocks/retriever.go b/pkg/super_node/shared/mocks/retriever.go
index c98a1c32..93efc9a5 100644
--- a/pkg/super_node/shared/mocks/retriever.go
+++ b/pkg/super_node/shared/mocks/retriever.go
@@ -21,8 +21,8 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
-// MockCIDRetriever is a mock CID retriever for use in tests
-type MockCIDRetriever struct {
+// CIDRetriever is a mock CID retriever for use in tests
+type CIDRetriever struct {
GapsToRetrieve []shared.Gap
GapsToRetrieveErr error
CalledTimes int
@@ -31,34 +31,34 @@ type MockCIDRetriever struct {
}
// RetrieveCIDs mock method
-func (*MockCIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumber int64) (shared.CIDsForFetching, bool, error) {
+func (*CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumber int64) ([]shared.CIDsForFetching, bool, error) {
panic("implement me")
}
// RetrieveLastBlockNumber mock method
-func (*MockCIDRetriever) RetrieveLastBlockNumber() (int64, error) {
+func (*CIDRetriever) RetrieveLastBlockNumber() (int64, error) {
panic("implement me")
}
// RetrieveFirstBlockNumber mock method
-func (mcr *MockCIDRetriever) RetrieveFirstBlockNumber() (int64, error) {
+func (mcr *CIDRetriever) RetrieveFirstBlockNumber() (int64, error) {
return mcr.FirstBlockNumberToReturn, mcr.RetrieveFirstBlockNumberErr
}
// RetrieveGapsInData mock method
-func (mcr *MockCIDRetriever) RetrieveGapsInData() ([]shared.Gap, error) {
+func (mcr *CIDRetriever) RetrieveGapsInData() ([]shared.Gap, error) {
mcr.CalledTimes++
return mcr.GapsToRetrieve, mcr.GapsToRetrieveErr
}
// SetGapsToRetrieve mock method
-func (mcr *MockCIDRetriever) SetGapsToRetrieve(gaps []shared.Gap) {
+func (mcr *CIDRetriever) SetGapsToRetrieve(gaps []shared.Gap) {
if mcr.GapsToRetrieve == nil {
mcr.GapsToRetrieve = make([]shared.Gap, 0)
}
mcr.GapsToRetrieve = append(mcr.GapsToRetrieve, gaps...)
}
-func (mcr *MockCIDRetriever) Database() *postgres.DB {
+func (mcr *CIDRetriever) Database() *postgres.DB {
panic("implement me")
}
diff --git a/pkg/super_node/shared/types.go b/pkg/super_node/shared/types.go
index 00719dcd..e213f7f4 100644
--- a/pkg/super_node/shared/types.go
+++ b/pkg/super_node/shared/types.go
@@ -20,16 +20,17 @@ package shared
type RawChainData interface{}
-// The concrete type underneath StreamedIPLDs should not be a pointer
-type StreamedIPLDs interface{}
+// The concrete type underneath ConvertedData should not be a pointer
+type ConvertedData interface {
+ Height() int64
+}
type CIDsForIndexing interface{}
type CIDsForFetching interface{}
-type FetchedIPLDs interface{}
-
-// The concrete type underneath StreamedIPLDs should not be a pointer
-type ServerResponse interface{}
+// The concrete type underneath IPLDs should not be a pointer
+type IPLDs interface {
+ Height() int64
+}
type Gap struct {
Start uint64
diff --git a/pkg/super_node/subscription.go b/pkg/super_node/subscription.go
index e693eef7..a1cdb045 100644
--- a/pkg/super_node/subscription.go
+++ b/pkg/super_node/subscription.go
@@ -17,8 +17,16 @@
package super_node
import (
+ "errors"
+
"github.com/ethereum/go-ethereum/rpc"
- "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+)
+
+type Flag int32
+
+const (
+ EmptyFlag Flag = iota
+ BackFillCompleteFlag
)
// Subscription holds the information for an individual client subscription to the super node
@@ -31,6 +39,22 @@ type Subscription struct {
// SubscriptionPayload is the struct for a super node stream payload
// It carries data of a type specific to the chain being supported/queried and an error message
type SubscriptionPayload struct {
- Data shared.ServerResponse `json:"data"` // e.g. for Ethereum eth.StreamPayload
- Err string `json:"err"`
+ Data []byte `json:"data"` // e.g. for Ethereum rlp serialized eth.StreamPayload
+ Height int64 `json:"height"`
+ Err string `json:"err"` // field for error
+ Flag Flag `json:"flag"` // field for message
+}
+
+func (sp SubscriptionPayload) Error() error {
+ if sp.Err == "" {
+ return nil
+ }
+ return errors.New(sp.Err)
+}
+
+// BackFillComplete returns true if the payload signals that backfilling has finished
+func (sp SubscriptionPayload) BackFillComplete() bool {
+ return sp.Flag == BackFillCompleteFlag
}
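+
+// Illustrative client-side handling (not part of this change): a subscriber
+// inspects the error and flag fields before decoding the rlp-serialized data.
+//
+//   if err := payload.Error(); err != nil {
+//       logrus.Error(err)
+//   } else if payload.BackFillComplete() {
+//       logrus.Info("backfill finished")
+//   } else {
+//       // e.g. rlp.DecodeBytes(payload.Data, &ethIPLDs) for an Ethereum subscription
+//   }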
diff --git a/pkg/super_node/version.go b/pkg/super_node/version.go
new file mode 100644
index 00000000..a85bb41f
--- /dev/null
+++ b/pkg/super_node/version.go
@@ -0,0 +1,63 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+package super_node
+
+import "fmt"
+
+const (
+ VersionMajor = 0 // Major version component of the current release
+ VersionMinor = 1 // Minor version component of the current release
+ VersionPatch = 0 // Patch version component of the current release
+ VersionMeta = "alpha" // Version metadata to append to the version string
+)
+
+// Version holds the textual version string.
+var Version = func() string {
+ return fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch)
+}()
+
+// VersionWithMeta holds the textual version string including the metadata.
+var VersionWithMeta = func() string {
+ v := Version
+ if VersionMeta != "" {
+ v += "-" + VersionMeta
+ }
+ return v
+}()
+
+// ArchiveVersion holds the textual version string
+func ArchiveVersion(gitCommit string) string {
+ vsn := Version
+ if VersionMeta != "stable" {
+ vsn += "-" + VersionMeta
+ }
+ if len(gitCommit) >= 8 {
+ vsn += "-" + gitCommit[:8]
+ }
+ return vsn
+}
+
+// VersionWithCommit returns the textual version string including commit hash and date metadata
+func VersionWithCommit(gitCommit, gitDate string) string {
+ vsn := VersionWithMeta
+ if len(gitCommit) >= 8 {
+ vsn += "-" + gitCommit[:8]
+ }
+ if (VersionMeta != "stable") && (gitDate != "") {
+ vsn += "-" + gitDate
+ }
+ return vsn
+}
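+
+// With the constants above, these helpers yield, for example:
+//
+//   Version                               // "0.1.0"
+//   VersionWithMeta                       // "0.1.0-alpha"
+//   VersionWithCommit("0123456789ab", "") // "0.1.0-alpha-01234567"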
diff --git a/pkg/wasm/instantiator.go b/pkg/wasm/instantiator.go
new file mode 100644
index 00000000..9f9c2065
--- /dev/null
+++ b/pkg/wasm/instantiator.go
@@ -0,0 +1,56 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+package wasm
+
+import (
+ "github.com/vulcanize/vulcanizedb/pkg/postgres"
+)
+
+// Instantiator is used to instantiate WASM functions in Postgres
+type Instantiator struct {
+ db *postgres.DB
+ instances []WasmFunction // WASM file paths and namespaces
+}
+
+type WasmFunction struct {
+ BinaryPath string
+ Namespace string
+}
+
+// NewWASMInstantiator returns a pointer to a new Instantiator
+func NewWASMInstantiator(db *postgres.DB, instances []WasmFunction) *Instantiator {
+ return &Instantiator{
+ db: db,
+ instances: instances,
+ }
+}
+
+// Instantiate is used to load the WASM functions into Postgres
+func (i *Instantiator) Instantiate() error {
+ // TODO: enable instantiation of WASM functions from IPFS
+ tx, err := i.db.Beginx()
+ if err != nil {
+ return err
+ }
+ for _, pn := range i.instances {
+ // Pass the path and namespace as bind parameters; quoting them inline as '$1'
+ // and '$2' would send those literal strings instead of the values
+ if _, err := tx.Exec(`SELECT wasm_new_instance($1, $2)`, pn.BinaryPath, pn.Namespace); err != nil {
+ if rbErr := tx.Rollback(); rbErr != nil {
+ return rbErr
+ }
+ return err
+ }
+ }
+ return tx.Commit()
+}
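+
+// Usage sketch (illustrative; assumes a wasm_new_instance function has been
+// installed in Postgres, e.g. by a wasm extension; the path and namespace
+// below are hypothetical):
+//
+//   initer := wasm.NewWASMInstantiator(db, []wasm.WasmFunction{
+//       {BinaryPath: "/path/to/transfers.wasm", Namespace: "eth_transfers"},
+//   })
+//   if err := initer.Instantiate(); err != nil {
+//       return err
+//   }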
diff --git a/pkg/watcher/btc/repository.go b/pkg/watcher/btc/repository.go
new file mode 100644
index 00000000..07482785
--- /dev/null
+++ b/pkg/watcher/btc/repository.go
@@ -0,0 +1 @@
+package btc
diff --git a/pkg/watcher/config.go b/pkg/watcher/config.go
new file mode 100644
index 00000000..dc8de350
--- /dev/null
+++ b/pkg/watcher/config.go
@@ -0,0 +1,138 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+package watcher
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/vulcanize/vulcanizedb/pkg/wasm"
+
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/spf13/viper"
+
+ "github.com/vulcanize/vulcanizedb/pkg/config"
+ "github.com/vulcanize/vulcanizedb/pkg/eth/client"
+ "github.com/vulcanize/vulcanizedb/pkg/eth/core"
+ "github.com/vulcanize/vulcanizedb/pkg/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+ shared2 "github.com/vulcanize/vulcanizedb/pkg/watcher/shared"
+ "github.com/vulcanize/vulcanizedb/utils"
+)
+
+// Config holds all of the parameters necessary for defining and running an instance of a watcher
+type Config struct {
+ // Subscription settings
+ SubscriptionConfig shared.SubscriptionSettings
+ // Database settings
+ DBConfig config.Database
+ // DB itself
+ DB *postgres.DB
+ // Subscription client
+ Client interface{}
+ // WASM instantiation paths and namespaces
+ WASMFunctions []wasm.WasmFunction
+ // File paths for trigger functions (sql files) that (can) use the instantiated wasm namespaces
+ TriggerFunctions []string
+ // Chain type used to specify what type of raw data we will be processing
+ Chain shared.ChainType
+ // Source type used to specify which streamer to use based on what API we will be interfacing with
+ Source shared2.SourceType
+ // Info for the node
+ NodeInfo core.Node
+}
+
+// NewWatcherConfig returns a new watcher Config initialized from viper values
+func NewWatcherConfig() (*Config, error) {
+ c := new(Config)
+ var err error
+ chain := viper.GetString("watcher.chain")
+ c.Chain, err = shared.NewChainType(chain)
+ if err != nil {
+ return nil, err
+ }
+ switch c.Chain {
+ case shared.Ethereum:
+ c.SubscriptionConfig, err = eth.NewEthSubscriptionConfig()
+ if err != nil {
+ return nil, err
+ }
+ case shared.Bitcoin:
+ c.SubscriptionConfig, err = btc.NewEthSubscriptionConfig()
+ if err != nil {
+ return nil, err
+ }
+ case shared.Omni:
+ return nil, errors.New("omni chain type currently not supported")
+ default:
+ return nil, fmt.Errorf("unexpected chain type %s", c.Chain.String())
+ }
+ sourcePath := viper.GetString("watcher.dataPath")
+ if sourcePath == "" {
+ sourcePath = "ws://127.0.0.1:8080" // fall back to the default local ws url if no path is provided
+ }
+ sourceType := viper.GetString("watcher.dataSource")
+ c.Source, err = shared2.NewSourceType(sourceType)
+ if err != nil {
+ return nil, err
+ }
+ switch c.Source {
+ case shared2.Ethereum:
+ return nil, errors.New("ethereum data source currently not supported")
+ case shared2.Bitcoin:
+ return nil, errors.New("bitcoin data source currently not supported")
+ case shared2.VulcanizeDB:
+ rawRPCClient, err := rpc.Dial(sourcePath)
+ if err != nil {
+ return nil, err
+ }
+ cli := client.NewRPCClient(rawRPCClient, sourcePath)
+ var nodeInfo core.Node
+ if err := cli.CallContext(context.Background(), &nodeInfo, "vdb_node"); err != nil {
+ return nil, err
+ }
+ c.NodeInfo = nodeInfo
+ c.Client = cli
+ default:
+ return nil, fmt.Errorf("unexpected data source type %s", c.Source.String())
+ }
+ wasmBinaries := viper.GetStringSlice("watcher.wasmBinaries")
+ wasmNamespaces := viper.GetStringSlice("watcher.wasmNamespaces")
+ if len(wasmBinaries) != len(wasmNamespaces) {
+ return nil, fmt.Errorf("watcher config needs a namespace for every wasm binary\r\nhave %d binaries and %d namespaces", len(wasmBinaries), len(wasmNamespaces))
+ }
+ c.WASMFunctions = make([]wasm.WasmFunction, len(wasmBinaries))
+ for i, bin := range wasmBinaries {
+ c.WASMFunctions[i] = wasm.WasmFunction{
+ BinaryPath: bin,
+ Namespace: wasmNamespaces[i],
+ }
+ }
+ c.TriggerFunctions = viper.GetStringSlice("watcher.triggerFunctions")
+ c.DBConfig = config.Database{
+ Name: viper.GetString("watcher.database.name"),
+ Hostname: viper.GetString("watcher.database.hostname"),
+ Port: viper.GetInt("watcher.database.port"),
+ User: viper.GetString("watcher.database.user"),
+ Password: viper.GetString("watcher.database.password"),
+ }
+ db := utils.LoadPostgres(c.DBConfig, c.NodeInfo)
+ c.DB = &db
+ return c, nil
+}
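+
+// An illustrative (hypothetical) .toml fragment matching the viper keys read above:
+//
+//   [watcher]
+//       chain = "ethereum"
+//       dataSource = "vdb"
+//       dataPath = "ws://127.0.0.1:8080"
+//       wasmBinaries = ["/path/to/fn.wasm"]
+//       wasmNamespaces = ["example_namespace"]
+//       triggerFunctions = ["./sql/transfer_trigger.sql"]
+//       [watcher.database]
+//           name = "vulcanize_public"
+//           hostname = "localhost"
+//           port = 5432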
diff --git a/pkg/watcher/constructors.go b/pkg/watcher/constructors.go
new file mode 100644
index 00000000..f8112c49
--- /dev/null
+++ b/pkg/watcher/constructors.go
@@ -0,0 +1,53 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+package watcher
+
+import (
+ "fmt"
+
+ "github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
+ "github.com/vulcanize/vulcanizedb/pkg/eth/core"
+ "github.com/vulcanize/vulcanizedb/pkg/postgres"
+ shared2 "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+ "github.com/vulcanize/vulcanizedb/pkg/watcher/eth"
+ "github.com/vulcanize/vulcanizedb/pkg/watcher/shared"
+)
+
+// NewSuperNodeStreamer returns a new shared.SuperNodeStreamer
+func NewSuperNodeStreamer(source shared.SourceType, client interface{}) (shared.SuperNodeStreamer, error) {
+ switch source {
+ case shared.VulcanizeDB:
+ cli, ok := client.(core.RPCClient)
+ if !ok {
+ var expectedClientType core.RPCClient
+ return nil, fmt.Errorf("vulcanizedb NewSuperNodeStreamer construct expects client type %T got %T", expectedClientType, client)
+ }
+ return streamer.NewSuperNodeStreamer(cli), nil
+ default:
+ return nil, fmt.Errorf("NewSuperNodeStreamer constructor unexpected souce type %s", source.String())
+ }
+}
+
+// NewRepository constructs and returns a new Repository that satisfies the shared.Repository interface for the specified chain
+func NewRepository(chain shared2.ChainType, db *postgres.DB, triggerFuncs []string) (shared.Repository, error) {
+ switch chain {
+ case shared2.Ethereum:
+ return eth.NewRepository(db, triggerFuncs), nil
+ default:
+ return nil, fmt.Errorf("NewRepository constructor unexpected chain type %s", chain.String())
+ }
+}
diff --git a/pkg/watcher/eth/converter.go b/pkg/watcher/eth/converter.go
new file mode 100644
index 00000000..98e4702b
--- /dev/null
+++ b/pkg/watcher/eth/converter.go
@@ -0,0 +1,166 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+package eth
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rlp"
+
+ common2 "github.com/vulcanize/vulcanizedb/pkg/eth/converters/common"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+)
+
+// WatcherConverter converts watched data into models for the trigger tables
+type WatcherConverter struct {
+ chainConfig *params.ChainConfig
+}
+
+// NewWatcherConverter creates a pointer to a new WatcherConverter
+func NewWatcherConverter(chainConfig *params.ChainConfig) *WatcherConverter {
+ return &WatcherConverter{
+ chainConfig: chainConfig,
+ }
+}
+
+// Convert is used to convert eth IPLDs into a CID payload for indexing
+// Mirrors the shared.PayloadConverter pattern with concrete eth types
+func (pc *WatcherConverter) Convert(ethIPLDs eth.IPLDs) (*eth.CIDPayload, error) {
+ numTxs := len(ethIPLDs.Transactions)
+ numRcts := len(ethIPLDs.Receipts)
+ if numTxs != numRcts {
+ return nil, fmt.Errorf("eth converter needs same numbe of receipts and transactions, have %d transactions and %d receipts", numTxs, numRcts)
+ }
+ // Initialize the payload struct and its fields
+ cids := new(eth.CIDPayload)
+ cids.UncleCIDs = make([]eth.UncleModel, len(ethIPLDs.Uncles))
+ cids.TransactionCIDs = make([]eth.TxModel, numTxs)
+ cids.ReceiptCIDs = make(map[common.Hash]eth.ReceiptModel, numTxs)
+ cids.StateNodeCIDs = make([]eth.StateNodeModel, len(ethIPLDs.StateNodes))
+ cids.StorageNodeCIDs = make(map[common.Hash][]eth.StorageNodeModel, len(ethIPLDs.StateNodes))
+
+ // Unpack header
+ var header types.Header
+ if err := rlp.DecodeBytes(ethIPLDs.Header.Data, &header); err != nil {
+ return nil, err
+ }
+ // Collect uncles so we can derive miner reward
+ uncles := make([]*types.Header, len(ethIPLDs.Uncles))
+ for i, uncleIPLD := range ethIPLDs.Uncles {
+ var uncle types.Header
+ if err := rlp.DecodeBytes(uncleIPLD.Data, &uncle); err != nil {
+ return nil, err
+ }
+ uncleReward := common2.CalcUncleMinerReward(header.Number.Int64(), uncle.Number.Int64())
+ uncles[i] = &uncle
+ // Uncle data
+ cids.UncleCIDs[i] = eth.UncleModel{
+ CID: uncleIPLD.CID,
+ BlockHash: uncle.Hash().String(),
+ ParentHash: uncle.ParentHash.String(),
+ Reward: uncleReward.String(),
+ }
+ }
+ // Collect transactions so we can derive receipt fields and miner reward
+ signer := types.MakeSigner(pc.chainConfig, header.Number)
+ transactions := make(types.Transactions, len(ethIPLDs.Transactions))
+ for i, txIPLD := range ethIPLDs.Transactions {
+ var tx types.Transaction
+ if err := rlp.DecodeBytes(txIPLD.Data, &tx); err != nil {
+ return nil, err
+ }
+ transactions[i] = &tx
+ from, err := types.Sender(signer, &tx)
+ if err != nil {
+ return nil, err
+ }
+ // Tx data
+ cids.TransactionCIDs[i] = eth.TxModel{
+ Dst: shared.HandleNullAddr(tx.To()),
+ Src: shared.HandleNullAddr(&from),
+ TxHash: tx.Hash().String(),
+ Index: int64(i),
+ CID: txIPLD.CID,
+ }
+ }
+ // Collect receipts so that we can derive the rest of their fields and miner reward
+ receipts := make(types.Receipts, len(ethIPLDs.Receipts))
+ for i, rctIPLD := range ethIPLDs.Receipts {
+ var rct types.Receipt
+ if err := rlp.DecodeBytes(rctIPLD.Data, &rct); err != nil {
+ return nil, err
+ }
+ receipts[i] = &rct
+ }
+ if err := receipts.DeriveFields(pc.chainConfig, header.Hash(), header.Number.Uint64(), transactions); err != nil {
+ return nil, err
+ }
+ for i, receipt := range receipts {
+ matchedTx := transactions[i]
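+ // For regular calls, store the destination address in the contract field
+ // (DeriveFields has already set ContractAddress for creation transactions,
+ // where To() is nil)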
+ if matchedTx.To() != nil {
+ receipt.ContractAddress = *transactions[i].To()
+ }
+ topicSets := make([][]string, 4)
+ for _, log := range receipt.Logs {
+ for i := range topicSets {
+ if i < len(log.Topics) {
+ topicSets[i] = append(topicSets[i], log.Topics[i].Hex())
+ }
+ }
+ }
+ // Rct data
+ cids.ReceiptCIDs[matchedTx.Hash()] = eth.ReceiptModel{
+ CID: ethIPLDs.Receipts[i].CID,
+ Topic0s: topicSets[0],
+ Topic1s: topicSets[1],
+ Topic2s: topicSets[2],
+ Topic3s: topicSets[3],
+ Contract: receipt.ContractAddress.Hex(),
+ }
+ }
+ minerReward := common2.CalcEthBlockReward(&header, uncles, transactions, receipts)
+ // Header data
+ cids.HeaderCID = eth.HeaderModel{
+ CID: ethIPLDs.Header.CID,
+ ParentHash: header.ParentHash.String(),
+ BlockHash: header.Hash().String(),
+ BlockNumber: header.Number.String(),
+ TotalDifficulty: ethIPLDs.TotalDifficulty.String(),
+ Reward: minerReward.String(),
+ }
+ // State data
+ for i, stateIPLD := range ethIPLDs.StateNodes {
+ cids.StateNodeCIDs[i] = eth.StateNodeModel{
+ CID: stateIPLD.IPLD.CID,
+ NodeType: eth.ResolveFromNodeType(stateIPLD.Type),
+ StateKey: stateIPLD.StateLeafKey.String(),
+ }
+ }
+ // Storage data
+ for _, storageIPLD := range ethIPLDs.StorageNodes {
+ cids.StorageNodeCIDs[storageIPLD.StateLeafKey] = append(cids.StorageNodeCIDs[storageIPLD.StateLeafKey], eth.StorageNodeModel{
+ CID: storageIPLD.IPLD.CID,
+ NodeType: eth.ResolveFromNodeType(storageIPLD.Type),
+ StorageKey: storageIPLD.StorageLeafKey.String(),
+ })
+ }
+ return cids, nil
+}
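+
+// Illustrative flow (see ReadyData in pkg/watcher/eth/repository.go): decoded
+// IPLDs are converted into cid models and persisted by an eth.CIDIndexer.
+//
+//   cids, err := converter.Convert(ethIPLDs)
+//   if err != nil {
+//       return err
+//   }
+//   return indexer.Index(cids)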
diff --git a/pkg/watcher/eth/repository.go b/pkg/watcher/eth/repository.go
new file mode 100644
index 00000000..1791697e
--- /dev/null
+++ b/pkg/watcher/eth/repository.go
@@ -0,0 +1,190 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+package eth
+
+import (
+ "io/ioutil"
+
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/sirupsen/logrus"
+ "github.com/vulcanize/vulcanizedb/pkg/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+ "github.com/vulcanize/vulcanizedb/pkg/watcher/shared"
+)
+
+var (
+ vacuumThreshold int64 = 5000
+)
+
+// Repository is the underlying struct for satisfying the shared.Repository interface for eth
+type Repository struct {
+ cidIndexer *eth.CIDIndexer
+ converter *WatcherConverter
+ db *postgres.DB
+ triggerFunctions []string
+ deleteCalls int64
+}
+
+// NewRepository returns a new eth.Repository that satisfies the shared.Repository interface
+func NewRepository(db *postgres.DB, triggerFunctions []string) shared.Repository {
+ return &Repository{
+ cidIndexer: eth.NewCIDIndexer(db),
+ converter: NewWatcherConverter(params.MainnetChainConfig),
+ db: db,
+ triggerFunctions: triggerFunctions,
+ deleteCalls: 0,
+ }
+}
+
+// LoadTriggers is used to initialize Postgres trigger function
+// this needs to be called after the wasm functions these triggers invoke have been instantiated in Postgres
+func (r *Repository) LoadTriggers() error {
+ // TODO: enable loading of triggers from IPFS
+ tx, err := r.db.Beginx()
+ if err != nil {
+ return err
+ }
+ for _, funcPath := range r.triggerFunctions {
+ sqlFile, err := ioutil.ReadFile(funcPath)
+ if err != nil {
+ return err
+ }
+ sqlString := string(sqlFile)
+ if _, err := tx.Exec(sqlString); err != nil {
+ return err
+ }
+
+ }
+ return tx.Commit()
+}
+
+// QueueData puts super node payload data into the db queue
+func (r *Repository) QueueData(payload super_node.SubscriptionPayload) error {
+ pgStr := `INSERT INTO eth.queued_data (data, height) VALUES ($1, $2)
+ ON CONFLICT (height) DO UPDATE SET data = $1`
+ _, err := r.db.Exec(pgStr, payload.Data, payload.Height)
+ return err
+}
+
+// GetQueueData grabs payload data from the queue table so that it can be readied
+// Used to ensure we enter data into the tables that triggers act on in sequential order, even if we receive data out-of-order
+// Returns the queued data, the new index, and err
+// Deletes the data it retrieves from the queue
+// Periodically vacuums the table to free up space from the deleted rows
+func (r *Repository) GetQueueData(height int64) (super_node.SubscriptionPayload, int64, error) {
+ pgStr := `DELETE FROM eth.queued_data
+ WHERE height = $1
+ RETURNING *`
+ var res shared.QueuedData
+ if err := r.db.Get(&res, pgStr, height); err != nil {
+ return super_node.SubscriptionPayload{}, height, err
+ }
+ // If the delete get query succeeded, increment deleteCalls and height and prep payload to return
+ r.deleteCalls++
+ height++
+ payload := super_node.SubscriptionPayload{
+ Data: res.Data,
+ Height: res.Height,
+ Flag: super_node.EmptyFlag,
+ }
+ // Periodically clean up space in the queued data table
+ if r.deleteCalls >= vacuumThreshold {
+ _, err := r.db.Exec(`VACUUM ANALYZE eth.queued_data`)
+ if err != nil {
+ logrus.Error(err)
+ }
+ r.deleteCalls = 0
+ }
+ return payload, height, nil
+}
+
+// ReadyData puts data in the tables ready for processing by trigger functions
+func (r *Repository) ReadyData(payload super_node.SubscriptionPayload) error {
+ var ethIPLDs eth.IPLDs
+ if err := rlp.DecodeBytes(payload.Data, ðIPLDs); err != nil {
+ return err
+ }
+ if err := r.readyIPLDs(ethIPLDs); err != nil {
+ return err
+ }
+ cids, err := r.converter.Convert(ethIPLDs)
+ if err != nil {
+ return err
+ }
+ // Use indexer to persist all of the cid meta data
+ // trigger functions will act on these tables
+ return r.cidIndexer.Index(cids)
+}
+
+// readyIPLDs adds IPLDs directly to the Postgres `blocks` table, rather than going through an IPFS node
+func (r *Repository) readyIPLDs(ethIPLDs eth.IPLDs) error {
+ tx, err := r.db.Beginx()
+ if err != nil {
+ return err
+ }
+ pgStr := `INSERT INTO blocks (key, data) VALUES ($1, $2)
+ ON CONFLICT (key) DO UPDATE SET data = $2`
+ if _, err := tx.Exec(pgStr, ethIPLDs.Header.CID, ethIPLDs.Header.Data); err != nil {
+ if err := tx.Rollback(); err != nil {
+ logrus.Error(err)
+ }
+ return err
+ }
+ for _, uncle := range ethIPLDs.Uncles {
+ if _, err := tx.Exec(pgStr, uncle.CID, uncle.Data); err != nil {
+ if err := tx.Rollback(); err != nil {
+ logrus.Error(err)
+ }
+ return err
+ }
+ }
+ for _, trx := range ethIPLDs.Transactions {
+ if _, err := tx.Exec(pgStr, trx.CID, trx.Data); err != nil {
+ if err := tx.Rollback(); err != nil {
+ logrus.Error(err)
+ }
+ return err
+ }
+ }
+ for _, rct := range ethIPLDs.Receipts {
+ if _, err := tx.Exec(pgStr, rct.CID, rct.Data); err != nil {
+ if err := tx.Rollback(); err != nil {
+ logrus.Error(err)
+ }
+ return err
+ }
+ }
+ for _, state := range ethIPLDs.StateNodes {
+ if _, err := tx.Exec(pgStr, state.IPLD.CID, state.IPLD.Data); err != nil {
+ if err := tx.Rollback(); err != nil {
+ logrus.Error(err)
+ }
+ return err
+ }
+ }
+ for _, storage := range ethIPLDs.StorageNodes {
+ if _, err := tx.Exec(pgStr, storage.IPLD.CID, storage.IPLD.Data); err != nil {
+ if err := tx.Rollback(); err != nil {
+ logrus.Error(err)
+ }
+ return err
+ }
+ }
+ return nil
+}
diff --git a/pkg/watcher/example/sql/transfer_table.sql b/pkg/watcher/example/sql/transfer_table.sql
new file mode 100644
index 00000000..895b6a2a
--- /dev/null
+++ b/pkg/watcher/example/sql/transfer_table.sql
@@ -0,0 +1,10 @@
+CREATE TABLE eth.token_transfers (
+ id SERIAL PRIMARY KEY,
+ receipt_id INTEGER NOT NULL REFERENCES eth.receipt_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
+ log_index INTEGER NOT NULL,
+ contract_address VARCHAR(66) NOT NULL,
+ src VARCHAR(66) NOT NULL,
+ dst VARCHAR(66) NOT NULL,
+ amount NUMERIC NOT NULL,
+ UNIQUE (receipt_id, log_index)
+);
\ No newline at end of file
diff --git a/pkg/watcher/example/sql/transfer_trigger.sql b/pkg/watcher/example/sql/transfer_trigger.sql
new file mode 100644
index 00000000..bb3700b7
--- /dev/null
+++ b/pkg/watcher/example/sql/transfer_trigger.sql
@@ -0,0 +1,7 @@
+CREATE OR REPLACE FUNCTION transfer_trigger() RETURNS trigger AS
+$BODY$
+BEGIN
+    -- TODO: parse token transfer events out of the newly indexed receipt data
+    -- and insert them into eth.token_transfers
+    RETURN NEW;
+END;
+$BODY$
+LANGUAGE plpgsql;
diff --git a/pkg/watcher/service.go b/pkg/watcher/service.go
new file mode 100644
index 00000000..8081f6a9
--- /dev/null
+++ b/pkg/watcher/service.go
@@ -0,0 +1,220 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+package watcher
+
+import (
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/rlp"
+
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/sirupsen/logrus"
+
+ "github.com/vulcanize/vulcanizedb/pkg/super_node"
+ "github.com/vulcanize/vulcanizedb/pkg/wasm"
+ "github.com/vulcanize/vulcanizedb/pkg/watcher/shared"
+)
+
+// Watcher is the top level interface for watching data from super node
+type Watcher interface {
+ Init() error
+ Watch(wg *sync.WaitGroup) error
+}
+
+// Service is the underlying struct for the SuperNodeWatcher
+type Service struct {
+ // Config
+ WatcherConfig *Config
+ // Interface for streaming data from super node
+ SuperNodeStreamer shared.SuperNodeStreamer
+ // Interface for db operations
+ Repository shared.Repository
+ // WASM instantiator
+ WASMIniter *wasm.Instantiator
+
+ // Channels for process communication/data relay
+ PayloadChan chan super_node.SubscriptionPayload
+ QuitChan chan bool
+
+ // Indexes
+ payloadIndex *int64
+ endingIndex int64
+}
+
+// NewWatcher returns a new Service which satisfies the Watcher interface
+func NewWatcher(c *Config, quitChan chan bool) (Watcher, error) {
+ repo, err := NewRepository(c.SubscriptionConfig.ChainType(), c.DB, c.TriggerFunctions)
+ if err != nil {
+ return nil, err
+ }
+ streamer, err := NewSuperNodeStreamer(c.Source, c.Client)
+ if err != nil {
+ return nil, err
+ }
+ return &Service{
+ WatcherConfig: c,
+ SuperNodeStreamer: streamer,
+ Repository: repo,
+ WASMIniter: wasm.NewWASMInstantiator(c.DB, c.WASMFunctions),
+ PayloadChan: make(chan super_node.SubscriptionPayload, super_node.PayloadChanBufferSize),
+ QuitChan: quitChan,
+ payloadIndex: new(int64), // allocate up front; Watch stores the starting block here atomically
+ }, nil
+}
+
+// Init is used to initialize the Postgres WASM and trigger functions
+func (s *Service) Init() error {
+ // Instantiate the Postgres WASM functions
+ if err := s.WASMIniter.Instantiate(); err != nil {
+ return err
+ }
+ // Load the Postgres trigger functions that (can) use
+ return s.Repository.LoadTriggers()
+}
+
+// Watch is the top level loop for watching
+func (s *Service) Watch(wg *sync.WaitGroup) error {
+ rlpConfig, err := rlp.EncodeToBytes(s.WatcherConfig.SubscriptionConfig)
+ if err != nil {
+ return err
+ }
+ sub, err := s.SuperNodeStreamer.Stream(s.PayloadChan, rlpConfig)
+ if err != nil {
+ return err
+ }
+ atomic.StoreInt64(s.payloadIndex, s.WatcherConfig.SubscriptionConfig.StartingBlock().Int64())
+ s.endingIndex = s.WatcherConfig.SubscriptionConfig.EndingBlock().Int64() // less than 0 => never end
+ backFillOnly := s.WatcherConfig.SubscriptionConfig.HistoricalDataOnly()
+ if backFillOnly { // we are only processing historical data => handle single contiguous stream
+ s.backFillOnlyQueuing(wg, sub)
+ } else { // otherwise we need to be prepared to handle out-of-order data
+ s.combinedQueuing(wg, sub)
+ }
+ return nil
+}
+
+// combinedQueuing assumes data is not necessarily going to come in linear order
+// this is true when we are backfilling and streaming at the head or when we are
+// only streaming at the head since reorgs can occur
+//
+// NOTE: maybe we should push everything to the wait queue, otherwise the index could be shifted as we retrieve data from it
+func (s *Service) combinedQueuing(wg *sync.WaitGroup, sub *rpc.ClientSubscription) {
+ wg.Add(1)
+ // This goroutine is responsible for allocating incoming data to the ready or wait queue
+ // depending on if it is at the current index or not
+ forwardQuit := make(chan bool)
+ go func() {
+ for {
+ select {
+ case payload := <-s.PayloadChan:
+ // If there is an error associated with the payload, log it and continue
+ if payload.Error() != nil {
+ logrus.Error(payload.Error())
+ continue
+ }
+ if payload.Height == atomic.LoadInt64(s.payloadIndex) {
+ // If the data is at our current index it is ready to be processed
+ // add it to the ready data queue and increment the index
+ if err := s.Repository.ReadyData(payload); err != nil {
+ logrus.Error(err)
+ }
+ // Increment the current index and if we have exceeded our ending height shut down the watcher
+ if atomic.AddInt64(s.payloadIndex, 1) > s.endingIndex {
+ logrus.Info("Watcher has reached ending block height, shutting down")
+ forwardQuit <- true
+ wg.Done()
+ return
+ }
+ } else { // Otherwise add it to the wait queue
+ if err := s.Repository.QueueData(payload); err != nil {
+ logrus.Error(err)
+ }
+ }
+ case err := <-sub.Err():
+ logrus.Error(err)
+ case <-s.QuitChan:
+ logrus.Info("Watcher shutting down")
+ forwardQuit <- true
+ wg.Done()
+ return
+ }
+ }
+ }()
+ ticker := time.NewTicker(5 * time.Second)
+ // This goroutine is responsible for moving data from the wait queue to the ready queue
+ // preserving the correct order and alignment with the current index
+ go func() {
+ defer ticker.Stop() // release ticker resources when this goroutine exits
+ for {
+ select {
+ case <-ticker.C:
+ // Retrieve queued data, in order, and forward it to the ready queue
+ queueData, newIndex, err := s.Repository.GetQueueData(atomic.LoadInt64(s.payloadIndex))
+ if err != nil {
+ logrus.Error(err)
+ continue
+ }
+ atomic.StoreInt64(s.payloadIndex, newIndex)
+ if atomic.LoadInt64(s.payloadIndex) > s.endingIndex {
+ s.QuitChan <- true
+ }
+ if err := s.Repository.ReadyData(queueData); err != nil {
+ logrus.Error(err)
+ }
+ case <-forwardQuit:
+ return
+ }
+ }
+ }()
+}
+
+// backFillOnlyQueuing assumes the data is coming in contiguously from behind the head
+// it puts all data directly into the ready queue
+// it continues until the watcher is told to quit or we receive notification that the backfill is finished
+func (s *Service) backFillOnlyQueuing(wg *sync.WaitGroup, sub *rpc.ClientSubscription) {
+ wg.Add(1)
+ go func() {
+ for {
+ select {
+ case payload := <-s.PayloadChan:
+ // If there is an error associated with the payload, log it and continue
+ if payload.Error() != nil {
+ logrus.Error(payload.Error())
+ continue
+ }
+ // If the payload signals that backfilling has completed, shut down the process
+ if payload.BackFillComplete() {
+ logrus.Info("Backfill complete, WatchContract shutting down")
+ wg.Done()
+ return
+ }
+ // Add the payload the ready data queue
+ if err := s.Repository.ReadyData(payload); err != nil {
+ logrus.Error(err)
+ }
+ case err := <-sub.Err():
+ logrus.Error(err)
+ case <-s.QuitChan:
+ logrus.Info("Watcher shutting down")
+ wg.Done()
+ return
+ }
+ }
+ }()
+}
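+
+// Illustrative wiring (assumed entry point, not part of this change):
+//
+//   quitChan := make(chan bool)
+//   w, err := watcher.NewWatcher(cfg, quitChan)
+//   if err != nil {
+//       logrus.Fatal(err)
+//   }
+//   if err := w.Init(); err != nil {
+//       logrus.Fatal(err)
+//   }
+//   var wg sync.WaitGroup
+//   if err := w.Watch(&wg); err != nil {
+//       logrus.Fatal(err)
+//   }
+//   wg.Wait()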
diff --git a/pkg/super_node/btc/filterer_test.go b/pkg/watcher/shared/interfaces.go
similarity index 52%
rename from pkg/super_node/btc/filterer_test.go
rename to pkg/watcher/shared/interfaces.go
index 8dd3c1ae..5e6c64ee 100644
--- a/pkg/super_node/btc/filterer_test.go
+++ b/pkg/watcher/shared/interfaces.go
@@ -14,4 +14,23 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
-package btc
+package shared
+
+import (
+ "github.com/ethereum/go-ethereum/rpc"
+
+ "github.com/vulcanize/vulcanizedb/pkg/super_node"
+)
+
+// Repository is the interface for the Postgres database
+type Repository interface {
+ LoadTriggers() error
+ QueueData(payload super_node.SubscriptionPayload) error
+ GetQueueData(height int64) (super_node.SubscriptionPayload, int64, error)
+ ReadyData(payload super_node.SubscriptionPayload) error
+}
+
+// SuperNodeStreamer is the interface for streaming data from a vulcanizeDB super node
+type SuperNodeStreamer interface {
+ Stream(payloadChan chan super_node.SubscriptionPayload, rlpParams []byte) (*rpc.ClientSubscription, error)
+}
diff --git a/pkg/super_node/btc/resolver_test.go b/pkg/watcher/shared/models.go
similarity index 80%
rename from pkg/super_node/btc/resolver_test.go
rename to pkg/watcher/shared/models.go
index 8dd3c1ae..e905a7e2 100644
--- a/pkg/super_node/btc/resolver_test.go
+++ b/pkg/watcher/shared/models.go
@@ -14,4 +14,11 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
-package btc
+package shared
+
+// QueuedData is the db model for queued data
+type QueuedData struct {
+ ID int64 `db:"id"`
+ Data []byte `db:"data"`
+ Height int64 `db:"height"`
+}
diff --git a/pkg/watcher/shared/source_type.go b/pkg/watcher/shared/source_type.go
new file mode 100644
index 00000000..7a1ce730
--- /dev/null
+++ b/pkg/watcher/shared/source_type.go
@@ -0,0 +1,58 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+package shared
+
+import (
+ "fmt"
+ "strings"
+)
+
+// SourceType enum for specifying source type for raw chain data
+type SourceType int
+
+const (
+ Unknown SourceType = iota
+ VulcanizeDB
+ Ethereum
+ Bitcoin
+)
+
+func (c SourceType) String() string {
+ switch c {
+ case Ethereum:
+ return "Ethereum"
+ case Bitcoin:
+ return "Bitcoin"
+ case VulcanizeDB:
+ return "VulcanizeDB"
+ default:
+ return ""
+ }
+}
+
+func NewSourceType(name string) (SourceType, error) {
+ switch strings.ToLower(name) {
+ case "ethereum", "eth":
+ return Ethereum, nil
+ case "bitcoin", "btc", "xbt":
+ return Bitcoin, nil
+ case "vulcanizedb", "vdb":
+ return VulcanizeDB, nil
+ default:
+ return Unknown, fmt.Errorf("invalid name for data source: %s", name)
+ }
+}
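+
+// For example (illustrative):
+//
+//   src, err := shared.NewSourceType("vdb") // VulcanizeDB, nil
+//   src, err = shared.NewSourceType("eos")  // Unknown, error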