trimming down to ipfs watchers

Ian Norden 2020-05-29 22:02:47 -05:00
parent 78e9fbd248
commit fe083c12f9
422 changed files with 1310 additions and 28341 deletions

.gitignore

@@ -7,7 +7,7 @@ Vagrantfile
 vagrant*.sh
 .vagrant
 test_scripts/
-vulcanizedb
+ipfs-chain-watcher
 postgraphile/build/
 postgraphile/node_modules/
 postgraphile/package-lock.json

.travis.yml

@@ -7,7 +7,7 @@ services:
 addons:
 ssh_known_hosts: arch1.vdb.to
 postgresql: '11.2'
-go_import_path: github.com/vulcanize/vulcanizedb
+go_import_path: github.com/vulcanize/ipfs-chain-watcher
 before_install:
 - openssl aes-256-cbc -K $encrypted_e1db309e8776_key -iv $encrypted_e1db309e8776_iv
 -in temp_rsa.enc -out temp_rsa -d

Dockerfile

@@ -2,9 +2,9 @@ FROM golang:alpine as builder
 RUN apk --update --no-cache add make git g++
 # Build statically linked vDB binary (wonky path because of Dep)
-RUN mkdir -p /go/src/github.com/vulcanize/vulcanizedb
-ADD . /go/src/github.com/vulcanize/vulcanizedb
-WORKDIR /go/src/github.com/vulcanize/vulcanizedb
+RUN mkdir -p /go/src/github.com/vulcanize/ipfs-chain-watcher
+ADD . /go/src/github.com/vulcanize/ipfs-chain-watcher
+WORKDIR /go/src/github.com/vulcanize/ipfs-chain-watcher
 RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' .
 # Build migration tool
@@ -14,10 +14,10 @@ RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflag
 # Second stage
 FROM alpine
-COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/vulcanizedb /app/vulcanizedb
-COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/environments/staging.toml /app/environments/
-COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/dockerfiles/startup_script.sh /app/
-COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/db/migrations/* /app/
+COPY --from=builder /go/src/github.com/vulcanize/ipfs-chain-watcher/vulcanizedb /app/vulcanizedb
+COPY --from=builder /go/src/github.com/vulcanize/ipfs-chain-watcher/environments/staging.toml /app/environments/
+COPY --from=builder /go/src/github.com/vulcanize/ipfs-chain-watcher/dockerfiles/startup_script.sh /app/
+COPY --from=builder /go/src/github.com/vulcanize/ipfs-chain-watcher/db/migrations/* /app/
 COPY --from=builder /go/src/github.com/pressly/goose/cmd/goose/goose /app/goose
 WORKDIR /app

README.md

@@ -1,7 +1,7 @@
 # Vulcanize DB
 [![Build Status](https://travis-ci.org/vulcanize/vulcanizedb.svg?branch=master)](https://travis-ci.org/vulcanize/vulcanizedb)
-[![Go Report Card](https://goreportcard.com/badge/github.com/vulcanize/vulcanizedb)](https://goreportcard.com/report/github.com/vulcanize/vulcanizedb)
+[![Go Report Card](https://goreportcard.com/badge/github.com/vulcanize/ipfs-chain-watcher)](https://goreportcard.com/report/github.com/vulcanize/ipfs-chain-watcher)
 > Vulcanize DB is a set of tools that make it easier for developers to write application-specific indexes and caches for dapps built on Ethereum.
@@ -44,11 +44,11 @@ data from VulcanizeDB's underlying Postgres database and making it accessible.
 ### Building the project
 Download the codebase to your local `GOPATH` via:
-`go get github.com/vulcanize/vulcanizedb`
+`go get github.com/vulcanize/ipfs-chain-watcher`
 Move to the project directory:
-`cd $GOPATH/src/github.com/vulcanize/vulcanizedb`
+`cd $GOPATH/src/github.com/vulcanize/ipfs-chain-watcher`
 Be sure you have enabled Go Modules (`export GO111MODULE=on`), and build the executable with:
@@ -65,7 +65,7 @@ It can be additionally helpful to add `$GOPATH/bin` to your shell's `$PATH`.
 1. Install Postgres
 1. Create a superuser for yourself and make sure `psql --list` works without prompting for a password.
 1. `createdb vulcanize_public`
-1. `cd $GOPATH/src/github.com/vulcanize/vulcanizedb`
+1. `cd $GOPATH/src/github.com/vulcanize/ipfs-chain-watcher`
 1. Run the migrations: `make migrate HOST_NAME=localhost NAME=vulcanize_public PORT=5432`
 - There is an optional var `USER=username` if the database user is not the default user `postgres`
 - To rollback a single step: `make rollback NAME=vulcanize_public`
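The module rename also changes the import path for any downstream Go code that consumes these packages. A minimal sketch of the switch, assuming only that pkg/config keeps exposing the Database struct populated in cmd/root.go further down:

package main

import (
	"fmt"

	// Before this commit: "github.com/vulcanize/vulcanizedb/pkg/config"
	"github.com/vulcanize/ipfs-chain-watcher/pkg/config"
)

func main() {
	// config.Database mirrors the [database] block in the command config files.
	db := config.Database{
		Name:     "vulcanize_public",
		Hostname: "localhost",
		Port:     5432,
	}
	fmt.Printf("would connect to %s:%d/%s\n", db.Hostname, db.Port, db.Name)
}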


@@ -1,193 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"errors"
"fmt"
"strconv"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/vulcanize/vulcanizedb/pkg/config"
p2 "github.com/vulcanize/vulcanizedb/pkg/plugin"
)
// composeCmd represents the compose command
var composeCmd = &cobra.Command{
Use: "compose",
Short: "Composes transformer initializer plugin",
Long: `This command needs a config .toml file of the form:
[database]
name = "vulcanize_public"
hostname = "localhost"
user = "vulcanize"
password = "vulcanize"
port = 5432
[client]
ipcPath = "/Users/user/Library/Ethereum/geth.ipc"
[exporter]
home = "github.com/vulcanize/vulcanizedb"
name = "exampleTransformerExporter"
save = false
transformerNames = [
"transformer1",
"transformer2",
"transformer3",
"transformer4",
]
[exporter.transformer1]
path = "path/to/transformer1"
type = "eth_event"
repository = "github.com/account/repo"
migrations = "db/migrations"
rank = "0"
[exporter.transformer2]
path = "path/to/transformer2"
type = "eth_contract"
repository = "github.com/account/repo"
migrations = "db/migrations"
rank = "0"
[exporter.transformer3]
path = "path/to/transformer3"
type = "eth_event"
repository = "github.com/account/repo"
migrations = "db/migrations"
rank = "0"
[exporter.transformer4]
path = "path/to/transformer4"
type = "eth_storage"
repository = "github.com/account2/repo2"
migrations = "to/db/migrations"
rank = "1"
Note: If any of the plugin transformers need additional
configuration variables, include them in the .toml file as well
This information is used to write and build a go plugin with a transformer
set composed from the transformer imports specified in the config file
This plugin is loaded and the set of transformer initializers is exported
from it and loaded into and executed over by the appropriate watcher.
The type of watcher that the transformer works with is specified using the
type variable for each transformer in the config. Currently there are watchers
of event data from an eth node (eth_event) and storage data from an eth node
(eth_storage), and a more generic interface for accepting contract_watcher pkg
based transformers which can perform both event watching and public method
polling (eth_contract).
Transformers of different types can be run together in the same command using a
single config file or in separate command instances using different config files
Specify config location when executing the command:
./vulcanizedb compose --config=./environments/config_name.toml`,
Run: func(cmd *cobra.Command, args []string) {
subCommand = cmd.CalledAs()
logWithCommand = *log.WithField("SubCommand", subCommand)
compose()
},
}
func compose() {
// Build plugin generator config
prepConfig()
// Generate code to build the plugin according to the config file
logWithCommand.Info("generating plugin")
generator, err := p2.NewGenerator(genConfig, databaseConfig)
if err != nil {
logWithCommand.Debug("initializing plugin generator failed")
logWithCommand.Fatal(err)
}
err = generator.GenerateExporterPlugin()
if err != nil {
logWithCommand.Debug("generating plugin failed")
logWithCommand.Fatal(err)
}
// TODO: Embed versioning info in the .so files so we know which version of vulcanizedb to run them with
_, pluginPath, err := genConfig.GetPluginPaths()
if err != nil {
logWithCommand.Debug("getting plugin path failed")
logWithCommand.Fatal(err)
}
fmt.Printf("Composed plugin %s", pluginPath)
logWithCommand.Info("plugin .so file output to ", pluginPath)
}
func init() {
rootCmd.AddCommand(composeCmd)
}
func prepConfig() {
logWithCommand.Info("configuring plugin")
names := viper.GetStringSlice("exporter.transformerNames")
transformers := make(map[string]config.Transformer)
for _, name := range names {
logWithCommand.Debug("Configuring " + name + " transformer")
transformer := viper.GetStringMapString("exporter." + name)
p, pOK := transformer["path"]
if !pOK || p == "" {
logWithCommand.Fatal(name, " transformer config is missing `path` value")
}
r, rOK := transformer["repository"]
if !rOK || r == "" {
logWithCommand.Fatal(name, " transformer config is missing `repository` value")
}
m, mOK := transformer["migrations"]
if !mOK || m == "" {
logWithCommand.Fatal(name, " transformer config is missing `migrations` value")
}
mr, mrOK := transformer["rank"]
if !mrOK || mr == "" {
logWithCommand.Fatal(name, " transformer config is missing `rank` value")
}
rank, err := strconv.ParseUint(mr, 10, 64)
if err != nil {
logWithCommand.Fatal(name, " migration `rank` can't be converted to an unsigned integer")
}
t, tOK := transformer["type"]
if !tOK {
logWithCommand.Fatal(name, " transformer config is missing `type` value")
}
transformerType := config.GetTransformerType(t)
if transformerType == config.UnknownTransformerType {
logWithCommand.Fatal(errors.New(`unknown transformer type in exporter config; accepted types are "eth_event", "eth_storage"`))
}
transformers[name] = config.Transformer{
Path: p,
Type: transformerType,
RepositoryPath: r,
MigrationPath: m,
MigrationRank: rank,
}
}
genConfig = config.Plugin{
Transformers: transformers,
FilePath: "$GOPATH/src/github.com/vulcanize/vulcanizedb/plugins",
FileName: viper.GetString("exporter.name"),
Save: viper.GetBool("exporter.save"),
Home: viper.GetString("exporter.home"),
}
}
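The plugin that compose generates must export a symbol named Exporter satisfying the interface asserted in execute.go further down. A hypothetical minimal plugin, with empty initializer sets standing in for the real transformer imports listed under [exporter] in the config:

// Go plugins must be package main, built with `go build -buildmode=plugin`.
package main

import (
	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
)

type exporter string

// Exporter is the symbol the compose/execute commands look up by name.
var Exporter exporter

// Export returns the event, storage, and contract transformer initializer sets.
func (e exporter) Export() ([]transformer.EventTransformerInitializer, []transformer.StorageTransformerInitializer, []transformer.ContractTransformerInitializer) {
	// A real composed plugin returns initializers imported from the
	// repositories named in the config file.
	return []transformer.EventTransformerInitializer{},
		[]transformer.StorageTransformerInitializer{},
		[]transformer.ContractTransformerInitializer{}
}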


@@ -1,217 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"os"
"plugin"
syn "sync"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/vulcanize/vulcanizedb/libraries/shared/fetcher"
"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
"github.com/vulcanize/vulcanizedb/libraries/shared/watcher"
"github.com/vulcanize/vulcanizedb/pkg/fs"
p2 "github.com/vulcanize/vulcanizedb/pkg/plugin"
"github.com/vulcanize/vulcanizedb/pkg/plugin/helpers"
"github.com/vulcanize/vulcanizedb/utils"
)
// composeAndExecuteCmd represents the composeAndExecute command
var composeAndExecuteCmd = &cobra.Command{
Use: "composeAndExecute",
Short: "Composes, loads, and executes transformer initializer plugin",
Long: `This command needs a config .toml file of the form:
[database]
name = "vulcanize_public"
hostname = "localhost"
user = "vulcanize"
password = "vulcanize"
port = 5432
[client]
ipcPath = "/Users/user/Library/Ethereum/geth.ipc"
[exporter]
home = "github.com/vulcanize/vulcanizedb"
name = "exampleTransformerExporter"
save = false
transformerNames = [
"transformer1",
"transformer2",
"transformer3",
"transformer4",
]
[exporter.transformer1]
path = "path/to/transformer1"
type = "eth_event"
repository = "github.com/account/repo"
migrations = "db/migrations"
rank = "0"
[exporter.transformer2]
path = "path/to/transformer2"
type = "eth_contract"
repository = "github.com/account/repo"
migrations = "db/migrations"
rank = "2"
[exporter.transformer3]
path = "path/to/transformer3"
type = "eth_event"
repository = "github.com/account/repo"
migrations = "db/migrations"
rank = "0"
[exporter.transformer4]
path = "path/to/transformer4"
type = "eth_storage"
repository = "github.com/account2/repo2"
migrations = "to/db/migrations"
rank = "1"
Note: If any of the plugin transformers need additional
configuration variables, include them in the .toml file as well
This information is used to write and build a go plugin with a transformer
set composed from the transformer imports specified in the config file
This plugin is loaded and the set of transformer initializers is exported
from it and loaded into and executed over by the appropriate watcher.
The type of watcher that the transformer works with is specified using the
type variable for each transformer in the config. Currently there are watchers
of event data from an eth node (eth_event) and storage data from an eth node
(eth_storage), and a more generic interface for accepting contract_watcher pkg
based transformers which can perform both event watching and public method
polling (eth_contract).
Transformers of different types can be run together in the same command using a
single config file or in separate command instances using different config files
Specify config location when executing the command:
./vulcanizedb composeAndExecute --config=./environments/config_name.toml`,
Run: func(cmd *cobra.Command, args []string) {
subCommand = cmd.CalledAs()
logWithCommand = *log.WithField("SubCommand", subCommand)
composeAndExecute()
},
}
func composeAndExecute() {
// Build plugin generator config
prepConfig()
// Generate code to build the plugin according to the config file
logWithCommand.Info("generating plugin")
generator, err := p2.NewGenerator(genConfig, databaseConfig)
if err != nil {
logWithCommand.Fatal(err)
}
err = generator.GenerateExporterPlugin()
if err != nil {
logWithCommand.Debug("generating plugin failed")
logWithCommand.Fatal(err)
}
// Get the plugin path and load the plugin
_, pluginPath, err := genConfig.GetPluginPaths()
if err != nil {
logWithCommand.Fatal(err)
}
if !genConfig.Save {
defer helpers.ClearFiles(pluginPath)
}
logWithCommand.Info("linking plugin ", pluginPath)
plug, err := plugin.Open(pluginPath)
if err != nil {
logWithCommand.Debug("linking plugin failed")
logWithCommand.Fatal(err)
}
// Load the `Exporter` symbol from the plugin
logWithCommand.Info("loading transformers from plugin")
symExporter, err := plug.Lookup("Exporter")
if err != nil {
logWithCommand.Debug("loading Exporter symbol failed")
logWithCommand.Fatal(err)
}
// Assert that the symbol is of type Exporter
exporter, ok := symExporter.(Exporter)
if !ok {
logWithCommand.Debug("plugged-in symbol not of type Exporter")
os.Exit(1)
}
// Use the Exporters export method to load the EventTransformerInitializer, StorageTransformerInitializer, and ContractTransformerInitializer sets
ethEventInitializers, ethStorageInitializers, ethContractInitializers := exporter.Export()
// Setup bc and db objects
blockChain := getBlockChain()
db := utils.LoadPostgres(databaseConfig, blockChain.Node())
// Execute over transformer sets returned by the exporter
// Use WaitGroup to wait on both goroutines
var wg syn.WaitGroup
if len(ethEventInitializers) > 0 {
ew := watcher.NewEventWatcher(&db, blockChain)
err := ew.AddTransformers(ethEventInitializers)
if err != nil {
logWithCommand.Fatalf("failed to add event transformer initializers to watcher: %s", err.Error())
}
wg.Add(1)
go watchEthEvents(&ew, &wg)
}
if len(ethStorageInitializers) > 0 {
switch storageDiffsSource {
case "geth":
log.Debug("fetching storage diffs from geth pub sub")
rpcClient, _ := getClients()
stateDiffStreamer := streamer.NewStateDiffStreamer(rpcClient)
storageFetcher := fetcher.NewGethRPCStorageFetcher(stateDiffStreamer)
sw := watcher.NewStorageWatcher(storageFetcher, &db)
sw.AddTransformers(ethStorageInitializers)
wg.Add(1)
go watchEthStorage(sw, &wg)
default:
log.Debug("fetching storage diffs from csv")
tailer := fs.FileTailer{Path: storageDiffsPath}
storageFetcher := fetcher.NewCsvTailStorageFetcher(tailer)
sw := watcher.NewStorageWatcher(storageFetcher, &db)
sw.AddTransformers(ethStorageInitializers)
wg.Add(1)
go watchEthStorage(sw, &wg)
}
}
if len(ethContractInitializers) > 0 {
gw := watcher.NewContractWatcher(&db, blockChain)
gw.AddTransformers(ethContractInitializers)
wg.Add(1)
go watchEthContract(&gw, &wg)
}
wg.Wait()
}
func init() {
rootCmd.AddCommand(composeAndExecuteCmd)
composeAndExecuteCmd.Flags().BoolVarP(&recheckHeadersArg, "recheck-headers", "r", false, "whether to re-check headers for watched events")
composeAndExecuteCmd.Flags().DurationVarP(&queueRecheckInterval, "queue-recheck-interval", "q", 5*time.Minute, "interval duration for rechecking queued storage diffs (ex: 5m30s)")
}
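The linking step above relies on Go's standard library plugin package. A standalone sketch of the open-and-lookup sequence, assuming a plugin already built with `go build -buildmode=plugin -o plugins/example.so`:

package main

import (
	"log"
	"plugin"
)

func main() {
	// GetPluginPaths derives this path from exporter.name in the real commands.
	plug, err := plugin.Open("plugins/example.so")
	if err != nil {
		log.Fatal(err)
	}
	// "Exporter" is the exact symbol name both compose and execute expect.
	sym, err := plug.Lookup("Exporter")
	if err != nil {
		log.Fatal(err)
	}
	// The real commands assert sym against the Exporter interface from execute.go.
	log.Printf("loaded symbol of dynamic type %T", sym)
}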


@@ -1,118 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"fmt"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
st "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/pkg/config"
ht "github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/header/transformer"
"github.com/vulcanize/vulcanizedb/utils"
)
// contractWatcherCmd represents the contractWatcher command
var contractWatcherCmd = &cobra.Command{
Use: "contractWatcher",
Short: "Watches events at the provided contract address using fully synced vDB",
Long: `Uses input contract address and event filters to watch events
Expects an ethereum node to be running
Expects an archival node synced into vulcanizeDB
Requires a .toml config file:
[database]
name = "vulcanize_public"
hostname = "localhost"
port = 5432
[client]
ipcPath = "/Users/user/Library/Ethereum/geth.ipc"
[contract]
network = ""
addresses = [
"contractAddress1",
"contractAddress2"
]
[contract.contractAddress1]
abi = 'ABI for contract 1'
startingBlock = 982463
[contract.contractAddress2]
abi = 'ABI for contract 2'
events = [
"event1",
"event2"
]
eventArgs = [
"arg1",
"arg2"
]
methods = [
"method1",
"method2"
]
methodArgs = [
"arg1",
"arg2"
]
startingBlock = 4448566
piping = true
`,
Run: func(cmd *cobra.Command, args []string) {
subCommand = cmd.CalledAs()
logWithCommand = *log.WithField("SubCommand", subCommand)
contractWatcher()
},
}
var (
mode string
)
func contractWatcher() {
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
blockChain := getBlockChain()
db := utils.LoadPostgres(databaseConfig, blockChain.Node())
var t st.ContractTransformer
con := config.ContractConfig{}
con.PrepConfig()
t = ht.NewTransformer(con, blockChain, &db)
err := t.Init()
if err != nil {
logWithCommand.Fatal(fmt.Sprintf("Failed to initialize transformer, err: %v ", err))
}
for range ticker.C {
err = t.Execute()
if err != nil {
logWithCommand.Error("Execution error for transformer: ", t.GetConfig().Name, err)
}
}
}
func init() {
rootCmd.AddCommand(contractWatcherCmd)
}


@@ -1,207 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"fmt"
"plugin"
syn "sync"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/vulcanize/vulcanizedb/libraries/shared/constants"
"github.com/vulcanize/vulcanizedb/libraries/shared/fetcher"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage"
"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/libraries/shared/watcher"
"github.com/vulcanize/vulcanizedb/pkg/fs"
"github.com/vulcanize/vulcanizedb/utils"
)
// executeCmd represents the execute command
var executeCmd = &cobra.Command{
Use: "execute",
Short: "executes a precomposed transformer initializer plugin",
Long: `This command needs a config .toml file of the form:
[database]
name = "vulcanize_public"
hostname = "localhost"
user = "vulcanize"
password = "vulcanize"
port = 5432
[client]
ipcPath = "/Users/user/Library/Ethereum/geth.ipc"
[exporter]
name = "exampleTransformerExporter"
Note: If any of the plugin transformers need additional
configuration variables, include them in the .toml file as well
The exporter.name is the name (without extension) of the plugin to be loaded.
The plugin file needs to be located in the /plugins directory and this command assumes
the db migrations remain from when the plugin was composed. Additionally, the plugin
must have been composed by the same version of vulcanizedb or else it will not be compatible.
Specify config location when executing the command:
./vulcanizedb execute --config=./environments/config_name.toml`,
Run: func(cmd *cobra.Command, args []string) {
subCommand = cmd.CalledAs()
logWithCommand = *log.WithField("SubCommand", subCommand)
execute()
},
}
func execute() {
// Build plugin generator config
prepConfig()
// Get the plugin path and load the plugin
_, pluginPath, err := genConfig.GetPluginPaths()
if err != nil {
logWithCommand.Fatal(err)
}
fmt.Printf("Executing plugin %s", pluginPath)
logWithCommand.Info("linking plugin ", pluginPath)
plug, err := plugin.Open(pluginPath)
if err != nil {
logWithCommand.Warn("linking plugin failed")
logWithCommand.Fatal(err)
}
// Load the `Exporter` symbol from the plugin
logWithCommand.Info("loading transformers from plugin")
symExporter, err := plug.Lookup("Exporter")
if err != nil {
logWithCommand.Warn("loading Exporter symbol failed")
logWithCommand.Fatal(err)
}
// Assert that the symbol is of type Exporter
exporter, ok := symExporter.(Exporter)
if !ok {
logWithCommand.Fatal("plugged-in symbol not of type Exporter")
}
// Use the Exporters export method to load the EventTransformerInitializer, StorageTransformerInitializer, and ContractTransformerInitializer sets
ethEventInitializers, ethStorageInitializers, ethContractInitializers := exporter.Export()
// Setup bc and db objects
blockChain := getBlockChain()
db := utils.LoadPostgres(databaseConfig, blockChain.Node())
// Execute over transformer sets returned by the exporter
// Use WaitGroup to wait on both goroutines
var wg syn.WaitGroup
if len(ethEventInitializers) > 0 {
ew := watcher.NewEventWatcher(&db, blockChain)
err = ew.AddTransformers(ethEventInitializers)
if err != nil {
logWithCommand.Fatalf("failed to add event transformer initializers to watcher: %s", err.Error())
}
wg.Add(1)
go watchEthEvents(&ew, &wg)
}
if len(ethStorageInitializers) > 0 {
switch storageDiffsSource {
case "geth":
log.Debug("fetching storage diffs from geth pub sub")
wsClient := getWSClient()
stateDiffStreamer := streamer.NewStateDiffStreamer(wsClient)
storageFetcher := fetcher.NewGethRPCStorageFetcher(stateDiffStreamer)
sw := watcher.NewStorageWatcher(storageFetcher, &db)
sw.AddTransformers(ethStorageInitializers)
wg.Add(1)
go watchEthStorage(sw, &wg)
default:
log.Debug("fetching storage diffs from csv")
tailer := fs.FileTailer{Path: storageDiffsPath}
storageFetcher := fetcher.NewCsvTailStorageFetcher(tailer)
sw := watcher.NewStorageWatcher(storageFetcher, &db)
sw.AddTransformers(ethStorageInitializers)
wg.Add(1)
go watchEthStorage(sw, &wg)
}
}
if len(ethContractInitializers) > 0 {
gw := watcher.NewContractWatcher(&db, blockChain)
gw.AddTransformers(ethContractInitializers)
wg.Add(1)
go watchEthContract(&gw, &wg)
}
wg.Wait()
}
func init() {
rootCmd.AddCommand(executeCmd)
executeCmd.Flags().BoolVarP(&recheckHeadersArg, "recheck-headers", "r", false, "whether to re-check headers for watched events")
executeCmd.Flags().DurationVarP(&queueRecheckInterval, "queue-recheck-interval", "q", 5*time.Minute, "interval duration for rechecking queued storage diffs (ex: 5m30s)")
}
type Exporter interface {
Export() ([]transformer.EventTransformerInitializer, []transformer.StorageTransformerInitializer, []transformer.ContractTransformerInitializer)
}
func watchEthEvents(w *watcher.EventWatcher, wg *syn.WaitGroup) {
defer wg.Done()
// Execute over the EventTransformerInitializer set using the watcher
logWithCommand.Info("executing event transformers")
var recheck constants.TransformerExecution
if recheckHeadersArg {
recheck = constants.HeaderRecheck
} else {
recheck = constants.HeaderUnchecked
}
err := w.Execute(recheck)
if err != nil {
logWithCommand.Fatalf("error executing event watcher: %s", err.Error())
}
}
func watchEthStorage(w watcher.IStorageWatcher, wg *syn.WaitGroup) {
defer wg.Done()
// Execute over the StorageTransformerInitializer set using the storage watcher
logWithCommand.Info("executing storage transformers")
on := viper.GetBool("storageBackFill.on")
if on {
backFillStorage(w)
}
w.Execute(queueRecheckInterval, on)
}
func backFillStorage(w watcher.IStorageWatcher) {
rpcClient, _ := getClients()
// find min deployment block
minDeploymentBlock := constants.GetMinDeploymentBlock()
stateDiffFetcher := fetcher.NewStateDiffFetcher(rpcClient)
backFiller := storage.NewStorageBackFiller(stateDiffFetcher, storage.DefaultMaxBatchSize)
go w.BackFill(minDeploymentBlock, backFiller)
}
func watchEthContract(w *watcher.ContractWatcher, wg *syn.WaitGroup) {
defer wg.Done()
// Execute over the ContractTransformerInitializer set using the contract watcher
logWithCommand.Info("executing contract_watcher transformers")
ticker := time.NewTicker(pollingInterval)
defer ticker.Stop()
for range ticker.C {
w.Execute()
}
}


@@ -1,114 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/vulcanize/vulcanizedb/pkg/eth"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres/repositories"
"github.com/vulcanize/vulcanizedb/pkg/eth/history"
"github.com/vulcanize/vulcanizedb/utils"
)
// headerSyncCmd represents the headerSync command
var headerSyncCmd = &cobra.Command{
Use: "headerSync",
Short: "Syncs VulcanizeDB with local ethereum node's block headers",
Long: `Syncs VulcanizeDB with local ethereum node. Populates
Postgres with block headers.
./vulcanizedb headerSync --starting-block-number 0 --config public.toml
Expects ethereum node to be running and requires a .toml config:
[database]
name = "vulcanize_public"
hostname = "localhost"
port = 5432
[client]
ipcPath = "/Users/user/Library/Ethereum/geth.ipc"
`,
Run: func(cmd *cobra.Command, args []string) {
subCommand = cmd.CalledAs()
logWithCommand = *log.WithField("SubCommand", subCommand)
headerSync()
},
}
func init() {
rootCmd.AddCommand(headerSyncCmd)
headerSyncCmd.Flags().Int64VarP(&startingBlockNumber, "starting-block-number", "s", 0, "Block number to start syncing from")
}
func backFillAllHeaders(blockchain core.BlockChain, headerRepository datastore.HeaderRepository, missingBlocksPopulated chan int, startingBlockNumber int64) {
populated, err := history.PopulateMissingHeaders(blockchain, headerRepository, startingBlockNumber)
if err != nil {
// TODO Lots of possible errors in the call stack above. If errors occur, we still put
// 0 in the channel, triggering another round
logWithCommand.Error("backfillAllHeaders: Error populating headers: ", err)
}
missingBlocksPopulated <- populated
}
func headerSync() {
ticker := time.NewTicker(pollingInterval)
defer ticker.Stop()
blockChain := getBlockChain()
validateArgs(blockChain)
db := utils.LoadPostgres(databaseConfig, blockChain.Node())
headerRepository := repositories.NewHeaderRepository(&db)
validator := history.NewHeaderValidator(blockChain, headerRepository, validationWindow)
missingBlocksPopulated := make(chan int)
go backFillAllHeaders(blockChain, headerRepository, missingBlocksPopulated, startingBlockNumber)
for {
select {
case <-ticker.C:
window, err := validator.ValidateHeaders()
if err != nil {
logWithCommand.Error("headerSync: ValidateHeaders failed: ", err)
}
logWithCommand.Debug(window.GetString())
case n := <-missingBlocksPopulated:
if n == 0 {
time.Sleep(3 * time.Second)
}
go backFillAllHeaders(blockChain, headerRepository, missingBlocksPopulated, startingBlockNumber)
}
}
}
func validateArgs(blockChain *eth.BlockChain) {
lastBlock, err := blockChain.LastBlock()
if err != nil {
logWithCommand.Error("validateArgs: Error getting last block: ", err)
}
if lastBlock.Int64() == 0 {
logWithCommand.Fatal("geth initial: state sync not finished")
}
if startingBlockNumber > lastBlock.Int64() {
logWithCommand.Fatal("starting block number > current block number")
}
}
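Distilled, headerSync's loop is a ticker interleaved with a self-re-arming backfill goroutine that reports its result over a channel. A self-contained sketch of that pattern (the names are illustrative stand-ins, and the loop is bounded so the example terminates):

package main

import (
	"fmt"
	"time"
)

// backFill stands in for history.PopulateMissingHeaders; it reports how many
// headers it populated.
func backFill(populated chan<- int) {
	populated <- 0 // pretend nothing was missing
}

func main() {
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()
	populated := make(chan int)
	go backFill(populated)
	for i := 0; i < 5; i++ {
		select {
		case <-ticker.C:
			fmt.Println("validating recent headers") // validator.ValidateHeaders in the real loop
		case n := <-populated:
			if n == 0 {
				time.Sleep(50 * time.Millisecond) // back off when no headers were missing
			}
			go backFill(populated) // immediately start the next round
		}
	}
}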


@@ -19,11 +19,11 @@ import (
 log "github.com/sirupsen/logrus"
 "github.com/spf13/cobra"
 "github.com/spf13/viper"
-"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
-"github.com/vulcanize/vulcanizedb/pkg/ipfs"
-"github.com/vulcanize/vulcanizedb/pkg/super_node/resync"
-v "github.com/vulcanize/vulcanizedb/version"
+"github.com/vulcanize/ipfs-chain-watcher/pkg/shared"
+"github.com/vulcanize/ipfs-chain-watcher/pkg/ipfs"
+"github.com/vulcanize/ipfs-chain-watcher/pkg/resync"
+v "github.com/vulcanize/ipfs-chain-watcher/version"
 )
 // resyncCmd represents the resync command


@@ -17,38 +17,24 @@
 package cmd
 import (
-"errors"
 "fmt"
 "os"
 "strings"
 "time"
-"github.com/ethereum/go-ethereum/ethclient"
-"github.com/ethereum/go-ethereum/rpc"
 log "github.com/sirupsen/logrus"
 "github.com/spf13/cobra"
 "github.com/spf13/viper"
-"github.com/vulcanize/vulcanizedb/pkg/config"
-"github.com/vulcanize/vulcanizedb/pkg/eth"
-"github.com/vulcanize/vulcanizedb/pkg/eth/client"
-vRpc "github.com/vulcanize/vulcanizedb/pkg/eth/converters/rpc"
-"github.com/vulcanize/vulcanizedb/pkg/eth/core"
-"github.com/vulcanize/vulcanizedb/pkg/eth/node"
+"github.com/vulcanize/ipfs-chain-watcher/pkg/config"
 )
 var (
 cfgFile string
 databaseConfig config.Database
-genConfig config.Plugin
 ipc string
-queueRecheckInterval time.Duration
-startingBlockNumber int64
-storageDiffsPath string
-recheckHeadersArg bool
 subCommand string
 logWithCommand log.Entry
-storageDiffsSource string
 )
 const (
@@ -91,8 +77,6 @@ func initFuncs(cmd *cobra.Command, args []string) {
 func setViperConfigs() {
 ipc = viper.GetString("client.ipcpath")
-storageDiffsPath = viper.GetString("filesystem.storageDiffsPath")
-storageDiffsSource = viper.GetString("storageDiffs.source")
 databaseConfig = config.Database{
 Name: viper.GetString("database.name"),
 Hostname: viper.GetString("database.hostname"),
@@ -130,10 +114,6 @@ func init() {
 rootCmd.PersistentFlags().String("database-user", "", "database user")
 rootCmd.PersistentFlags().String("database-password", "", "database password")
 rootCmd.PersistentFlags().String("client-ipcPath", "", "location of geth.ipc file")
-rootCmd.PersistentFlags().String("client-levelDbPath", "", "location of levelDb chaindata")
-rootCmd.PersistentFlags().String("filesystem-storageDiffsPath", "", "location of storage diffs csv file")
-rootCmd.PersistentFlags().String("storageDiffs-source", "csv", "where to get the state diffs: csv or geth")
-rootCmd.PersistentFlags().String("exporter-name", "exporter", "name of exporter plugin")
 rootCmd.PersistentFlags().String("log-level", log.InfoLevel.String(), "Log level (trace, debug, info, warn, error, fatal, panic")
 viper.BindPFlag("logfile", rootCmd.PersistentFlags().Lookup("logfile"))
@@ -143,10 +123,6 @@ func init() {
 viper.BindPFlag("database.user", rootCmd.PersistentFlags().Lookup("database-user"))
 viper.BindPFlag("database.password", rootCmd.PersistentFlags().Lookup("database-password"))
 viper.BindPFlag("client.ipcPath", rootCmd.PersistentFlags().Lookup("client-ipcPath"))
-viper.BindPFlag("client.levelDbPath", rootCmd.PersistentFlags().Lookup("client-levelDbPath"))
-viper.BindPFlag("filesystem.storageDiffsPath", rootCmd.PersistentFlags().Lookup("filesystem-storageDiffsPath"))
-viper.BindPFlag("storageDiffs.source", rootCmd.PersistentFlags().Lookup("storageDiffs-source"))
-viper.BindPFlag("exporter.fileName", rootCmd.PersistentFlags().Lookup("exporter-name"))
 viper.BindPFlag("log.level", rootCmd.PersistentFlags().Lookup("log-level"))
 }
@@ -162,35 +138,3 @@ func initConfig() {
 log.Warn("No config file passed with --config flag")
 }
 }
-func getBlockChain() *eth.BlockChain {
-rpcClient, ethClient := getClients()
-vdbEthClient := client.NewEthClient(ethClient)
-vdbNode := node.MakeNode(rpcClient)
-transactionConverter := vRpc.NewRPCTransactionConverter(ethClient)
-return eth.NewBlockChain(vdbEthClient, rpcClient, vdbNode, transactionConverter)
-}
-func getClients() (client.RPCClient, *ethclient.Client) {
-rawRPCClient, err := rpc.Dial(ipc)
-if err != nil {
-logWithCommand.Fatal(err)
-}
-rpcClient := client.NewRPCClient(rawRPCClient, ipc)
-ethClient := ethclient.NewClient(rawRPCClient)
-return rpcClient, ethClient
-}
-func getWSClient() core.RPCClient {
-wsRPCpath := viper.GetString("client.wsPath")
-if wsRPCpath == "" {
-logWithCommand.Fatal(errors.New("getWSClient() was called but no ws rpc path is provided"))
-}
-wsRPCClient, dialErr := rpc.Dial(wsRPCpath)
-if dialErr != nil {
-logWithCommand.Fatal(dialErr)
-}
-return client.NewRPCClient(wsRPCClient, wsRPCpath)
-}


@@ -28,11 +28,11 @@ import (
 "github.com/spf13/cobra"
 "github.com/spf13/viper"
-"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
-"github.com/vulcanize/vulcanizedb/pkg/eth/client"
-"github.com/vulcanize/vulcanizedb/pkg/eth/core"
-"github.com/vulcanize/vulcanizedb/pkg/super_node"
-"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+"github.com/vulcanize/ipfs-chain-watcher/pkg/client"
+"github.com/vulcanize/ipfs-chain-watcher/pkg/core"
+"github.com/vulcanize/ipfs-chain-watcher/pkg/eth"
+"github.com/vulcanize/ipfs-chain-watcher/pkg/streamer"
+"github.com/vulcanize/ipfs-chain-watcher/pkg/watcher"
 )
 // streamEthSubscriptionCmd represents the streamEthSubscription command
@@ -64,7 +64,7 @@ func streamEthSubscription() {
 str := streamer.NewSuperNodeStreamer(rpcClient)
 // Buffered channel for reading subscription payloads
-payloadChan := make(chan super_node.SubscriptionPayload, 20000)
+payloadChan := make(chan watcher.SubscriptionPayload, 20000)
 // Subscribe to the super node service with the given config/filter parameters
 rlpParams, err := rlp.EncodeToBytes(ethSubConfig)
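The subscription filter parameters travel as plain RLP bytes. A round-trip sketch using go-ethereum's rlp package, with SubConfig as a hypothetical stand-in for the real ethSubConfig type:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

// SubConfig stands in for the eth subscription config the command encodes.
type SubConfig struct {
	StartingBlock uint64
	EndingBlock   uint64
}

func main() {
	rlpParams, err := rlp.EncodeToBytes(SubConfig{StartingBlock: 0, EndingBlock: 100})
	if err != nil {
		panic(err)
	}
	// The server side decodes the same bytes back into its own config type.
	var decoded SubConfig
	if err := rlp.DecodeBytes(rlpParams, &decoded); err != nil {
		panic(err)
	}
	fmt.Printf("round-tripped %d bytes: %+v\n", len(rlpParams), decoded)
}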


@@ -19,20 +19,20 @@ import (
 log "github.com/sirupsen/logrus"
 "github.com/spf13/cobra"
-v "github.com/vulcanize/vulcanizedb/version"
+v "github.com/vulcanize/ipfs-chain-watcher/version"
 )
 // versionCmd represents the version command
 var versionCmd = &cobra.Command{
 Use: "version",
-Short: "Prints the version of vulcanizeDB",
-Long: `Use this command to fetch the version of vulcanizeDB
-Usage: ./vulcanizedb version`,
+Short: "Prints the version of ipfs-chain-watcher",
+Long: `Use this command to fetch the version of ipfs-chain-watcher
+Usage: ./ipfs-chain-watcher version`,
 Run: func(cmd *cobra.Command, args []string) {
 subCommand = cmd.CalledAs()
 logWithCommand = *log.WithField("SubCommand", subCommand)
-logWithCommand.Infof("VulcanizeDB version: %s", v.VersionWithMeta)
+logWithCommand.Infof("ipfs-chain-watcher version: %s", v.VersionWithMeta)
 },
 }


@@ -25,17 +25,17 @@ import (
 "github.com/spf13/cobra"
 "github.com/spf13/viper"
-"github.com/vulcanize/vulcanizedb/pkg/ipfs"
-"github.com/vulcanize/vulcanizedb/pkg/super_node"
-"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
-v "github.com/vulcanize/vulcanizedb/version"
+"github.com/vulcanize/ipfs-chain-watcher/pkg/ipfs"
+"github.com/vulcanize/ipfs-chain-watcher/pkg/shared"
+"github.com/vulcanize/ipfs-chain-watcher/pkg/watcher"
+v "github.com/vulcanize/ipfs-chain-watcher/version"
 )
 // superNodeCmd represents the superNode command
 var superNodeCmd = &cobra.Command{
 Use: "superNode",
-Short: "VulcanizeDB SuperNode",
-Long: `This command configures a VulcanizeDB SuperNode.
+Short: "sync chain data into PG-IPFS",
+Long: `This command configures a VulcanizeDB ipfs-chain-watcher.
 The Sync process streams all chain data from the appropriate chain, processes this data into IPLD objects
 and publishes them to IPFS. It then indexes the CIDs against useful data fields/metadata in Postgres.
@@ -56,7 +56,7 @@ and fill in gaps in the data
 func superNode() {
 logWithCommand.Infof("running vdb version: %s", v.VersionWithMeta)
 logWithCommand.Debug("loading super node configuration variables")
-superNodeConfig, err := super_node.NewSuperNodeConfig()
+superNodeConfig, err := watcher.NewSuperNodeConfig()
 if err != nil {
 logWithCommand.Fatal(err)
 }
@@ -68,14 +68,14 @@ func superNode() {
 }
 wg := &sync.WaitGroup{}
 logWithCommand.Debug("initializing new super node service")
-superNode, err := super_node.NewSuperNode(superNodeConfig)
+superNode, err := watcher.NewSuperNode(superNodeConfig)
 if err != nil {
 logWithCommand.Fatal(err)
 }
 var forwardPayloadChan chan shared.ConvertedData
 if superNodeConfig.Serve {
 logWithCommand.Info("starting up super node servers")
-forwardPayloadChan = make(chan shared.ConvertedData, super_node.PayloadChanBufferSize)
+forwardPayloadChan = make(chan shared.ConvertedData, watcher.PayloadChanBufferSize)
 superNode.Serve(wg, forwardPayloadChan)
 if err := startServers(superNode, superNodeConfig); err != nil {
 logWithCommand.Fatal(err)
@@ -87,10 +87,10 @@ func superNode() {
 logWithCommand.Fatal(err)
 }
 }
-var backFiller super_node.BackFillInterface
+var backFiller watcher.BackFillInterface
 if superNodeConfig.BackFill {
 logWithCommand.Debug("initializing new super node backfill service")
-backFiller, err = super_node.NewBackFillService(superNodeConfig, forwardPayloadChan)
+backFiller, err = watcher.NewBackFillService(superNodeConfig, forwardPayloadChan)
 if err != nil {
 logWithCommand.Fatal(err)
 }
@@ -107,7 +107,7 @@ func superNode() {
 wg.Wait()
 }
-func startServers(superNode super_node.SuperNode, settings *super_node.Config) error {
+func startServers(superNode watcher.SuperNode, settings *watcher.Config) error {
 logWithCommand.Debug("starting up IPC server")
 _, _, err := rpc.StartIPCEndpoint(settings.IPCEndpoint, superNode.APIs())
 if err != nil {
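The Sync and Serve halves meet at forwardPayloadChan: Sync publishes converted payloads into a buffered channel that Serve drains for subscribers. A minimal sketch of that wiring, with ConvertedData reduced to a string:

package main

import (
	"fmt"
	"sync"
)

const payloadChanBufferSize = 8 // stand-in for watcher.PayloadChanBufferSize

func main() {
	var wg sync.WaitGroup
	forward := make(chan string, payloadChanBufferSize)
	wg.Add(1)
	go func() { // serve side: fan payloads out to subscribers
		defer wg.Done()
		for payload := range forward {
			fmt.Println("serving", payload)
		}
	}()
	for i := 0; i < 3; i++ { // sync side: publish converted chain data
		forward <- fmt.Sprintf("block %d", i)
	}
	close(forward)
	wg.Wait()
}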


@@ -1,11 +0,0 @@
-- +goose Up
CREATE TABLE public.addresses
(
id SERIAL PRIMARY KEY,
address character varying(42),
hashed_address character varying(66),
UNIQUE (address)
);
-- +goose Down
DROP TABLE public.addresses;
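These migration files use goose annotations: the statements under -- +goose Up run on apply, and those under -- +goose Down on rollback. Beyond the Makefile targets shown in the README, they can also be applied from Go; a sketch assuming the pressly/goose library API:

package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // Postgres driver
	"github.com/pressly/goose"
)

func main() {
	db, err := sql.Open("postgres", "host=localhost dbname=vulcanize_public sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	// Applies every pending -- +goose Up section in order; goose.Down(db, dir)
	// would roll back one step, like `make rollback NAME=vulcanize_public`.
	if err := goose.Up(db, "db/migrations"); err != nil {
		log.Fatal(err)
	}
}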


@@ -1,25 +0,0 @@
-- +goose Up
CREATE TABLE public.headers
(
id SERIAL PRIMARY KEY,
hash VARCHAR(66),
block_number BIGINT,
raw JSONB,
block_timestamp NUMERIC,
check_count INTEGER NOT NULL DEFAULT 0,
node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE,
eth_node_fingerprint VARCHAR(128),
UNIQUE (block_number, hash, eth_node_fingerprint)
);
CREATE INDEX headers_block_number
ON public.headers (block_number);
CREATE INDEX headers_block_timestamp
ON public.headers (block_timestamp);
-- +goose Down
DROP INDEX public.headers_block_number;
DROP INDEX public.headers_block_timestamp;
DROP TABLE public.headers;


@@ -1,8 +0,0 @@
-- +goose Up
CREATE TABLE public.checked_headers (
id SERIAL PRIMARY KEY,
header_id INTEGER UNIQUE NOT NULL REFERENCES headers (id) ON DELETE CASCADE
);
-- +goose Down
DROP TABLE public.checked_headers;


@@ -1,14 +0,0 @@
-- +goose Up
CREATE TABLE public.storage_diff
(
id SERIAL PRIMARY KEY,
block_height BIGINT,
block_hash BYTEA,
hashed_address BYTEA,
storage_key BYTEA,
storage_value BYTEA,
UNIQUE (block_height, block_hash, hashed_address, storage_key, storage_value)
);
-- +goose Down
DROP TABLE public.storage_diff;


@@ -1,9 +0,0 @@
-- +goose Up
CREATE TABLE public.queued_storage
(
id SERIAL PRIMARY KEY,
diff_id BIGINT UNIQUE NOT NULL REFERENCES public.storage_diff (id)
);
-- +goose Down
DROP TABLE public.queued_storage;


@@ -1,29 +0,0 @@
-- +goose Up
CREATE TABLE public.header_sync_transactions
(
id SERIAL PRIMARY KEY,
header_id INTEGER NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
hash VARCHAR(66),
gas_limit NUMERIC,
gas_price NUMERIC,
input_data BYTEA,
nonce NUMERIC,
raw BYTEA,
tx_from VARCHAR(44),
tx_index INTEGER,
tx_to VARCHAR(44),
"value" NUMERIC,
UNIQUE (header_id, hash)
);
CREATE INDEX header_sync_transactions_header
ON public.header_sync_transactions (header_id);
CREATE INDEX header_sync_transactions_tx_index
ON public.header_sync_transactions (tx_index);
-- +goose Down
DROP INDEX public.header_sync_transactions_header;
DROP INDEX public.header_sync_transactions_tx_index;
DROP TABLE header_sync_transactions;


@@ -1,27 +0,0 @@
-- +goose Up
CREATE TABLE header_sync_receipts
(
id SERIAL PRIMARY KEY,
transaction_id INTEGER NOT NULL REFERENCES header_sync_transactions (id) ON DELETE CASCADE,
header_id INTEGER NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
contract_address_id INTEGER NOT NULL REFERENCES addresses (id) ON DELETE CASCADE,
cumulative_gas_used NUMERIC,
gas_used NUMERIC,
state_root VARCHAR(66),
status INTEGER,
tx_hash VARCHAR(66),
rlp BYTEA,
UNIQUE (header_id, transaction_id)
);
CREATE INDEX header_sync_receipts_header
ON public.header_sync_receipts (header_id);
CREATE INDEX header_sync_receipts_transaction
ON public.header_sync_receipts (transaction_id);
-- +goose Down
DROP INDEX public.header_sync_receipts_header;
DROP INDEX public.header_sync_receipts_transaction;
DROP TABLE header_sync_receipts;


@@ -1,22 +0,0 @@
-- +goose Up
-- SQL in this section is executed when the migration is applied.
CREATE TABLE header_sync_logs
(
id SERIAL PRIMARY KEY,
header_id INTEGER NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
address INTEGER NOT NULL REFERENCES addresses (id) ON DELETE CASCADE,
topics BYTEA[],
data BYTEA,
block_number BIGINT,
block_hash VARCHAR(66),
tx_hash VARCHAR(66),
tx_index INTEGER,
log_index INTEGER,
raw JSONB,
transformed BOOL NOT NULL DEFAULT FALSE,
UNIQUE (header_id, tx_index, log_index)
);
-- +goose Down
-- SQL in this section is executed when the migration is rolled back.
DROP TABLE header_sync_logs;


@@ -1,12 +0,0 @@
-- +goose Up
-- SQL in this section is executed when the migration is applied.
CREATE TABLE public.watched_logs
(
id SERIAL PRIMARY KEY,
contract_address VARCHAR(42),
topic_zero VARCHAR(66)
);
-- +goose Down
-- SQL in this section is executed when the migration is rolled back.
DROP TABLE public.watched_logs;


@@ -6,8 +6,6 @@ COMMENT ON TABLE btc.queue_data IS E'@name BtcQueueData';
 COMMENT ON TABLE eth.transaction_cids IS E'@name EthTransactionCids';
 COMMENT ON TABLE eth.header_cids IS E'@name EthHeaderCids';
 COMMENT ON TABLE eth.queue_data IS E'@name EthQueueData';
-COMMENT ON TABLE public.headers IS E'@name EthHeaders';
-COMMENT ON COLUMN public.headers.node_id IS E'@name EthNodeID';
 COMMENT ON COLUMN public.nodes.node_id IS E'@name ChainNodeID';
 COMMENT ON COLUMN eth.header_cids.node_id IS E'@name EthNodeID';
 COMMENT ON COLUMN btc.header_cids.node_id IS E'@name BtcNodeID';


@@ -2,8 +2,8 @@
 -- PostgreSQL database dump
 --
--- Dumped from database version 10.10
--- Dumped by pg_dump version 10.10
+-- Dumped from database version 12.1
+-- Dumped by pg_dump version 12.1
 SET statement_timeout = 0;
 SET lock_timeout = 0;
@@ -30,23 +30,9 @@ CREATE SCHEMA btc;
 CREATE SCHEMA eth;
---
--- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: -
---
-CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog;
---
--- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: -
---
-COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language';
 SET default_tablespace = '';
-SET default_with_oids = false;
+SET default_table_access_method = heap;
 --
 -- Name: header_cids; Type: TABLE; Schema: btc; Owner: -
 --
@@ -561,37 +547,6 @@ CREATE SEQUENCE eth.uncle_cids_id_seq
 ALTER SEQUENCE eth.uncle_cids_id_seq OWNED BY eth.uncle_cids.id;
---
--- Name: addresses; Type: TABLE; Schema: public; Owner: -
---
-CREATE TABLE public.addresses (
-id integer NOT NULL,
-address character varying(42),
-hashed_address character varying(66)
-);
---
--- Name: addresses_id_seq; Type: SEQUENCE; Schema: public; Owner: -
---
-CREATE SEQUENCE public.addresses_id_seq
-AS integer
-START WITH 1
-INCREMENT BY 1
-NO MINVALUE
-NO MAXVALUE
-CACHE 1;
---
--- Name: addresses_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
---
-ALTER SEQUENCE public.addresses_id_seq OWNED BY public.addresses.id;
 --
 -- Name: blocks; Type: TABLE; Schema: public; Owner: -
 --
@@ -602,36 +557,6 @@ CREATE TABLE public.blocks (
 );
---
--- Name: checked_headers; Type: TABLE; Schema: public; Owner: -
---
-CREATE TABLE public.checked_headers (
-id integer NOT NULL,
-header_id integer NOT NULL
-);
---
--- Name: checked_headers_id_seq; Type: SEQUENCE; Schema: public; Owner: -
---
-CREATE SEQUENCE public.checked_headers_id_seq
-AS integer
-START WITH 1
-INCREMENT BY 1
-NO MINVALUE
-NO MAXVALUE
-CACHE 1;
---
--- Name: checked_headers_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
---
-ALTER SEQUENCE public.checked_headers_id_seq OWNED BY public.checked_headers.id;
 --
 -- Name: goose_db_version; Type: TABLE; Schema: public; Owner: -
 --
@@ -664,174 +589,6 @@ CREATE SEQUENCE public.goose_db_version_id_seq
 ALTER SEQUENCE public.goose_db_version_id_seq OWNED BY public.goose_db_version.id;
---
--- Name: header_sync_logs; Type: TABLE; Schema: public; Owner: -
---
-CREATE TABLE public.header_sync_logs (
-id integer NOT NULL,
-header_id integer NOT NULL,
-address integer NOT NULL,
-topics bytea[],
-data bytea,
-block_number bigint,
-block_hash character varying(66),
-tx_hash character varying(66),
-tx_index integer,
-log_index integer,
-raw jsonb,
-transformed boolean DEFAULT false NOT NULL
-);
---
--- Name: header_sync_logs_id_seq; Type: SEQUENCE; Schema: public; Owner: -
---
-CREATE SEQUENCE public.header_sync_logs_id_seq
-AS integer
-START WITH 1
-INCREMENT BY 1
-NO MINVALUE
-NO MAXVALUE
-CACHE 1;
---
--- Name: header_sync_logs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
---
-ALTER SEQUENCE public.header_sync_logs_id_seq OWNED BY public.header_sync_logs.id;
---
--- Name: header_sync_receipts; Type: TABLE; Schema: public; Owner: -
---
-CREATE TABLE public.header_sync_receipts (
-id integer NOT NULL,
-transaction_id integer NOT NULL,
-header_id integer NOT NULL,
-contract_address_id integer NOT NULL,
-cumulative_gas_used numeric,
-gas_used numeric,
-state_root character varying(66),
-status integer,
-tx_hash character varying(66),
-rlp bytea
-);
---
--- Name: header_sync_receipts_id_seq; Type: SEQUENCE; Schema: public; Owner: -
---
-CREATE SEQUENCE public.header_sync_receipts_id_seq
-AS integer
-START WITH 1
-INCREMENT BY 1
-NO MINVALUE
-NO MAXVALUE
-CACHE 1;
---
--- Name: header_sync_receipts_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
---
-ALTER SEQUENCE public.header_sync_receipts_id_seq OWNED BY public.header_sync_receipts.id;
---
--- Name: header_sync_transactions; Type: TABLE; Schema: public; Owner: -
---
-CREATE TABLE public.header_sync_transactions (
-id integer NOT NULL,
-header_id integer NOT NULL,
-hash character varying(66),
-gas_limit numeric,
-gas_price numeric,
-input_data bytea,
-nonce numeric,
-raw bytea,
-tx_from character varying(44),
-tx_index integer,
-tx_to character varying(44),
-value numeric
-);
---
--- Name: header_sync_transactions_id_seq; Type: SEQUENCE; Schema: public; Owner: -
---
-CREATE SEQUENCE public.header_sync_transactions_id_seq
-AS integer
-START WITH 1
-INCREMENT BY 1
-NO MINVALUE
-NO MAXVALUE
-CACHE 1;
---
--- Name: header_sync_transactions_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
---
-ALTER SEQUENCE public.header_sync_transactions_id_seq OWNED BY public.header_sync_transactions.id;
---
--- Name: headers; Type: TABLE; Schema: public; Owner: -
---
-CREATE TABLE public.headers (
-id integer NOT NULL,
-hash character varying(66),
-block_number bigint,
-raw jsonb,
-block_timestamp numeric,
-check_count integer DEFAULT 0 NOT NULL,
-node_id integer NOT NULL,
-eth_node_fingerprint character varying(128)
-);
---
--- Name: TABLE headers; Type: COMMENT; Schema: public; Owner: -
---
-COMMENT ON TABLE public.headers IS '@name EthHeaders';
---
--- Name: COLUMN headers.node_id; Type: COMMENT; Schema: public; Owner: -
---
-COMMENT ON COLUMN public.headers.node_id IS '@name EthNodeID';
---
--- Name: headers_id_seq; Type: SEQUENCE; Schema: public; Owner: -
---
-CREATE SEQUENCE public.headers_id_seq
-AS integer
-START WITH 1
-INCREMENT BY 1
-NO MINVALUE
-NO MAXVALUE
-CACHE 1;
---
--- Name: headers_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
---
-ALTER SEQUENCE public.headers_id_seq OWNED BY public.headers.id;
 --
 -- Name: nodes; Type: TABLE; Schema: public; Owner: -
 --
@@ -879,101 +636,6 @@ CREATE SEQUENCE public.nodes_id_seq
 ALTER SEQUENCE public.nodes_id_seq OWNED BY public.nodes.id;
---
--- Name: queued_storage; Type: TABLE; Schema: public; Owner: -
---
-CREATE TABLE public.queued_storage (
-id integer NOT NULL,
-diff_id bigint NOT NULL
-);
---
--- Name: queued_storage_id_seq; Type: SEQUENCE; Schema: public; Owner: -
---
-CREATE SEQUENCE public.queued_storage_id_seq
-AS integer
-START WITH 1
-INCREMENT BY 1
-NO MINVALUE
-NO MAXVALUE
-CACHE 1;
---
--- Name: queued_storage_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
---
-ALTER SEQUENCE public.queued_storage_id_seq OWNED BY public.queued_storage.id;
---
--- Name: storage_diff; Type: TABLE; Schema: public; Owner: -
---
-CREATE TABLE public.storage_diff (
-id integer NOT NULL,
-block_height bigint,
-block_hash bytea,
-hashed_address bytea,
-storage_key bytea,
-storage_value bytea
-);
---
--- Name: storage_diff_id_seq; Type: SEQUENCE; Schema: public; Owner: -
---
-CREATE SEQUENCE public.storage_diff_id_seq
-AS integer
-START WITH 1
-INCREMENT BY 1
-NO MINVALUE
-NO MAXVALUE
-CACHE 1;
---
--- Name: storage_diff_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
---
-ALTER SEQUENCE public.storage_diff_id_seq OWNED BY public.storage_diff.id;
--
-- Name: watched_logs; Type: TABLE; Schema: public; Owner: -
--
CREATE TABLE public.watched_logs (
id integer NOT NULL,
contract_address character varying(42),
topic_zero character varying(66)
);
--
-- Name: watched_logs_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
CREATE SEQUENCE public.watched_logs_id_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
--
-- Name: watched_logs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
ALTER SEQUENCE public.watched_logs_id_seq OWNED BY public.watched_logs.id;
--
-- Name: header_cids id; Type: DEFAULT; Schema: btc; Owner: -
--
@@ -1065,20 +727,6 @@ ALTER TABLE ONLY eth.transaction_cids ALTER COLUMN id SET DEFAULT nextval('eth.t
ALTER TABLE ONLY eth.uncle_cids ALTER COLUMN id SET DEFAULT nextval('eth.uncle_cids_id_seq'::regclass);
--
-- Name: addresses id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.addresses ALTER COLUMN id SET DEFAULT nextval('public.addresses_id_seq'::regclass);
--
-- Name: checked_headers id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.checked_headers ALTER COLUMN id SET DEFAULT nextval('public.checked_headers_id_seq'::regclass);
--
-- Name: goose_db_version id; Type: DEFAULT; Schema: public; Owner: -
--
@@ -1086,34 +734,6 @@ ALTER TABLE ONLY public.checked_headers ALTER COLUMN id SET DEFAULT nextval('pub
ALTER TABLE ONLY public.goose_db_version ALTER COLUMN id SET DEFAULT nextval('public.goose_db_version_id_seq'::regclass);
--
-- Name: header_sync_logs id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.header_sync_logs ALTER COLUMN id SET DEFAULT nextval('public.header_sync_logs_id_seq'::regclass);
--
-- Name: header_sync_receipts id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.header_sync_receipts ALTER COLUMN id SET DEFAULT nextval('public.header_sync_receipts_id_seq'::regclass);
--
-- Name: header_sync_transactions id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.header_sync_transactions ALTER COLUMN id SET DEFAULT nextval('public.header_sync_transactions_id_seq'::regclass);
--
-- Name: headers id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.headers ALTER COLUMN id SET DEFAULT nextval('public.headers_id_seq'::regclass);
--
-- Name: nodes id; Type: DEFAULT; Schema: public; Owner: -
--
@@ -1121,27 +741,6 @@ ALTER TABLE ONLY public.headers ALTER COLUMN id SET DEFAULT nextval('public.head
ALTER TABLE ONLY public.nodes ALTER COLUMN id SET DEFAULT nextval('public.nodes_id_seq'::regclass);
--
-- Name: queued_storage id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.queued_storage ALTER COLUMN id SET DEFAULT nextval('public.queued_storage_id_seq'::regclass);
--
-- Name: storage_diff id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.storage_diff ALTER COLUMN id SET DEFAULT nextval('public.storage_diff_id_seq'::regclass);
--
-- Name: watched_logs id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.watched_logs ALTER COLUMN id SET DEFAULT nextval('public.watched_logs_id_seq'::regclass);
--
-- Name: header_cids header_cids_block_number_block_hash_key; Type: CONSTRAINT; Schema: btc; Owner: -
--
@@ -1350,22 +949,6 @@ ALTER TABLE ONLY eth.uncle_cids
ADD CONSTRAINT uncle_cids_pkey PRIMARY KEY (id);
--
-- Name: addresses addresses_address_key; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.addresses
ADD CONSTRAINT addresses_address_key UNIQUE (address);
--
-- Name: addresses addresses_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.addresses
ADD CONSTRAINT addresses_pkey PRIMARY KEY (id);
--
-- Name: blocks blocks_key_key; Type: CONSTRAINT; Schema: public; Owner: -
--
@@ -1374,22 +957,6 @@ ALTER TABLE ONLY public.blocks
ADD CONSTRAINT blocks_key_key UNIQUE (key);
--
-- Name: checked_headers checked_headers_header_id_key; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.checked_headers
ADD CONSTRAINT checked_headers_header_id_key UNIQUE (header_id);
--
-- Name: checked_headers checked_headers_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.checked_headers
ADD CONSTRAINT checked_headers_pkey PRIMARY KEY (id);
--
-- Name: goose_db_version goose_db_version_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
@@ -1398,70 +965,6 @@ ALTER TABLE ONLY public.goose_db_version
ADD CONSTRAINT goose_db_version_pkey PRIMARY KEY (id);
--
-- Name: header_sync_logs header_sync_logs_header_id_tx_index_log_index_key; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.header_sync_logs
ADD CONSTRAINT header_sync_logs_header_id_tx_index_log_index_key UNIQUE (header_id, tx_index, log_index);
--
-- Name: header_sync_logs header_sync_logs_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.header_sync_logs
ADD CONSTRAINT header_sync_logs_pkey PRIMARY KEY (id);
--
-- Name: header_sync_receipts header_sync_receipts_header_id_transaction_id_key; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.header_sync_receipts
ADD CONSTRAINT header_sync_receipts_header_id_transaction_id_key UNIQUE (header_id, transaction_id);
--
-- Name: header_sync_receipts header_sync_receipts_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.header_sync_receipts
ADD CONSTRAINT header_sync_receipts_pkey PRIMARY KEY (id);
--
-- Name: header_sync_transactions header_sync_transactions_header_id_hash_key; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.header_sync_transactions
ADD CONSTRAINT header_sync_transactions_header_id_hash_key UNIQUE (header_id, hash);
--
-- Name: header_sync_transactions header_sync_transactions_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.header_sync_transactions
ADD CONSTRAINT header_sync_transactions_pkey PRIMARY KEY (id);
--
-- Name: headers headers_block_number_hash_eth_node_fingerprint_key; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.headers
ADD CONSTRAINT headers_block_number_hash_eth_node_fingerprint_key UNIQUE (block_number, hash, eth_node_fingerprint);
--
-- Name: headers headers_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.headers
ADD CONSTRAINT headers_pkey PRIMARY KEY (id);
--
-- Name: nodes node_uc; Type: CONSTRAINT; Schema: public; Owner: -
--
@@ -1478,88 +981,6 @@ ALTER TABLE ONLY public.nodes
ADD CONSTRAINT nodes_pkey PRIMARY KEY (id);
--
-- Name: queued_storage queued_storage_diff_id_key; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.queued_storage
ADD CONSTRAINT queued_storage_diff_id_key UNIQUE (diff_id);
--
-- Name: queued_storage queued_storage_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.queued_storage
ADD CONSTRAINT queued_storage_pkey PRIMARY KEY (id);
--
-- Name: storage_diff storage_diff_block_height_block_hash_hashed_address_storage_key; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.storage_diff
ADD CONSTRAINT storage_diff_block_height_block_hash_hashed_address_storage_key UNIQUE (block_height, block_hash, hashed_address, storage_key, storage_value);
--
-- Name: storage_diff storage_diff_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.storage_diff
ADD CONSTRAINT storage_diff_pkey PRIMARY KEY (id);
--
-- Name: watched_logs watched_logs_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.watched_logs
ADD CONSTRAINT watched_logs_pkey PRIMARY KEY (id);
--
-- Name: header_sync_receipts_header; Type: INDEX; Schema: public; Owner: -
--
CREATE INDEX header_sync_receipts_header ON public.header_sync_receipts USING btree (header_id);
--
-- Name: header_sync_receipts_transaction; Type: INDEX; Schema: public; Owner: -
--
CREATE INDEX header_sync_receipts_transaction ON public.header_sync_receipts USING btree (transaction_id);
--
-- Name: header_sync_transactions_header; Type: INDEX; Schema: public; Owner: -
--
CREATE INDEX header_sync_transactions_header ON public.header_sync_transactions USING btree (header_id);
--
-- Name: header_sync_transactions_tx_index; Type: INDEX; Schema: public; Owner: -
--
CREATE INDEX header_sync_transactions_tx_index ON public.header_sync_transactions USING btree (tx_index);
--
-- Name: headers_block_number; Type: INDEX; Schema: public; Owner: -
--
CREATE INDEX headers_block_number ON public.headers USING btree (block_number);
--
-- Name: headers_block_timestamp; Type: INDEX; Schema: public; Owner: -
--
CREATE INDEX headers_block_timestamp ON public.headers USING btree (block_timestamp);
--
-- Name: header_cids header_cids_node_id_fkey; Type: FK CONSTRAINT; Schema: btc; Owner: -
--
@@ -1648,78 +1069,6 @@ ALTER TABLE ONLY eth.uncle_cids
ADD CONSTRAINT uncle_cids_header_id_fkey FOREIGN KEY (header_id) REFERENCES eth.header_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
--
-- Name: checked_headers checked_headers_header_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.checked_headers
ADD CONSTRAINT checked_headers_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.headers(id) ON DELETE CASCADE;
--
-- Name: header_sync_logs header_sync_logs_address_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.header_sync_logs
ADD CONSTRAINT header_sync_logs_address_fkey FOREIGN KEY (address) REFERENCES public.addresses(id) ON DELETE CASCADE;
--
-- Name: header_sync_logs header_sync_logs_header_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.header_sync_logs
ADD CONSTRAINT header_sync_logs_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.headers(id) ON DELETE CASCADE;
--
-- Name: header_sync_receipts header_sync_receipts_contract_address_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.header_sync_receipts
ADD CONSTRAINT header_sync_receipts_contract_address_id_fkey FOREIGN KEY (contract_address_id) REFERENCES public.addresses(id) ON DELETE CASCADE;
--
-- Name: header_sync_receipts header_sync_receipts_header_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.header_sync_receipts
ADD CONSTRAINT header_sync_receipts_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.headers(id) ON DELETE CASCADE;
--
-- Name: header_sync_receipts header_sync_receipts_transaction_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.header_sync_receipts
ADD CONSTRAINT header_sync_receipts_transaction_id_fkey FOREIGN KEY (transaction_id) REFERENCES public.header_sync_transactions(id) ON DELETE CASCADE;
--
-- Name: header_sync_transactions header_sync_transactions_header_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.header_sync_transactions
ADD CONSTRAINT header_sync_transactions_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.headers(id) ON DELETE CASCADE;
--
-- Name: headers headers_node_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.headers
ADD CONSTRAINT headers_node_id_fkey FOREIGN KEY (node_id) REFERENCES public.nodes(id) ON DELETE CASCADE;
--
-- Name: queued_storage queued_storage_diff_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.queued_storage
ADD CONSTRAINT queued_storage_diff_id_fkey FOREIGN KEY (diff_id) REFERENCES public.storage_diff(id);
--
-- PostgreSQL database dump complete
--
@@ -8,14 +8,14 @@ RUN apk add busybox-extras
FROM golang:1.12.4 as builder
# Get and build vulcanizedb
-ADD . /go/src/github.com/vulcanize/vulcanizedb
+ADD . /go/src/github.com/vulcanize/ipfs-chain-watcher
# Build migration tool
RUN go get -u -d github.com/pressly/goose/cmd/goose
WORKDIR /go/src/github.com/pressly/goose/cmd/goose
RUN GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -tags='no_mysql no_sqlite' -o goose .
-WORKDIR /go/src/github.com/vulcanize/vulcanizedb
+WORKDIR /go/src/github.com/vulcanize/ipfs-chain-watcher
# app container
FROM alpine
@@ -29,12 +29,12 @@ USER $USER
# chown first so dir is writable
# note: using $USER is merged, but not in the stable release yet
-COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/vulcanizedb/dockerfiles/migrations/startup_script.sh .
+COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/ipfs-chain-watcher/dockerfiles/migrations/startup_script.sh .
# keep binaries immutable
COPY --from=builder /go/src/github.com/pressly/goose/cmd/goose/goose goose
-COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/db/migrations migrations/vulcanizedb
+COPY --from=builder /go/src/github.com/vulcanize/ipfs-chain-watcher/db/migrations migrations/vulcanizedb
# XXX dir is already writeable RUN touch vulcanizedb.log
CMD ["./startup_script.sh"]
@@ -5,8 +5,8 @@ RUN apk --update --no-cache add make git g++ linux-headers
RUN apk add busybox-extras
# Get and build vulcanizedb
-ADD . /go/src/github.com/vulcanize/vulcanizedb
+ADD . /go/src/github.com/vulcanize/ipfs-chain-watcher
-WORKDIR /go/src/github.com/vulcanize/vulcanizedb
+WORKDIR /go/src/github.com/vulcanize/ipfs-chain-watcher
RUN GO111MODULE=on GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o vulcanizedb .
# Build migration tool
@@ -14,7 +14,7 @@ RUN go get -u -d github.com/pressly/goose/cmd/goose
WORKDIR /go/src/github.com/pressly/goose/cmd/goose
RUN GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -tags='no_mysql no_sqlite' -o goose .
-WORKDIR /go/src/github.com/vulcanize/vulcanizedb
+WORKDIR /go/src/github.com/vulcanize/ipfs-chain-watcher
# app container
FROM alpine
@@ -31,16 +31,16 @@ USER $USER
# chown first so dir is writable
# note: using $USER is merged, but not in the stable release yet
-COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/vulcanizedb/$CONFIG_FILE config.toml
+COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/ipfs-chain-watcher/$CONFIG_FILE config.toml
-COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/vulcanizedb/dockerfiles/super_node/startup_script.sh .
+COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/ipfs-chain-watcher/dockerfiles/super_node/startup_script.sh .
-COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/vulcanizedb/dockerfiles/super_node/entrypoint.sh .
+COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/ipfs-chain-watcher/dockerfiles/super_node/entrypoint.sh .
# keep binaries immutable
-COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/vulcanizedb vulcanizedb
+COPY --from=builder /go/src/github.com/vulcanize/ipfs-chain-watcher/vulcanizedb vulcanizedb
COPY --from=builder /go/src/github.com/pressly/goose/cmd/goose/goose goose
-COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/db/migrations migrations/vulcanizedb
+COPY --from=builder /go/src/github.com/vulcanize/ipfs-chain-watcher/db/migrations migrations/vulcanizedb
-COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/environments environments
+COPY --from=builder /go/src/github.com/vulcanize/ipfs-chain-watcher/environments environments
EXPOSE $EXPOSE_PORT_1
EXPOSE $EXPOSE_PORT_2
@@ -63,7 +63,7 @@ these are run independently, instead of using `composeAndExecute`, a couple of t
* The `execute` command does not require the plugin transformer dependencies be located in their `$GOPATH` directories,
instead it expects a .so file (of the name specified in the config file) to be in
-`$GOPATH/src/github.com/vulcanize/vulcanizedb/plugins/` and, as noted above, also expects the plugin db migrations to
+`$GOPATH/src/github.com/vulcanize/ipfs-chain-watcher/plugins/` and, as noted above, also expects the plugin db migrations to
have already been run against the database.
* Usage:
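The invocation itself sits outside this hunk; a minimal sketch, assuming the binary has been built at the repo root and the plugin config lives under `environments/` (both assumptions, not shown here):

```
./vulcanizedb execute --config=./environments/config.toml
```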
@@ -103,7 +103,7 @@ The config provides information for composing a set of transformers from externa
wsPath = "ws://127.0.0.1:8546"
[exporter]
-home = "github.com/vulcanize/vulcanizedb"
+home = "github.com/vulcanize/ipfs-chain-watcher"
name = "exampleTransformerExporter"
save = false
transformerNames = [
@@ -137,7 +137,7 @@ The config provides information for composing a set of transformers from externa
migrations = "to/db/migrations"
rank = "1"
```
-- `home` is the name of the package you are building the plugin for, in most cases this is github.com/vulcanize/vulcanizedb
+- `home` is the name of the package you are building the plugin for, in most cases this is github.com/vulcanize/ipfs-chain-watcher
- `name` is the name used for the plugin files (.so and .go)
- `save` indicates whether or not the user wants to save the .go file instead of removing it after .so compilation. Sometimes useful for debugging/trouble-shooting purposes.
- `transformerNames` is the list of the names of the transformers we are composing together, so we know how to access their submaps in the exporter map
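A hedged end-to-end sketch of using such a config (the `composeAndExecute` subcommand is named in the hunk context above; the config path is an assumed example):

```
./vulcanizedb composeAndExecute --config=./environments/example.toml
```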
@@ -174,7 +174,7 @@ The general structure of a plugin .go file, and what we would see built with the
package main
import (
-interface1 "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
+interface1 "github.com/vulcanize/ipfs-chain-watcher/libraries/shared/transformer"
transformer1 "github.com/account/repo/path/to/transformer1"
transformer2 "github.com/account/repo/path/to/transformer2"
transformer3 "github.com/account/repo/path/to/transformer3"
@@ -40,10 +40,10 @@ An example of how to subscribe to a real-time Ethereum data feed from the super
"github.com/ethereum/go-ethereum/rpc"
"github.com/spf13/viper"
-"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
+"github.com/vulcanize/ipfs-chain-watcher/libraries/shared/streamer"
-"github.com/vulcanize/vulcanizedb/pkg/eth/client"
+"github.com/vulcanize/ipfs-chain-watcher/pkg/eth/client"
-"github.com/vulcanize/vulcanizedb/pkg/super_node"
+"github.com/vulcanize/ipfs-chain-watcher/pkg/super_node"
-"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+"github.com/vulcanize/ipfs-chain-watcher/pkg/super_node/eth"
)
config, _ := eth.NewEthSubscriptionConfig()
@@ -162,10 +162,10 @@ An example of how to subscribe to a real-time Bitcoin data feed from the super n
"github.com/ethereum/go-ethereum/rpc"
"github.com/spf13/viper"
-"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
+"github.com/vulcanize/ipfs-chain-watcher/libraries/shared/streamer"
-"github.com/vulcanize/vulcanizedb/pkg/eth/client"
+"github.com/vulcanize/ipfs-chain-watcher/pkg/eth/client"
-"github.com/vulcanize/vulcanizedb/pkg/super_node"
+"github.com/vulcanize/ipfs-chain-watcher/pkg/super_node"
-"github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
+"github.com/vulcanize/ipfs-chain-watcher/pkg/super_node/btc"
)
config, _ := btc.NewBtcSubscriptionConfig()
@@ -138,9 +138,9 @@ Finally, we can begin the vulcanizeDB process itself.
Start by downloading vulcanizedb and moving into the repo:
-`go get github.com/vulcanize/vulcanizedb`
+`go get github.com/vulcanize/ipfs-chain-watcher`
-`cd $GOPATH/src/github.com/vulcanize/vulcanizedb`
+`cd $GOPATH/src/github.com/vulcanize/ipfs-chain-watcher`
Run the db migrations against the Postgres database we created for vulcanizeDB:
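The migration command itself is elided by this hunk; a minimal sketch using the goose tool built in the Dockerfiles above, assuming a default local Postgres and the `vulcanize_public` database named in the configs elsewhere in this commit:

```
goose -dir db/migrations postgres "postgres://postgres@localhost:5432/vulcanize_public?sslmode=disable" up
```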
@@ -1,38 +0,0 @@
[database]
name = "vulcanize_public"
hostname = "localhost"
port = 5432
[client]
ipcPath = ""
[exporter]
home = "github.com/vulcanize/vulcanizedb"
name = "accountTransformerExporter"
save = false
transformerNames = [
"account"
]
[exporter.account]
path = "transformers/account/light/initializer"
type = "eth_contract"
repository = "github.com/vulcanize/account_transformers"
migrations = "db/migrations"
rank = "0"
[token]
addresses = [
"0x58b6A8A3302369DAEc383334672404Ee733aB239",
"0x862Da0A691bb0b74038377295f8fF523D0493eB4",
]
[token.equivalents]
0x0000000000085d4780B73119b644AE5ecd22b376 = [
"0x8dd5fbCe2F6a956C3022bA3663759011Dd51e73E"
]
0x58b6A8A3302369DAEc383334672404Ee733aB239 = [
"0x8e306b005773bee6bA6A6e8972Bc79D766cC15c8"
]
[account]
start = 0
addresses = []
@@ -1,26 +0,0 @@
[database]
name = "vulcanize_public"
hostname = "localhost"
port = 5432
[client]
ipcPath = ""
[contract]
network = ""
addresses = [
"0x314159265dD8dbb310642f98f50C066173C1259b",
"0x8dd5fbCe2F6a956C3022bA3663759011Dd51e73E"
]
[contract.0x314159265dD8dbb310642f98f50C066173C1259b]
abi = '[{"constant":true,"inputs":[{"name":"node","type":"bytes32"}],"name":"resolver","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"node","type":"bytes32"}],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"node","type":"bytes32"},{"name":"label","type":"bytes32"},{"name":"owner","type":"address"}],"name":"setSubnodeOwner","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"node","type":"bytes32"},{"name":"ttl","type":"uint64"}],"name":"setTTL","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"node","type":"bytes32"}],"name":"ttl","outputs":[{"name":"","type":"uint64"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"node","type":"bytes32"},{"name":"resolver","type":"address"}],"name":"setResolver","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"node","type":"bytes32"},{"name":"owner","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"node","type":"bytes32"},{"indexed":false,"name":"owner","type":"address"}],"name":"Transfer","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"node","type":"bytes32"},{"indexed":true,"name":"label","type":"bytes32"},{"indexed":false,"name":"owner","type":"address"}],"name":"NewOwner","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"node","type":"bytes32"},{"indexed":false,"name":"resolver","type":"address"}],"name":"NewResolver","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"node","type":"bytes32"},{"indexed":false,"name":"ttl","type":"uint64"}],"name":"NewTTL","type":"event"}]'
startingBlock = 3327417
[contract.0x8dd5fbCe2F6a956C3022bA3663759011Dd51e73E]
events = [
"Transfer",
"Issue"
]
methods = [
"balanceOf"
]
startingBlock = 5197514
@@ -1,8 +0,0 @@
[database]
name = "vulcanize_public"
hostname = "localhost"
port = 5432
[client]
ipcPath = <local node's IPC filepath>
levelDbPath = <local node's LevelDB chaindata filepath>
go.mod
@@ -1,4 +1,4 @@
-module github.com/vulcanize/vulcanizedb
+module github.com/vulcanize/ipfs-chain-watcher
go 1.12
go.sum
@@ -83,7 +83,6 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0=
github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis=
-github.com/dave/jennifer v1.3.0 h1:p3tl41zjjCZTNBytMwrUuiAnherNUZktlhPTKoF/sEk=
github.com/dave/jennifer v1.3.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -611,7 +610,6 @@ github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXx
github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
github.com/polydawn/refmt v0.0.0-20190731040541-eff0b363297a h1:TdavzKWkPcC2G+6rKJclm/JfrWC6WZFfLUR7EJJX8MA=
github.com/polydawn/refmt v0.0.0-20190731040541-eff0b363297a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
-github.com/pressly/goose v2.6.0+incompatible h1:3f8zIQ8rfgP9tyI0Hmcs2YNAqUCL1c+diLe3iU8Qd/k=
github.com/pressly/goose v2.6.0+incompatible/go.mod h1:m+QHWCqxR3k8D9l7qfzuC/djtlfzxr34mozWDYEu1z8=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
@@ -690,8 +688,6 @@ github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:s
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/vulcanize/go-ethereum v1.5.10-0.20200311182536-d07dc803d290 h1:uMWt+x6JhVT7GyL983weZSxv1zDBxvGlI9HNkcTnUeg=
-github.com/vulcanize/go-ethereum v1.5.10-0.20200311182536-d07dc803d290/go.mod h1:7oC0Ni6dosMv5pxMigm6s0hN8g4haJMBnqmmo0D9YfQ=
github.com/vulcanize/go-ethereum v1.9.11-statediff-0.0.2 h1:ebv2bWocCmNKGnpHtRjSWoTpkgyEbRBb028PanH43H8=
github.com/vulcanize/go-ethereum v1.9.11-statediff-0.0.2/go.mod h1:7oC0Ni6dosMv5pxMigm6s0hN8g4haJMBnqmmo0D9YfQ=
github.com/vulcanize/go-ipfs v0.4.22-alpha h1:W+6njT14KWllMhABRFtPndqHw8SHCt5SqD4YX528kxM=
@@ -788,7 +784,6 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1,62 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package integration
import (
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/eth"
"github.com/vulcanize/vulcanizedb/pkg/eth/client"
vRpc "github.com/vulcanize/vulcanizedb/pkg/eth/converters/rpc"
"github.com/vulcanize/vulcanizedb/pkg/eth/node"
"github.com/vulcanize/vulcanizedb/test_config"
)
var _ = Describe("Rewards calculations", func() {
It("calculates a block reward for a real block", func() {
rawRPCClient, err := rpc.Dial(test_config.TestClient.IPCPath)
Expect(err).NotTo(HaveOccurred())
rpcClient := client.NewRPCClient(rawRPCClient, test_config.TestClient.IPCPath)
ethClient := ethclient.NewClient(rawRPCClient)
blockChainClient := client.NewEthClient(ethClient)
node := node.MakeNode(rpcClient)
transactionConverter := vRpc.NewRPCTransactionConverter(ethClient)
blockChain := eth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter)
block, err := blockChain.GetBlockByNumber(1071819)
Expect(err).ToNot(HaveOccurred())
Expect(block.Reward).To(Equal("5313550000000000000"))
})
It("calculates an uncle reward for a real block", func() {
rawRPCClient, err := rpc.Dial(test_config.TestClient.IPCPath)
Expect(err).NotTo(HaveOccurred())
rpcClient := client.NewRPCClient(rawRPCClient, test_config.TestClient.IPCPath)
ethClient := ethclient.NewClient(rawRPCClient)
blockChainClient := client.NewEthClient(ethClient)
node := node.MakeNode(rpcClient)
transactionConverter := vRpc.NewRPCTransactionConverter(ethClient)
blockChain := eth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter)
block, err := blockChain.GetBlockByNumber(1071819)
Expect(err).ToNot(HaveOccurred())
Expect(block.UnclesReward).To(Equal("6875000000000000000"))
})
})
@@ -1,110 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package integration
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/eth"
"github.com/vulcanize/vulcanizedb/pkg/eth/client"
rpc2 "github.com/vulcanize/vulcanizedb/pkg/eth/converters/rpc"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/node"
"github.com/vulcanize/vulcanizedb/pkg/eth/testing"
"github.com/vulcanize/vulcanizedb/test_config"
)
var _ = Describe("Reading contracts", func() {
Describe("Getting a contract attribute", func() {
It("retrieves the event log for a specific block and contract", func() {
expectedLogZero := core.FullSyncLog{
BlockNumber: 4703824,
TxHash: "0xf896bfd1eb539d881a1a31102b78de9f25cd591bf1fe1924b86148c0b205fd5d",
Address: "0xd26114cd6ee289accf82350c8d8487fedb8a0c07",
Topics: core.Topics{
0: "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
1: "0x000000000000000000000000fbb1b73c4f0bda4f67dca266ce6ef42f520fbb98",
2: "0x000000000000000000000000d26114cd6ee289accf82350c8d8487fedb8a0c07",
},
Index: 19,
Data: "0x0000000000000000000000000000000000000000000000000c7d713b49da0000"}
rawRPCClient, err := rpc.Dial(test_config.TestClient.IPCPath)
Expect(err).NotTo(HaveOccurred())
rpcClient := client.NewRPCClient(rawRPCClient, test_config.TestClient.IPCPath)
ethClient := ethclient.NewClient(rawRPCClient)
blockChainClient := client.NewEthClient(ethClient)
node := node.MakeNode(rpcClient)
transactionConverter := rpc2.NewRPCTransactionConverter(ethClient)
blockChain := eth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter)
contract := testing.SampleContract()
logs, err := blockChain.GetFullSyncLogs(contract, big.NewInt(4703824), nil)
Expect(err).To(BeNil())
Expect(len(logs)).To(Equal(3))
Expect(logs[0]).To(Equal(expectedLogZero))
})
It("returns and empty log array when no events for a given block / contract combo", func() {
rawRPCClient, err := rpc.Dial(test_config.TestClient.IPCPath)
Expect(err).NotTo(HaveOccurred())
rpcClient := client.NewRPCClient(rawRPCClient, test_config.TestClient.IPCPath)
ethClient := ethclient.NewClient(rawRPCClient)
blockChainClient := client.NewEthClient(ethClient)
node := node.MakeNode(rpcClient)
transactionConverter := rpc2.NewRPCTransactionConverter(ethClient)
blockChain := eth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter)
logs, err := blockChain.GetFullSyncLogs(core.Contract{Hash: "0x123"}, big.NewInt(4703824), nil)
Expect(err).To(BeNil())
Expect(len(logs)).To(Equal(0))
})
})
Describe("Fetching Contract data", func() {
It("returns the correct attribute for a real contract", func() {
rawRPCClient, err := rpc.Dial(test_config.TestClient.IPCPath)
Expect(err).NotTo(HaveOccurred())
rpcClient := client.NewRPCClient(rawRPCClient, test_config.TestClient.IPCPath)
ethClient := ethclient.NewClient(rawRPCClient)
blockChainClient := client.NewEthClient(ethClient)
node := node.MakeNode(rpcClient)
transactionConverter := rpc2.NewRPCTransactionConverter(ethClient)
blockChain := eth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter)
contract := testing.SampleContract()
var balance = new(big.Int)
args := make([]interface{}, 1)
args[0] = common.HexToHash("0xd26114cd6ee289accf82350c8d8487fedb8a0c07")
err = blockChain.FetchContractData(contract.Abi, "0xd26114cd6ee289accf82350c8d8487fedb8a0c07", "balanceOf", args, &balance, 5167471)
Expect(err).NotTo(HaveOccurred())
expected := new(big.Int)
expected.SetString("10897295492887612977137", 10)
Expect(balance).To(Equal(expected))
})
})
})
@@ -1,496 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package integration
import (
"fmt"
"strings"
"github.com/ethereum/go-ethereum/common"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/header/transformer"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/constants"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/helpers/test_helpers"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/helpers/test_helpers/mocks"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres/repositories"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
var _ = Describe("contractWatcher headerSync transformer", func() {
var db *postgres.DB
var err error
var blockChain core.BlockChain
var headerRepository repositories.HeaderRepository
var headerID int64
var ensAddr = strings.ToLower(constants.EnsContractAddress) // 0x314159265dd8dbb310642f98f50c066173c1259b
var tusdAddr = strings.ToLower(constants.TusdContractAddress) // 0x8dd5fbce2f6a956c3022ba3663759011dd51e73e
BeforeEach(func() {
db, blockChain = test_helpers.SetupDBandBC()
headerRepository = repositories.NewHeaderRepository(db)
})
AfterEach(func() {
test_helpers.TearDown(db)
})
Describe("Init", func() {
It("Initializes transformer's contract objects", func() {
_, insertErr := headerRepository.CreateOrUpdateHeader(mocks.MockHeader1)
Expect(insertErr).NotTo(HaveOccurred())
_, insertErrTwo := headerRepository.CreateOrUpdateHeader(mocks.MockHeader3)
Expect(insertErrTwo).NotTo(HaveOccurred())
t := transformer.NewTransformer(test_helpers.TusdConfig, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
c, ok := t.Contracts[tusdAddr]
Expect(ok).To(Equal(true))
// TODO: Fix this
// This test sometimes randomly fails because
// for some reason the starting block number is not updated from
// its original value (5197514) to the block number (6194632)
// of the earliest header (mocks.MockHeader1) in the repository
// It is not clear how this happens without one of the above insertErrs
// having been thrown and without any errors thrown during the Init() call
Expect(c.StartingBlock).To(Equal(int64(6194632)))
Expect(c.Abi).To(Equal(constants.TusdAbiString))
Expect(c.Name).To(Equal("TrueUSD"))
Expect(c.Address).To(Equal(tusdAddr))
})
It("initializes when no headers available in db", func() {
t := transformer.NewTransformer(test_helpers.TusdConfig, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
})
It("Does nothing if nothing if no addresses are configured", func() {
_, insertErr := headerRepository.CreateOrUpdateHeader(mocks.MockHeader1)
Expect(insertErr).NotTo(HaveOccurred())
_, insertErrTwo := headerRepository.CreateOrUpdateHeader(mocks.MockHeader3)
Expect(insertErrTwo).NotTo(HaveOccurred())
var testConf config.ContractConfig
testConf = test_helpers.TusdConfig
testConf.Addresses = nil
t := transformer.NewTransformer(testConf, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
_, ok := t.Contracts[tusdAddr]
Expect(ok).To(Equal(false))
})
})
Describe("Execute- against TrueUSD contract", func() {
BeforeEach(func() {
header1, err := blockChain.GetHeaderByNumber(6791668)
Expect(err).ToNot(HaveOccurred())
header2, err := blockChain.GetHeaderByNumber(6791669)
Expect(err).ToNot(HaveOccurred())
header3, err := blockChain.GetHeaderByNumber(6791670)
Expect(err).ToNot(HaveOccurred())
headerRepository.CreateOrUpdateHeader(header1)
headerID, err = headerRepository.CreateOrUpdateHeader(header2)
Expect(err).ToNot(HaveOccurred())
headerRepository.CreateOrUpdateHeader(header3)
})
It("Transforms watched contract data into custom repositories", func() {
t := transformer.NewTransformer(test_helpers.TusdConfig, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
log := test_helpers.HeaderSyncTransferLog{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.transfer_event", tusdAddr)).StructScan(&log)
Expect(err).ToNot(HaveOccurred())
// We don't know vulcID, so compare individual fields instead of complete structures
Expect(log.HeaderID).To(Equal(headerID))
Expect(log.From).To(Equal("0x1062a747393198f70F71ec65A582423Dba7E5Ab3"))
Expect(log.To).To(Equal("0x2930096dB16b4A44Ecd4084EA4bd26F7EeF1AEf0"))
Expect(log.Value).To(Equal("9998940000000000000000"))
})
It("Keeps track of contract-related addresses while transforming event data if they need to be used for later method polling", func() {
var testConf config.ContractConfig
testConf = test_helpers.TusdConfig
testConf.Methods = map[string][]string{
tusdAddr: {"balanceOf"},
}
t := transformer.NewTransformer(testConf, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
c, ok := t.Contracts[tusdAddr]
Expect(ok).To(Equal(true))
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
Expect(len(c.EmittedAddrs)).To(Equal(4))
Expect(len(c.EmittedHashes)).To(Equal(0))
b, ok := c.EmittedAddrs[common.HexToAddress("0x1062a747393198f70F71ec65A582423Dba7E5Ab3")]
Expect(ok).To(Equal(true))
Expect(b).To(Equal(true))
b, ok = c.EmittedAddrs[common.HexToAddress("0x2930096dB16b4A44Ecd4084EA4bd26F7EeF1AEf0")]
Expect(ok).To(Equal(true))
Expect(b).To(Equal(true))
b, ok = c.EmittedAddrs[common.HexToAddress("0x571A326f5B15E16917dC17761c340c1ec5d06f6d")]
Expect(ok).To(Equal(true))
Expect(b).To(Equal(true))
b, ok = c.EmittedAddrs[common.HexToAddress("0xFBb1b73C4f0BDa4f67dcA266ce6Ef42f520fBB98")]
Expect(ok).To(Equal(true))
Expect(b).To(Equal(true))
_, ok = c.EmittedAddrs[common.HexToAddress("0x09BbBBE21a5975cAc061D82f7b843b1234567890")]
Expect(ok).To(Equal(false))
_, ok = c.EmittedAddrs[common.HexToAddress("0x")]
Expect(ok).To(Equal(false))
_, ok = c.EmittedAddrs[""]
Expect(ok).To(Equal(false))
_, ok = c.EmittedAddrs[common.HexToAddress("0x09THISE21a5IS5cFAKE1D82fAND43bCE06MADEUP")]
Expect(ok).To(Equal(false))
})
It("Polls given methods using generated token holder address", func() {
var testConf config.ContractConfig
testConf = test_helpers.TusdConfig
testConf.Methods = map[string][]string{
tusdAddr: {"balanceOf"},
}
t := transformer.NewTransformer(testConf, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
res := test_helpers.BalanceOf{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.balanceof_method WHERE who_ = '0x1062a747393198f70F71ec65A582423Dba7E5Ab3' AND block = '6791669'", tusdAddr)).StructScan(&res)
Expect(err).ToNot(HaveOccurred())
Expect(res.Balance).To(Equal("55849938025000000000000"))
Expect(res.TokenName).To(Equal("TrueUSD"))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.balanceof_method WHERE who_ = '0x09BbBBE21a5975cAc061D82f7b843b1234567890' AND block = '6791669'", tusdAddr)).StructScan(&res)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("no rows in result set"))
})
It("Fails if initialization has not been done", func() {
t := transformer.NewTransformer(test_helpers.TusdConfig, blockChain, db)
err = t.Execute()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("transformer has no initialized contracts"))
})
})
Describe("Execute- against ENS registry contract", func() {
BeforeEach(func() {
header1, err := blockChain.GetHeaderByNumber(6885695)
Expect(err).ToNot(HaveOccurred())
header2, err := blockChain.GetHeaderByNumber(6885696)
Expect(err).ToNot(HaveOccurred())
header3, err := blockChain.GetHeaderByNumber(6885697)
Expect(err).ToNot(HaveOccurred())
headerRepository.CreateOrUpdateHeader(header1)
headerID, err = headerRepository.CreateOrUpdateHeader(header2)
Expect(err).ToNot(HaveOccurred())
headerRepository.CreateOrUpdateHeader(header3)
})
It("Transforms watched contract data into custom repositories", func() {
t := transformer.NewTransformer(test_helpers.ENSConfig, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
Expect(t.Start).To(Equal(int64(6885698)))
log := test_helpers.HeaderSyncNewOwnerLog{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.newowner_event", ensAddr)).StructScan(&log)
Expect(err).ToNot(HaveOccurred())
// We don't know vulcID, so compare individual fields instead of complete structures
Expect(log.HeaderID).To(Equal(headerID))
Expect(log.Node).To(Equal("0x93cdeb708b7545dc668eb9280176169d1c33cfd8ed6f04690a0bcc88a93fc4ae"))
Expect(log.Label).To(Equal("0x95832c7a47ff8a7840e28b78ce695797aaf402b1c186bad9eca28842625b5047"))
Expect(log.Owner).To(Equal("0x6090A6e47849629b7245Dfa1Ca21D94cd15878Ef"))
})
It("Keeps track of contract-related hashes while transforming event data if they need to be used for later method polling", func() {
var testConf config.ContractConfig
testConf = test_helpers.ENSConfig
testConf.Methods = map[string][]string{
ensAddr: {"owner"},
}
t := transformer.NewTransformer(testConf, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
c, ok := t.Contracts[ensAddr]
Expect(ok).To(Equal(true))
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
Expect(len(c.EmittedHashes)).To(Equal(2))
Expect(len(c.EmittedAddrs)).To(Equal(0))
b, ok := c.EmittedHashes[common.HexToHash("0x93cdeb708b7545dc668eb9280176169d1c33cfd8ed6f04690a0bcc88a93fc4ae")]
Expect(ok).To(Equal(true))
Expect(b).To(Equal(true))
b, ok = c.EmittedHashes[common.HexToHash("0x95832c7a47ff8a7840e28b78ce695797aaf402b1c186bad9eca28842625b5047")]
Expect(ok).To(Equal(true))
Expect(b).To(Equal(true))
// Doesn't keep track of address since it wouldn't be used in calling the 'owner' method
_, ok = c.EmittedAddrs[common.HexToAddress("0x6090A6e47849629b7245Dfa1Ca21D94cd15878Ef")]
Expect(ok).To(Equal(false))
})
It("Polls given method using list of collected hashes", func() {
var testConf config.ContractConfig
testConf = test_helpers.ENSConfig
testConf.Methods = map[string][]string{
ensAddr: {"owner"},
}
t := transformer.NewTransformer(testConf, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
res := test_helpers.Owner{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.owner_method WHERE node_ = '0x93cdeb708b7545dc668eb9280176169d1c33cfd8ed6f04690a0bcc88a93fc4ae' AND block = '6885696'", ensAddr)).StructScan(&res)
Expect(err).ToNot(HaveOccurred())
Expect(res.Address).To(Equal("0x6090A6e47849629b7245Dfa1Ca21D94cd15878Ef"))
Expect(res.TokenName).To(Equal(""))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.owner_method WHERE node_ = '0x95832c7a47ff8a7840e28b78ce695797aaf402b1c186bad9eca28842625b5047' AND block = '6885696'", ensAddr)).StructScan(&res)
Expect(err).ToNot(HaveOccurred())
Expect(res.Address).To(Equal("0x0000000000000000000000000000000000000000"))
Expect(res.TokenName).To(Equal(""))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.owner_method WHERE node_ = '0x9THIS110dcc444fIS242510c09bbAbe21aFAKEcacNODE82f7b843HASH61ba391' AND block = '6885696'", ensAddr)).StructScan(&res)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("no rows in result set"))
})
It("It does not persist events if they do not pass the emitted arg filter", func() {
var testConf config.ContractConfig
testConf = test_helpers.ENSConfig
testConf.EventArgs = map[string][]string{
ensAddr: {"fake_filter_value"},
}
t := transformer.NewTransformer(testConf, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
log := test_helpers.HeaderSyncNewOwnerLog{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.newowner_event", ensAddr)).StructScan(&log)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("does not exist"))
})
It("If a method arg filter is applied, only those arguments are used in polling", func() {
var testConf config.ContractConfig
testConf = test_helpers.ENSConfig
testConf.MethodArgs = map[string][]string{
ensAddr: {"0x93cdeb708b7545dc668eb9280176169d1c33cfd8ed6f04690a0bcc88a93fc4ae"},
}
testConf.Methods = map[string][]string{
ensAddr: {"owner"},
}
t := transformer.NewTransformer(testConf, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
res := test_helpers.Owner{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.owner_method WHERE node_ = '0x93cdeb708b7545dc668eb9280176169d1c33cfd8ed6f04690a0bcc88a93fc4ae' AND block = '6885696'", ensAddr)).StructScan(&res)
Expect(err).ToNot(HaveOccurred())
Expect(res.Address).To(Equal("0x6090A6e47849629b7245Dfa1Ca21D94cd15878Ef"))
Expect(res.TokenName).To(Equal(""))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.owner_method WHERE node_ = '0x95832c7a47ff8a7840e28b78ce695797aaf402b1c186bad9eca28842625b5047' AND block = '6885696'", ensAddr)).StructScan(&res)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("no rows in result set"))
})
})
Describe("Execute- against both ENS and TrueUSD", func() {
BeforeEach(func() {
for i := 6885692; i <= 6885701; i++ {
header, err := blockChain.GetHeaderByNumber(int64(i))
Expect(err).ToNot(HaveOccurred())
_, err = headerRepository.CreateOrUpdateHeader(header)
Expect(err).ToNot(HaveOccurred())
}
})
It("Transforms watched contract data into custom repositories", func() {
t := transformer.NewTransformer(test_helpers.ENSandTusdConfig, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
Expect(t.Start).To(Equal(int64(6885702)))
newOwnerLog := test_helpers.HeaderSyncNewOwnerLog{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.newowner_event", ensAddr)).StructScan(&newOwnerLog)
Expect(err).ToNot(HaveOccurred())
// We don't know vulcID, so compare individual fields instead of complete structures
Expect(newOwnerLog.Node).To(Equal("0x93cdeb708b7545dc668eb9280176169d1c33cfd8ed6f04690a0bcc88a93fc4ae"))
Expect(newOwnerLog.Label).To(Equal("0x95832c7a47ff8a7840e28b78ce695797aaf402b1c186bad9eca28842625b5047"))
Expect(newOwnerLog.Owner).To(Equal("0x6090A6e47849629b7245Dfa1Ca21D94cd15878Ef"))
transferLog := test_helpers.HeaderSyncTransferLog{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.transfer_event", tusdAddr)).StructScan(&transferLog)
Expect(err).ToNot(HaveOccurred())
// We don't know vulcID, so compare individual fields instead of complete structures
Expect(transferLog.From).To(Equal("0x8cA465764873E71CEa525F5EB6AE973d650c22C2"))
Expect(transferLog.To).To(Equal("0xc338482360651E5D30BEd77b7c85358cbBFB2E0e"))
Expect(transferLog.Value).To(Equal("2800000000000000000000"))
})
It("Marks header checked for a contract that has no logs at that header", func() {
t := transformer.NewTransformer(test_helpers.ENSandTusdConfig, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
Expect(t.Start).To(Equal(int64(6885702)))
newOwnerLog := test_helpers.HeaderSyncNewOwnerLog{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.newowner_event", ensAddr)).StructScan(&newOwnerLog)
Expect(err).ToNot(HaveOccurred())
transferLog := test_helpers.HeaderSyncTransferLog{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.transfer_event", tusdAddr)).StructScan(&transferLog)
Expect(err).ToNot(HaveOccurred())
Expect(transferLog.HeaderID).ToNot(Equal(newOwnerLog.HeaderID))
type checkedHeader struct {
ID int64 `db:"id"`
HeaderID int64 `db:"header_id"`
NewOwner int64 `db:"newowner_0x314159265dd8dbb310642f98f50c066173c1259b"`
Transfer int64 `db:"transfer_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e"`
}
transferCheckedHeader := new(checkedHeader)
err = db.QueryRowx("SELECT * FROM public.checked_headers WHERE header_id = $1", transferLog.HeaderID).StructScan(transferCheckedHeader)
Expect(err).ToNot(HaveOccurred())
Expect(transferCheckedHeader.Transfer).To(Equal(int64(1)))
Expect(transferCheckedHeader.NewOwner).To(Equal(int64(1)))
newOwnerCheckedHeader := new(checkedHeader)
err = db.QueryRowx("SELECT * FROM public.checked_headers WHERE header_id = $1", newOwnerLog.HeaderID).StructScan(newOwnerCheckedHeader)
Expect(err).ToNot(HaveOccurred())
Expect(newOwnerCheckedHeader.NewOwner).To(Equal(int64(1)))
Expect(newOwnerCheckedHeader.Transfer).To(Equal(int64(1)))
})
It("Keeps track of contract-related hashes and addresses while transforming event data if they need to be used for later method polling", func() {
testConf := test_helpers.ENSandTusdConfig
testConf.Methods = map[string][]string{
ensAddr: {"owner"},
tusdAddr: {"balanceOf"},
}
t := transformer.NewTransformer(testConf, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
ens, ok := t.Contracts[ensAddr]
Expect(ok).To(Equal(true))
tusd, ok := t.Contracts[tusdAddr]
Expect(ok).To(Equal(true))
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
Expect(len(ens.EmittedHashes)).To(Equal(2))
Expect(len(ens.EmittedAddrs)).To(Equal(0))
Expect(len(tusd.EmittedAddrs)).To(Equal(2))
Expect(len(tusd.EmittedHashes)).To(Equal(0))
b, ok := ens.EmittedHashes[common.HexToHash("0x93cdeb708b7545dc668eb9280176169d1c33cfd8ed6f04690a0bcc88a93fc4ae")]
Expect(ok).To(Equal(true))
Expect(b).To(Equal(true))
b, ok = ens.EmittedHashes[common.HexToHash("0x95832c7a47ff8a7840e28b78ce695797aaf402b1c186bad9eca28842625b5047")]
Expect(ok).To(Equal(true))
Expect(b).To(Equal(true))
b, ok = tusd.EmittedAddrs[common.HexToAddress("0x8cA465764873E71CEa525F5EB6AE973d650c22C2")]
Expect(ok).To(Equal(true))
Expect(b).To(Equal(true))
b, ok = tusd.EmittedAddrs[common.HexToAddress("0xc338482360651E5D30BEd77b7c85358cbBFB2E0e")]
Expect(ok).To(Equal(true))
Expect(b).To(Equal(true))
_, ok = tusd.EmittedAddrs[common.HexToAddress("0x6090A6e47849629b7245Dfa1Ca21D94cd15878Ef")]
Expect(ok).To(Equal(false))
})
It("Polls given methods for each contract, using list of collected values", func() {
testConf := test_helpers.ENSandTusdConfig
testConf.Methods = map[string][]string{
ensAddr: {"owner"},
tusdAddr: {"balanceOf"},
}
t := transformer.NewTransformer(testConf, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
owner := test_helpers.Owner{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.owner_method WHERE node_ = '0x93cdeb708b7545dc668eb9280176169d1c33cfd8ed6f04690a0bcc88a93fc4ae' AND block = '6885696'", ensAddr)).StructScan(&owner)
Expect(err).ToNot(HaveOccurred())
Expect(owner.Address).To(Equal("0x6090A6e47849629b7245Dfa1Ca21D94cd15878Ef"))
Expect(owner.TokenName).To(Equal(""))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.owner_method WHERE node_ = '0x95832c7a47ff8a7840e28b78ce695797aaf402b1c186bad9eca28842625b5047' AND block = '6885696'", ensAddr)).StructScan(&owner)
Expect(err).ToNot(HaveOccurred())
Expect(owner.Address).To(Equal("0x0000000000000000000000000000000000000000"))
Expect(owner.TokenName).To(Equal(""))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.owner_method WHERE node_ = '0x95832c7a47ff8a7840e28b78ceMADEUPaaf4HASHc186badTHItransformers.8IS625bFAKE' AND block = '6885696'", ensAddr)).StructScan(&owner)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("no rows in result set"))
bal := test_helpers.BalanceOf{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.balanceof_method WHERE who_ = '0x8cA465764873E71CEa525F5EB6AE973d650c22C2' AND block = '6885701'", tusdAddr)).StructScan(&bal)
Expect(err).ToNot(HaveOccurred())
Expect(bal.Balance).To(Equal("1954436000000000000000"))
Expect(bal.TokenName).To(Equal("TrueUSD"))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.balanceof_method WHERE who_ = '0x09BbBBE21a5975cAc061D82f7b843b1234567890' AND block = '6885701'", tusdAddr)).StructScan(&bal)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("no rows in result set"))
})
})
})

View File

@ -1,121 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package integration_test
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/eth"
"github.com/vulcanize/vulcanizedb/pkg/eth/client"
rpc2 "github.com/vulcanize/vulcanizedb/pkg/eth/converters/rpc"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/node"
"github.com/vulcanize/vulcanizedb/test_config"
)
var _ = Describe("Reading from the Geth blockchain", func() {
var blockChain *eth.BlockChain
BeforeEach(func() {
rawRPCClient, err := rpc.Dial(test_config.TestClient.IPCPath)
Expect(err).NotTo(HaveOccurred())
rpcClient := client.NewRPCClient(rawRPCClient, test_config.TestClient.IPCPath)
ethClient := ethclient.NewClient(rawRPCClient)
blockChainClient := client.NewEthClient(ethClient)
node := node.MakeNode(rpcClient)
transactionConverter := rpc2.NewRPCTransactionConverter(ethClient)
blockChain = eth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter)
})
It("retrieves the genesis block and first block", func(done Done) {
genesisBlock, err := blockChain.GetBlockByNumber(int64(0))
Expect(err).ToNot(HaveOccurred())
firstBlock, err := blockChain.GetBlockByNumber(int64(1))
Expect(err).ToNot(HaveOccurred())
lastBlockNumber, err := blockChain.LastBlock()
Expect(err).NotTo(HaveOccurred())
Expect(genesisBlock.Number).To(Equal(int64(0)))
Expect(firstBlock.Number).To(Equal(int64(1)))
Expect(lastBlockNumber.Int64()).To(BeNumerically(">", 0))
close(done)
}, 15)
It("retrieves the node info", func(done Done) {
node := blockChain.Node()
Expect(node.GenesisBlock).ToNot(BeNil())
Expect(node.NetworkID).To(Equal("1.000000"))
Expect(len(node.ID)).ToNot(BeZero())
Expect(node.ClientName).ToNot(BeZero())
close(done)
}, 15)
It("retrieves transaction", func() {
// actual transaction: https://etherscan.io/tx/0x44d462f2a19ad267e276b234a62c542fc91c974d2e4754a325ca405f95440255
txHash := common.HexToHash("0x44d462f2a19ad267e276b234a62c542fc91c974d2e4754a325ca405f95440255")
transactions, err := blockChain.GetTransactions([]common.Hash{txHash})
Expect(err).NotTo(HaveOccurred())
Expect(len(transactions)).To(Equal(1))
expectedData := []byte{149, 227, 197, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 160, 85, 105, 13, 157, 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7,
241, 202, 218, 90, 30, 178, 234, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 92, 155, 193, 43}
expectedRaw := []byte{248, 201, 9, 132, 59, 154, 202, 0, 131, 1, 102, 93, 148, 44, 75, 208, 100, 185, 152, 131,
128, 118, 250, 52, 26, 131, 208, 7, 252, 47, 165, 9, 87, 128, 184, 100, 149, 227, 197, 11, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 160, 85, 105, 13, 157, 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 241, 202, 218, 90, 30, 178, 234, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 92, 155, 193, 43, 37, 160, 237, 184, 236, 248, 23, 152,
53, 238, 44, 215, 181, 234, 229, 157, 246, 212, 178, 88, 25, 116, 134, 163, 124, 64, 2, 66, 25, 118, 1, 253, 27,
101, 160, 36, 226, 116, 43, 147, 236, 124, 76, 227, 250, 228, 168, 22, 19, 248, 155, 248, 151, 219, 14, 1, 186,
159, 35, 154, 22, 222, 123, 254, 147, 63, 221}
expectedModel := core.TransactionModel{
Data: expectedData,
From: "0x3b08b99441086edd66f36f9f9aee733280698378",
GasLimit: 91741,
GasPrice: 1000000000,
Hash: "0x44d462f2a19ad267e276b234a62c542fc91c974d2e4754a325ca405f95440255",
Nonce: 9,
Raw: expectedRaw,
Receipt: core.Receipt{},
To: "0x2c4bd064b998838076fa341a83d007fc2fa50957",
TxIndex: 30,
Value: "0",
}
Expect(transactions[0]).To(Equal(expectedModel))
})
//Benchmarking test: remove skip to test performance of block retrieval
XMeasure("retrieving n blocks", func(b Benchmarker) {
b.Time("runtime", func() {
var blocks []core.Block
n := 10
for i := 5327459; i > 5327459-n; i-- {
block, err := blockChain.GetBlockByNumber(int64(i))
Expect(err).ToNot(HaveOccurred())
blocks = append(blocks, block)
}
Expect(len(blocks)).To(Equal(n))
})
}, 10)
})

View File

@ -1,57 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package integration_test
import (
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/eth"
"github.com/vulcanize/vulcanizedb/pkg/eth/client"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/constants"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/getter"
rpc2 "github.com/vulcanize/vulcanizedb/pkg/eth/converters/rpc"
"github.com/vulcanize/vulcanizedb/pkg/eth/node"
"github.com/vulcanize/vulcanizedb/test_config"
)
var _ = Describe("Interface Getter", func() {
Describe("GetAbi", func() {
It("Constructs and returns a custom abi based on results from supportsInterface calls", func() {
expectedABI := `[` + constants.AddrChangeInterface + `,` + constants.NameChangeInterface + `,` + constants.ContentChangeInterface + `,` + constants.AbiChangeInterface + `,` + constants.PubkeyChangeInterface + `]`
con := test_config.TestClient
testIPC := con.IPCPath
blockNumber := int64(6885696)
rawRpcClient, err := rpc.Dial(testIPC)
Expect(err).NotTo(HaveOccurred())
rpcClient := client.NewRPCClient(rawRpcClient, testIPC)
ethClient := ethclient.NewClient(rawRpcClient)
blockChainClient := client.NewEthClient(ethClient)
node := node.MakeNode(rpcClient)
transactionConverter := rpc2.NewRPCTransactionConverter(ethClient)
blockChain := eth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter)
interfaceGetter := getter.NewInterfaceGetter(blockChain)
abi, err := interfaceGetter.GetABI(constants.PublicResolverAddress, blockNumber)
Expect(err).NotTo(HaveOccurred())
Expect(abi).To(Equal(expectedABI))
_, err = eth.ParseAbi(abi)
Expect(err).ToNot(HaveOccurred())
})
})
})

View File

@ -1,252 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package integration_test
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/constants"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/contract"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/helpers/test_helpers"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/poller"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/types"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
var _ = Describe("Poller", func() {
var contractPoller poller.Poller
var con *contract.Contract
var db *postgres.DB
var bc core.BlockChain
AfterEach(func() {
test_helpers.TearDown(db)
})
Describe("Full sync mode", func() {
BeforeEach(func() {
db, bc = test_helpers.SetupDBandBC()
contractPoller = poller.NewPoller(bc, db, types.FullSync)
})
Describe("PollContract", func() {
It("Polls specified contract methods using contract's argument list", func() {
con = test_helpers.SetupTusdContract(nil, []string{"balanceOf"})
Expect(con.Abi).To(Equal(constants.TusdAbiString))
con.StartingBlock = 6707322
con.AddEmittedAddr(common.HexToAddress("0xfE9e8709d3215310075d67E3ed32A380CCf451C8"), common.HexToAddress("0x3f5CE5FBFe3E9af3971dD833D26bA9b5C936f0bE"))
err := contractPoller.PollContract(*con, 6707323)
Expect(err).ToNot(HaveOccurred())
scanStruct := test_helpers.BalanceOf{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM full_%s.balanceof_method WHERE who_ = '0xfE9e8709d3215310075d67E3ed32A380CCf451C8' AND block = '6707322'", constants.TusdContractAddress)).StructScan(&scanStruct)
Expect(err).ToNot(HaveOccurred())
Expect(scanStruct.Balance).To(Equal("66386309548896882859581786"))
Expect(scanStruct.TokenName).To(Equal("TrueUSD"))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM full_%s.balanceof_method WHERE who_ = '0xfE9e8709d3215310075d67E3ed32A380CCf451C8' AND block = '6707323'", constants.TusdContractAddress)).StructScan(&scanStruct)
Expect(err).ToNot(HaveOccurred())
Expect(scanStruct.Balance).To(Equal("66386309548896882859581786"))
Expect(scanStruct.TokenName).To(Equal("TrueUSD"))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM full_%s.balanceof_method WHERE who_ = '0x3f5CE5FBFe3E9af3971dD833D26bA9b5C936f0bE' AND block = '6707322'", constants.TusdContractAddress)).StructScan(&scanStruct)
Expect(err).ToNot(HaveOccurred())
Expect(scanStruct.Balance).To(Equal("17982350181394112023885864"))
Expect(scanStruct.TokenName).To(Equal("TrueUSD"))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM full_%s.balanceof_method WHERE who_ = '0x3f5CE5FBFe3E9af3971dD833D26bA9b5C936f0bE' AND block = '6707323'", constants.TusdContractAddress)).StructScan(&scanStruct)
Expect(err).ToNot(HaveOccurred())
Expect(scanStruct.Balance).To(Equal("17982350181394112023885864"))
Expect(scanStruct.TokenName).To(Equal("TrueUSD"))
})
It("Polls specified contract methods using contract's hash list", func() {
con = test_helpers.SetupENSContract(nil, []string{"owner"})
Expect(con.Abi).To(Equal(constants.ENSAbiString))
Expect(len(con.Methods)).To(Equal(1))
con.AddEmittedHash(common.HexToHash("0x93cdeb708b7545dc668eb9280176169d1c33cfd8ed6f04690a0bcc88a93fc4ae"), common.HexToHash("0x7e74a86b6e146964fb965db04dc2590516da77f720bb6759337bf5632415fd86"))
err := contractPoller.PollContractAt(*con, 6885877)
Expect(err).ToNot(HaveOccurred())
scanStruct := test_helpers.Owner{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM full_%s.owner_method WHERE node_ = '0x7e74a86b6e146964fb965db04dc2590516da77f720bb6759337bf5632415fd86' AND block = '6885877'", constants.EnsContractAddress)).StructScan(&scanStruct)
Expect(err).ToNot(HaveOccurred())
Expect(scanStruct.Address).To(Equal("0x546aA2EaE2514494EeaDb7bbb35243348983C59d"))
Expect(scanStruct.TokenName).To(Equal("ENS-Registry"))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM full_%s.owner_method WHERE node_ = '0x93cdeb708b7545dc668eb9280176169d1c33cfd8ed6f04690a0bcc88a93fc4ae' AND block = '6885877'", constants.EnsContractAddress)).StructScan(&scanStruct)
Expect(err).ToNot(HaveOccurred())
Expect(scanStruct.Address).To(Equal("0x6090A6e47849629b7245Dfa1Ca21D94cd15878Ef"))
Expect(scanStruct.TokenName).To(Equal("ENS-Registry"))
})
It("Does not poll and persist any methods if none are specified", func() {
con = test_helpers.SetupTusdContract(nil, nil)
Expect(con.Abi).To(Equal(constants.TusdAbiString))
con.StartingBlock = 6707322
con.AddEmittedAddr(common.HexToAddress("0xfE9e8709d3215310075d67E3ed32A380CCf451C8"), common.HexToAddress("0x3f5CE5FBFe3E9af3971dD833D26bA9b5C936f0bE"))
err := contractPoller.PollContract(*con, 6707323)
Expect(err).ToNot(HaveOccurred())
scanStruct := test_helpers.BalanceOf{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM full_%s.balanceof_method WHERE who_ = '0xfE9e8709d3215310075d67E3ed32A380CCf451C8' AND block = '6707322'", constants.TusdContractAddress)).StructScan(&scanStruct)
Expect(err).To(HaveOccurred())
})
})
Describe("FetchContractData", func() {
It("Calls a single contract method", func() {
var name = new(string)
err := contractPoller.FetchContractData(constants.TusdAbiString, constants.TusdContractAddress, "name", nil, &name, 6197514)
Expect(err).ToNot(HaveOccurred())
Expect(*name).To(Equal("TrueUSD"))
})
})
})
Describe("Header sync mode", func() {
BeforeEach(func() {
db, bc = test_helpers.SetupDBandBC()
contractPoller = poller.NewPoller(bc, db, types.HeaderSync)
})
Describe("PollContract", func() {
It("Polls specified contract methods using contract's token holder address list", func() {
con = test_helpers.SetupTusdContract(nil, []string{"balanceOf"})
Expect(con.Abi).To(Equal(constants.TusdAbiString))
con.StartingBlock = 6707322
con.AddEmittedAddr(common.HexToAddress("0xfE9e8709d3215310075d67E3ed32A380CCf451C8"), common.HexToAddress("0x3f5CE5FBFe3E9af3971dD833D26bA9b5C936f0bE"))
err := contractPoller.PollContract(*con, 6707323)
Expect(err).ToNot(HaveOccurred())
scanStruct := test_helpers.BalanceOf{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.balanceof_method WHERE who_ = '0xfE9e8709d3215310075d67E3ed32A380CCf451C8' AND block = '6707322'", constants.TusdContractAddress)).StructScan(&scanStruct)
Expect(err).ToNot(HaveOccurred())
Expect(scanStruct.Balance).To(Equal("66386309548896882859581786"))
Expect(scanStruct.TokenName).To(Equal("TrueUSD"))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.balanceof_method WHERE who_ = '0xfE9e8709d3215310075d67E3ed32A380CCf451C8' AND block = '6707323'", constants.TusdContractAddress)).StructScan(&scanStruct)
Expect(err).ToNot(HaveOccurred())
Expect(scanStruct.Balance).To(Equal("66386309548896882859581786"))
Expect(scanStruct.TokenName).To(Equal("TrueUSD"))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.balanceof_method WHERE who_ = '0x3f5CE5FBFe3E9af3971dD833D26bA9b5C936f0bE' AND block = '6707322'", constants.TusdContractAddress)).StructScan(&scanStruct)
Expect(err).ToNot(HaveOccurred())
Expect(scanStruct.Balance).To(Equal("17982350181394112023885864"))
Expect(scanStruct.TokenName).To(Equal("TrueUSD"))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.balanceof_method WHERE who_ = '0x3f5CE5FBFe3E9af3971dD833D26bA9b5C936f0bE' AND block = '6707323'", constants.TusdContractAddress)).StructScan(&scanStruct)
Expect(err).ToNot(HaveOccurred())
Expect(scanStruct.Balance).To(Equal("17982350181394112023885864"))
Expect(scanStruct.TokenName).To(Equal("TrueUSD"))
})
It("Polls specified contract methods using contract's hash list", func() {
con = test_helpers.SetupENSContract(nil, []string{"owner"})
Expect(con.Abi).To(Equal(constants.ENSAbiString))
Expect(len(con.Methods)).To(Equal(1))
con.AddEmittedHash(common.HexToHash("0x93cdeb708b7545dc668eb9280176169d1c33cfd8ed6f04690a0bcc88a93fc4ae"), common.HexToHash("0x7e74a86b6e146964fb965db04dc2590516da77f720bb6759337bf5632415fd86"))
err := contractPoller.PollContractAt(*con, 6885877)
Expect(err).ToNot(HaveOccurred())
scanStruct := test_helpers.Owner{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.owner_method WHERE node_ = '0x7e74a86b6e146964fb965db04dc2590516da77f720bb6759337bf5632415fd86' AND block = '6885877'", constants.EnsContractAddress)).StructScan(&scanStruct)
Expect(err).ToNot(HaveOccurred())
Expect(scanStruct.Address).To(Equal("0x546aA2EaE2514494EeaDb7bbb35243348983C59d"))
Expect(scanStruct.TokenName).To(Equal("ENS-Registry"))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.owner_method WHERE node_ = '0x93cdeb708b7545dc668eb9280176169d1c33cfd8ed6f04690a0bcc88a93fc4ae' AND block = '6885877'", constants.EnsContractAddress)).StructScan(&scanStruct)
Expect(err).ToNot(HaveOccurred())
Expect(scanStruct.Address).To(Equal("0x6090A6e47849629b7245Dfa1Ca21D94cd15878Ef"))
Expect(scanStruct.TokenName).To(Equal("ENS-Registry"))
})
It("Does not poll and persist any methods if none are specified", func() {
con = test_helpers.SetupTusdContract(nil, nil)
Expect(con.Abi).To(Equal(constants.TusdAbiString))
con.StartingBlock = 6707322
con.AddEmittedAddr(common.HexToAddress("0xfE9e8709d3215310075d67E3ed32A380CCf451C8"), common.HexToAddress("0x3f5CE5FBFe3E9af3971dD833D26bA9b5C936f0bE"))
err := contractPoller.PollContract(*con, 6707323)
Expect(err).ToNot(HaveOccurred())
scanStruct := test_helpers.BalanceOf{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.balanceof_method WHERE who_ = '0xfE9e8709d3215310075d67E3ed32A380CCf451C8' AND block = '6707322'", constants.TusdContractAddress)).StructScan(&scanStruct)
Expect(err).To(HaveOccurred())
})
It("Caches returned values of the appropriate types for downstream method polling if method piping is turned on", func() {
con = test_helpers.SetupENSContract(nil, []string{"resolver"})
Expect(con.Abi).To(Equal(constants.ENSAbiString))
con.StartingBlock = 6921967
con.EmittedAddrs = map[interface{}]bool{}
con.Piping = false
con.AddEmittedHash(common.HexToHash("0x495b6e6efdedb750aa519919b5cf282bdaa86067b82a2293a3ff5723527141e8"))
err := contractPoller.PollContract(*con, 6921968)
Expect(err).ToNot(HaveOccurred())
scanStruct := test_helpers.Resolver{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.resolver_method WHERE node_ = '0x495b6e6efdedb750aa519919b5cf282bdaa86067b82a2293a3ff5723527141e8' AND block = '6921967'", constants.EnsContractAddress)).StructScan(&scanStruct)
Expect(err).ToNot(HaveOccurred())
Expect(scanStruct.Address).To(Equal("0x5FfC014343cd971B7eb70732021E26C35B744cc4"))
Expect(scanStruct.TokenName).To(Equal("ENS-Registry"))
Expect(len(con.EmittedAddrs)).To(Equal(0)) // With piping off the address is not saved
test_helpers.TearDown(db)
db, bc = test_helpers.SetupDBandBC()
contractPoller = poller.NewPoller(bc, db, types.HeaderSync)
con.Piping = true
err = contractPoller.PollContract(*con, 6921968)
Expect(err).ToNot(HaveOccurred())
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM header_%s.resolver_method WHERE node_ = '0x495b6e6efdedb750aa519919b5cf282bdaa86067b82a2293a3ff5723527141e8' AND block = '6921967'", constants.EnsContractAddress)).StructScan(&scanStruct)
Expect(err).ToNot(HaveOccurred())
Expect(scanStruct.Address).To(Equal("0x5FfC014343cd971B7eb70732021E26C35B744cc4"))
Expect(scanStruct.TokenName).To(Equal("ENS-Registry"))
Expect(len(con.EmittedAddrs)).To(Equal(1)) // With piping on it is saved
Expect(con.EmittedAddrs[common.HexToAddress("0x5FfC014343cd971B7eb70732021E26C35B744cc4")]).To(Equal(true))
})
})
Describe("FetchContractData", func() {
It("Calls a single contract method", func() {
var name = new(string)
err := contractPoller.FetchContractData(constants.TusdAbiString, constants.TusdContractAddress, "name", nil, &name, 6197514)
Expect(err).ToNot(HaveOccurred())
Expect(*name).To(Equal("TrueUSD"))
})
})
})
})

View File

@ -1,8 +0,0 @@
# Shared Tools
## Description
Code that is useful for or used by plugins written on top of VulcanizeDB.
## Note
Much code in this directory may not be used outside of the tests, but don't delete it - it could be used by a plugin.
Renaming and/or deleting functions in this namespace requires a version bump to avoid breaking plugins.

View File

@ -1,35 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package chunker_test
import (
"io/ioutil"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
log "github.com/sirupsen/logrus"
)
func TestFactories(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Shared Chunker Suite")
}
var _ = BeforeSuite(func() {
log.SetOutput(ioutil.Discard)
})

View File

@ -1,69 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package chunker
import (
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
)
type Chunker interface {
AddConfig(transformerConfig transformer.EventTransformerConfig)
ChunkLogs(logs []core.HeaderSyncLog) map[string][]core.HeaderSyncLog
}
type LogChunker struct {
AddressToNames map[string][]string
NameToTopic0 map[string]common.Hash
}
// Returns a new log chunker with initialised maps.
// Needs to have configs added with `AddConfig` to consider logs for the respective transformer.
func NewLogChunker() *LogChunker {
return &LogChunker{
AddressToNames: map[string][]string{},
NameToTopic0: map[string]common.Hash{},
}
}
// Configures the chunker by adding one config with more addresses and topics to consider.
func (chunker *LogChunker) AddConfig(transformerConfig transformer.EventTransformerConfig) {
for _, address := range transformerConfig.ContractAddresses {
var lowerCaseAddress = strings.ToLower(address)
chunker.AddressToNames[lowerCaseAddress] = append(chunker.AddressToNames[lowerCaseAddress], transformerConfig.TransformerName)
chunker.NameToTopic0[transformerConfig.TransformerName] = common.HexToHash(transformerConfig.Topic)
}
}
// Goes through a slice of logs, associating relevant logs (matching addresses and topic) with transformers
func (chunker *LogChunker) ChunkLogs(logs []core.HeaderSyncLog) map[string][]core.HeaderSyncLog {
chunks := map[string][]core.HeaderSyncLog{}
for _, log := range logs {
// Topic0 is not unique to each transformer, also need to consider the contract address
relevantTransformers := chunker.AddressToNames[strings.ToLower(log.Log.Address.Hex())]
for _, t := range relevantTransformers {
if chunker.NameToTopic0[t] == log.Log.Topics[0] {
chunks[t] = append(chunks[t], log)
}
}
}
return chunks
}

View File

@ -1,163 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package chunker_test
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
chunk "github.com/vulcanize/vulcanizedb/libraries/shared/chunker"
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
)
var _ = Describe("Log chunker", func() {
var (
chunker *chunk.LogChunker
)
BeforeEach(func() {
chunker = chunk.NewLogChunker()
configA := transformer.EventTransformerConfig{
TransformerName: "TransformerA",
ContractAddresses: []string{"0x00000000000000000000000000000000000000A1", "0x00000000000000000000000000000000000000A2"},
Topic: "0xA",
}
chunker.AddConfig(configA)
configB := transformer.EventTransformerConfig{
TransformerName: "TransformerB",
ContractAddresses: []string{"0x00000000000000000000000000000000000000B1"},
Topic: "0xB",
}
chunker.AddConfig(configB)
configC := transformer.EventTransformerConfig{
TransformerName: "TransformerC",
ContractAddresses: []string{"0x00000000000000000000000000000000000000A2"},
Topic: "0xC",
}
chunker.AddConfig(configC)
})
Describe("initialisation", func() {
It("creates lookup maps correctly", func() {
Expect(chunker.AddressToNames).To(Equal(map[string][]string{
"0x00000000000000000000000000000000000000a1": {"TransformerA"},
"0x00000000000000000000000000000000000000a2": {"TransformerA", "TransformerC"},
"0x00000000000000000000000000000000000000b1": {"TransformerB"},
}))
Expect(chunker.NameToTopic0).To(Equal(map[string]common.Hash{
"TransformerA": common.HexToHash("0xA"),
"TransformerB": common.HexToHash("0xB"),
"TransformerC": common.HexToHash("0xC"),
}))
})
})
Describe("AddConfig", func() {
It("can add more configs later", func() {
configD := transformer.EventTransformerConfig{
TransformerName: "TransformerD",
ContractAddresses: []string{"0x000000000000000000000000000000000000000D"},
Topic: "0xD",
}
chunker.AddConfig(configD)
Expect(chunker.AddressToNames).To(ContainElement([]string{"TransformerD"}))
Expect(chunker.NameToTopic0).To(ContainElement(common.HexToHash("0xD")))
})
It("lower cases address", func() {
configD := transformer.EventTransformerConfig{
TransformerName: "TransformerD",
ContractAddresses: []string{"0x000000000000000000000000000000000000000D"},
Topic: "0xD",
}
chunker.AddConfig(configD)
Expect(chunker.AddressToNames["0x000000000000000000000000000000000000000d"]).To(Equal([]string{"TransformerD"}))
})
})
Describe("ChunkLogs", func() {
It("only associates logs with relevant topic0 and address to transformers", func() {
logs := []core.HeaderSyncLog{log1, log2, log3, log4, log5}
chunks := chunker.ChunkLogs(logs)
Expect(chunks["TransformerA"]).To(And(ContainElement(log1), ContainElement(log4)))
Expect(chunks["TransformerB"]).To(BeEmpty())
Expect(chunks["TransformerC"]).To(ContainElement(log5))
})
})
})
var (
// Match TransformerA
log1 = core.HeaderSyncLog{
Log: types.Log{
Address: common.HexToAddress("0xA1"),
Topics: []common.Hash{
common.HexToHash("0xA"),
common.HexToHash("0xLogTopic1"),
},
},
}
// Match TransformerA address, but not topic0
log2 = core.HeaderSyncLog{
Log: types.Log{
Address: common.HexToAddress("0xA1"),
Topics: []common.Hash{
common.HexToHash("0xB"),
common.HexToHash("0xLogTopic2"),
},
},
}
// Match TransformerA topic, but TransformerB address
log3 = core.HeaderSyncLog{
Log: types.Log{
Address: common.HexToAddress("0xB1"),
Topics: []common.Hash{
common.HexToHash("0xA"),
common.HexToHash("0xLogTopic3"),
},
},
}
// Match TransformerA, with the other address
log4 = core.HeaderSyncLog{
Log: types.Log{
Address: common.HexToAddress("0xA2"),
Topics: []common.Hash{
common.HexToHash("0xA"),
common.HexToHash("0xLogTopic4"),
},
},
}
// Match TransformerC, which shares address with TransformerA
log5 = core.HeaderSyncLog{
Log: types.Log{
Address: common.HexToAddress("0xA2"),
Topics: []common.Hash{
common.HexToHash("0xC"),
common.HexToHash("0xLogTopic5"),
},
},
}
)

View File

@ -1,25 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package constants
type TransformerExecution bool
const (
HeaderRecheck TransformerExecution = true
HeaderUnchecked TransformerExecution = false
RecheckHeaderCap = int64(5)
)

View File

@ -1,19 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package constants
var DataItemLength = 32

View File

@ -1,80 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package constants
import (
"fmt"
"math"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
)
var initialized = false
func initConfig() {
if initialized {
return
}
if err := viper.ReadInConfig(); err == nil {
log.Info("Using config file:", viper.ConfigFileUsed())
} else {
panic(fmt.Sprintf("Could not find environment file: %v", err))
}
initialized = true
}
// GetMinDeploymentBlock gets the minimum deployment block for multiple contracts from config
func GetMinDeploymentBlock() uint64 {
initConfig()
contractNames := getContractNames()
if len(contractNames) < 1 {
log.Fatalf("No contracts supplied")
}
minBlock := uint64(math.MaxUint64)
for c := range contractNames {
deployed := getDeploymentBlock(c)
if deployed < minBlock {
minBlock = deployed
}
}
return minBlock
}
func getContractNames() map[string]bool {
transformerNames := viper.GetStringSlice("exporter.transformerNames")
contractNames := make(map[string]bool)
for _, transformerName := range transformerNames {
configKey := "exporter." + transformerName + ".contracts"
names := viper.GetStringSlice(configKey)
for _, name := range names {
contractNames[name] = true
}
}
return contractNames
}
func getDeploymentBlock(contractName string) uint64 {
configKey := "contract." + contractName + ".deployed"
value := viper.GetInt64(configKey)
if value < 0 {
log.Infof("No deployment block configured for contract \"%v\", defaulting to 0.", contractName)
return 0
}
return uint64(value)
}
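// For reference, a hedged sketch of the viper config layout these keys
// expect; the transformer and contract names below are hypothetical:
//
//	[exporter]
//	transformerNames = ["exampleTransformer"]
//
//	[exporter.exampleTransformer]
//	contracts = ["ExampleContract"]
//
//	[contract.ExampleContract]
//	deployed = 6885692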

View File

@ -1,388 +0,0 @@
# Watching Contract Events
One approach VulcanizeDB takes to caching and indexing smart contracts is to watch contract events emitted in receipt logs.
With a header synced vDB we can watch events by iterating over headers retrieved from the synced `headers` table, using
those headers to fetch and verify relevant event logs from a full Ethereum node, and keeping track of which headers we
have checked for which events with our `checked_headers` table.
## Assumptions
This approach assumes you are running a vDB header sync against a light Ethereum node and that a full node is also available.
Looking forward, we will be building fetchers that enable sourcing data from IPFS instead of an ETH node.
## Shared Code
VulcanizeDB has shared code built out for building and plugging in event transformers
### [Event Watcher (header sync)](../staging/libraries/shared/watcher/event_watcher.go)
The event watcher is responsible for continuously fetching and delegating chunks of logs and their associated header to the appropriate transformers.
Using the `compose` or `composeAndExecute` command, event watchers can be loaded with plugin event transformers and execute over them.
### [Event Transformer](../staging/libraries/shared/transformer/event_transformer.go)
The event transformer is responsible for converting event logs into more useful data objects and storing them in Postgres.
The event transformer is composed of converter and repository interfaces and a config struct:
```go
type EventTransformer struct {
Config transformer.EventTransformerConfig
Converter Converter
Repository Repository
}
```
The event transformer executes over provided event logs at a given header.
In this process, the converter unpacks these logs into entities and then converts these entities
to their final db models. These models are then written to the Postgres db by the repository.
```go
func (transformer Transformer) Execute(logs []types.Log, header core.Header, recheckHeaders constants.TransformerExecution) error {
transformerName := transformer.Config.TransformerName
config := transformer.Config
if len(logs) < 1 {
err := transformer.Repository.MarkHeaderChecked(header.Id)
if err != nil {
log.Printf("Error marking header as checked in %v: %v", transformerName, err)
return err
}
return nil
}
entities, err := transformer.Converter.ToEntities(config.ContractAbi, logs)
if err != nil {
log.Printf("Error converting logs to entities in %v: %v", transformerName, err)
return err
}
models, err := transformer.Converter.ToModels(entities)
if err != nil {
log.Printf("Error converting entities to models in %v: %v", transformerName, err)
return err
}
err = transformer.Repository.Create(header.Id, models)
if err != nil {
log.Printf("Error persisting %v record: %v", transformerName, err)
return err
}
return nil
}
```
## Custom Code
In order to watch events at a smart contract, the developer must create the following for those events:
1. Config - struct to hold configuration information (contract address, starting block, event name and signature).
1. Entity - struct to unpack the event log into.
1. Model - struct representing the final data model we want to write to Postgres.
1. Converter - an interface which can unpack event logs into our entities and convert those entities to our models.
1. Repository - an interface to write our models to Postgres.
1. EventTransformerInitializer - a public variable which exports our configured transformer to be loaded as part of a plugin.
1. DB migrations - migrations to generate the Postgres schema, tables, views, function, etc that are needed to store and interface with the transformed data models.
The example event we will use looks like:
```
event ExampleEvent(bytes32 indexed arg1, address indexed arg2, bytes32 arg3, uint256 arg4, uint256 arg5);
```
### Config
The config holds configuration variables for the event transformer, including a name for the transformer, the contract address
it is working at, the contract's ABI, the topic (i.e. the keccak256 hash of the event signature, known as topic0) that it is
filtering for, and starting and ending block numbers.
```go
type EventTransformerConfig struct {
TransformerName string
ContractAddresses []string
ContractAbi string
Topic string
StartingBlockNumber int64
EndingBlockNumber int64 // Set -1 for indefinite transformer
}
```
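For the example event above, a configured instance might look like the following sketch. The contract address, block numbers, and the `exampleContractAbi` variable are hypothetical placeholders; the topic is the keccak256 hash of the canonical event signature, computed here with go-ethereum's `crypto` package.
```go
import (
	"github.com/ethereum/go-ethereum/crypto"

	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
)

// Hedged sketch: the address, block numbers, and ABI variable are placeholders.
var exampleEventConfig = transformer.EventTransformerConfig{
	TransformerName:   "example_event",
	ContractAddresses: []string{"0x0000000000000000000000000000000000000001"},
	ContractAbi:       exampleContractAbi, // hypothetical variable holding the ABI JSON
	// topic0 is the keccak256 hash of the canonical event signature
	Topic:               crypto.Keccak256Hash([]byte("ExampleEvent(bytes32,address,bytes32,uint256,uint256)")).Hex(),
	StartingBlockNumber: 6885692,
	EndingBlockNumber:   -1, // run indefinitely
}
```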
### Entity
Entity field names for event arguments need to be exported and match the argument's name and type. LogIndex,
TransactionIndex, and the Raw log are retained in order to link the data to its source for downstream validation.
```go
type ExampleEntity struct {
Arg1 common.Hash
Arg2 common.Address
Arg3 common.Hash
Arg4 *big.Int
Arg5 *big.Int
LogIndex uint
TransactionIndex uint
Raw types.Log
}
```
### Model
Model fields are not constrained by the event log structure.
This allows us to rename our fields, decode or convert our log values into more useful types, and perform operations
with or on the values before persisting the data to Postgres.
```go
type ExampleModel struct {
EventHash string
UserAddress string
FractionSkimmed string
Surplus string
Deficit string
FinalPosition string
LogIndex uint
TransactionIndex uint
Raw []byte
}
```
### Converter
The converter needs to satisfy the interface, which requires two methods: one for unpacking logs into the custom-defined
entities, and another for converting those entities to their final db models.
```go
type Converter interface {
ToEntities(contractAbi string, ethLog []types.Log) ([]interface{}, error)
ToModels([]interface{}) ([]interface{}, error)
}
```
For the example event, this might look like:
```go
type ExampleConverter struct{}
func (ExampleConverter) ToEntities(contractAbi string, ethLogs []types.Log) ([]interface{}, error) {
var entities []interface{}
for _, ethLog := range ethLogs {
entity := &ExampleEntity{}
address := ethLog.Address
abi, err := geth.ParseAbi(contractAbi)
if err != nil {
return nil, err
}
contract := bind.NewBoundContract(address, abi, nil, nil, nil)
err = contract.UnpackLog(entity, "ExampleEvent", ethLog)
if err != nil {
return nil, err
}
entity.Raw = ethLog
entity.LogIndex = ethLog.Index
entity.TransactionIndex = ethLog.TxIndex
entities = append(entities, *entity)
}
return entities, nil
}
func (converter ExampleConverter) ToModels(entities []interface{}) ([]interface{}, error) {
var models []interface{}
for _, entity := range entities {
entity, ok := entity.(ExampleEntity)
if !ok {
return nil, fmt.Errorf("entity of type %T, not %T", entity, ExampleEntity{})
}
fractionSkimmed := entity.Arg3.Big()
position := new(big.Int)
position.Sub(entity.Arg4, entity.Arg5)
finalPosition := new(big.Int)
if position.Sign() < 0 {
finalPosition = position
} else {
skim := new(big.Int)
skim.Div(position, fractionSkimmed)
finalPosition = position.Sub(position, skim)
}
rawLog, err := json.Marshal(entity.Raw)
if err != nil {
return nil, err
}
model := ExampleModel{
EventHash: entity.Arg1.Hex(),
UserAddress: entity.Arg2.Hex(),
FractionSkimmed: fractionSkimmed.String(),
Surplus: entity.Arg4.String(),
Deficit: entity.Arg5.String(),
FinalPosition: finalPosition.String(),
LogIndex: entity.LogIndex,
TransactionIndex: entity.TransactionIndex,
Raw: rawLog,
}
models = append(models, model)
}
return models, nil
}
```
Notice that in this example we have a bytes32 argument in the event that needs to be decoded to an integer before it can be worked with
to produce our hypothetical `FinalPosition` field. This is to highlight the fact that contracts can and sometimes do encode the
data types we want to work with into raw bytes. Writing custom transformers with these converters allows us to account for this.
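As a standalone illustration of that decoding, a 32-byte word holding an integer can be interpreted directly with `math/big` (the value below is hypothetical):
```go
import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

func decodeBytes32Example() {
	// A bytes32 argument as it arrives in the log (hypothetical value encoding 100).
	arg3 := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000064")
	// Interpret the 32-byte word as a big-endian unsigned integer.
	n := new(big.Int).SetBytes(arg3.Bytes())
	fmt.Println(n.String()) // prints "100"
}
```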
### Repository
The repository needs to satisfy the interface and use the `Create` method to write the model to Postgres.
```go
type Repository interface {
Create(headerID int64, models []interface{}) error
MarkHeaderChecked(headerID int64) error
MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error)
RecheckHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error)
SetDB(db *postgres.DB)
}
```
For the example event, this might look like:
```go
type ExampleRepository struct {
db *postgres.DB
}
func (repository *ExampleRepository) SetDB(db *postgres.DB) {
repository.db = db
}
func (repository ExampleRepository) Create(headerID int64, models []interface{}) error {
tx, dBaseErr := repository.db.Beginx()
if dBaseErr != nil {
return dBaseErr
}
for _, model := range models {
model, ok := model.(ExampleModel)
if !ok {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
log.Error("failed to rollback ", rollbackErr)
}
return fmt.Errorf("model of type %T, not %T", model, ExampleModel{})
}
_, execErr := tx.Exec(
`INSERT into example_schema.example_event (header_id, event_hash, user_address, fraction_skimmed, surplus, deficit, final_position, log_idx, tx_idx, raw_log)
VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
ON CONFLICT (header_id, tx_idx, log_idx) DO UPDATE SET event_hash = $2, user_address = $3, fraction_skimmed = $4, surplus = $5, deficit = $6, final_position = $7, raw_log = $10;`,
headerID, model.EventHash, model.UserAddress, model.FractionSkimmed, model.Surplus, model.Deficit, model.FinalPosition, model.LogIndex, model.TransactionIndex, model.Raw,
)
if execErr != nil {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
log.Error("failed to rollback ", rollbackErr)
}
return execErr
}
}
checkHeaderErr := repo.MarkHeaderCheckedInTransaction(headerID, tx, "example_event_checked")
if checkHeaderErr != nil {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
log.Error("failed to rollback ", rollbackErr)
}
return checkHeaderErr
}
return tx.Commit()
}
func (repository ExampleRepository) MarkHeaderChecked(headerID int64) error {
return repo.MarkHeaderChecked(headerID, repository.db, "example_event_checked")
}
func (repository ExampleRepository) MissingHeaders(startingBlockNumber int64, endingBlockNumber int64) ([]core.Header, error) {
return repo.MissingHeaders(startingBlockNumber, endingBlockNumber, repository.db, "example_event_checked")
}
func (repository ExampleRepository) RecheckHeaders(startingBlockNumber int64, endingBlockNumber int64) ([]core.Header, error) {
return repo.RecheckHeaders(startingBlockNumber, endingBlockNumber, repository.db, "example_event_checked")
}
```
### EventTransformerInitializer
A transformer initializer variable needs to be exported from somewhere within the transformer repository so that the transformer can be
loaded as part of a plugin in the `compose` or `composeAndExecute` commands. It is important that this variable is named `EventTransformerInitializer` and
it must be of `type EventTransformerInitializer func(db *postgres.DB) EventTransformer`.
```go
var EventTransformerInitializer transformer.EventTransformerInitializer = factories.Transformer{
Config: exampleEventConfig,
Converter: ExampleConverter{},
Repository: &ExampleRepository{},
}.NewTransformer
```
### DB migrations
We use `goose` as our migration management tool. Any Go data model that needs to be written to Postgres by the
repository needs a db migration for the corresponding Postgres data model.
Each contract or set of transformers being watched should define its own namespace with a db schema:
```postgresql
-- +goose Up
CREATE SCHEMA example_schema;
-- +goose Down
DROP SCHEMA example_schema;
```
For the example event and its resulting model, the table we write to would look like:
```postgresql
-- +goose Up
CREATE TABLE example_schema.example_event (
id SERIAL PRIMARY KEY,
header_id INTEGER NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
event_hash CHARACTER VARYING(66) NOT NULL,
user_address CHARACTER VARYING(66) NOT NULL,
fraction_skimmed NUMERIC NOT NULL,
surplus NUMERIC NOT NULL,
deficit NUMERIC NOT NULL,
final_position NUMERIC NOT NULL,
tx_idx INTEGER NOT NULL,
log_idx INTEGER NOT NULL,
raw_log JSONB,
UNIQUE (header_id, tx_idx, log_idx)
);
ALTER TABLE public.checked_headers
ADD COLUMN example_event_checked INTEGER NOT NULL DEFAULT 0;
-- +goose Down
DROP TABLE example_schema.example_event;
ALTER TABLE public.checked_headers
DROP COLUMN example_event_checked;
```
Notice that we have also added a column to the `checked_headers` table for this event so that we can keep track
of which headers we have already filtered through for this event.
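As a rough sketch of how that column is used (the actual queries live in the shared repository helpers), `MissingHeaders` can select headers in a block range that have not yet been checked for this event:
```postgresql
-- Hedged sketch; column and table names follow the migration above.
SELECT headers.id, headers.block_number, headers.hash
FROM headers
LEFT JOIN public.checked_headers ON checked_headers.header_id = headers.id
WHERE COALESCE(checked_headers.example_event_checked, 0) = 0
  AND headers.block_number >= $1
  AND headers.block_number <= $2;
```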
## Summary
To create a transformer for a contract event we need entities for unpacking the raw log, models to represent
the final data structure, a converter to mediate this unpacking and conversion from entities to models, a repository to write
these models to Postgres, db migrations to accommodate these models in Postgres, and an EventTransformerInitializer to export the
configured transformer and load it as a plugin to the `compose` or `composeAndExecute` commands as described in the main readme.

View File

@ -1,28 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package event
import (
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
// Converter transforms log data into general InsertionModels the Repository can persist
type Converter interface {
ToModels(contractAbi string, ethLog []core.HeaderSyncLog) ([]InsertionModel, error)
SetDB(db *postgres.DB)
}

View File

@ -1,35 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package event_test
import (
"io/ioutil"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
log "github.com/sirupsen/logrus"
)
func TestFactories(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Factories Suite")
}
var _ = BeforeSuite(func() {
log.SetOutput(ioutil.Discard)
})

View File

@ -1,179 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package event
import (
"database/sql/driver"
"fmt"
"strings"
"github.com/vulcanize/vulcanizedb/utils"
"github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
const SetLogTransformedQuery = `UPDATE public.header_sync_logs SET transformed = true WHERE id = $1`
// Repository persists transformed values to the DB
type Repository interface {
Create(models []InsertionModel) error
SetDB(db *postgres.DB)
}
// LogFK is the name of log foreign key columns
const LogFK ColumnName = "log_id"
// AddressFK is the name of address foreign key columns
const AddressFK ColumnName = "address_id"
// HeaderFK is the name of header foreign key columns
const HeaderFK ColumnName = "header_id"
// SchemaName is the schema to work with
type SchemaName string
// TableName identifies the table for inserting the data
type TableName string
// ColumnName identifies columns on the given table
type ColumnName string
// ColumnValues maps a column to the value for insertion. This is restricted to []byte, bool, float64, int64, string, time.Time
type ColumnValues map[ColumnName]interface{}
// ErrUnsupportedValue is thrown when a model supplies a type of value the postgres driver cannot handle.
var ErrUnsupportedValue = func(value interface{}) error {
return fmt.Errorf("unsupported type of value supplied in model: %v (%T)", value, value)
}
// InsertionModel is the generalised data structure a converter returns, and contains everything the repository needs to
// persist the converted data.
type InsertionModel struct {
SchemaName SchemaName
TableName TableName
OrderedColumns []ColumnName // Defines the fields to insert, and in which order the table expects them
ColumnValues ColumnValues // Associated values for columns, restricted to []byte, bool, float64, int64, string, time.Time
}
// ModelToQuery stores memoised insertion queries to minimise computation
var ModelToQuery = map[string]string{}
// GetMemoizedQuery gets/creates a DB insertion query for the model
func GetMemoizedQuery(model InsertionModel) string {
// The schema and table name uniquely determines the insertion query, use that for memoization
queryKey := string(model.SchemaName) + string(model.TableName)
query, queryMemoized := ModelToQuery[queryKey]
if !queryMemoized {
query = GenerateInsertionQuery(model)
ModelToQuery[queryKey] = query
}
return query
}
// GenerateInsertionQuery creates an SQL insertion query from an insertion model.
// Should be called through GetMemoizedQuery, so the query is not generated on each call to Create.
func GenerateInsertionQuery(model InsertionModel) string {
var valuePlaceholders []string
var updateOnConflict []string
for i := 0; i < len(model.OrderedColumns); i++ {
valuePlaceholder := fmt.Sprintf("$%d", 1+i)
valuePlaceholders = append(valuePlaceholders, valuePlaceholder)
updateOnConflict = append(updateOnConflict,
fmt.Sprintf("%s = %s", model.OrderedColumns[i], valuePlaceholder))
}
baseQuery := `INSERT INTO %v.%v (%v) VALUES(%v)
ON CONFLICT (header_id, log_id) DO UPDATE SET %v;`
return fmt.Sprintf(baseQuery,
model.SchemaName,
model.TableName,
joinOrderedColumns(model.OrderedColumns),
strings.Join(valuePlaceholders, ", "),
strings.Join(updateOnConflict, ", "))
}
/*
Create generates an insertion query and persists to the DB, given a slice of InsertionModels.
ColumnValues are restricted to []byte, bool, float64, int64, string, time.Time.
testModel = shared.InsertionModel{
SchemaName: "public",
TableName: "testEvent",
OrderedColumns: []ColumnName{"header_id", "log_id", "variable1"},
ColumnValues: ColumnValues{
"header_id": 303,
"log_id": 808,
"variable1": "value1",
},
}
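For the above model, GetMemoizedQuery/GenerateInsertionQuery produce:
INSERT INTO public.testEvent (header_id, log_id, variable1) VALUES($1, $2, $3)
ON CONFLICT (header_id, log_id) DO UPDATE SET header_id = $1, log_id = $2, variable1 = $3;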
*/
func Create(models []InsertionModel, db *postgres.DB) error {
if len(models) == 0 {
return fmt.Errorf("repository got empty model slice")
}
tx, dbErr := db.Beginx()
if dbErr != nil {
return dbErr
}
for _, model := range models {
// Maps can't be iterated over in a reliable manner, so we rely on OrderedColumns to define the order to insert
// tx.Exec is variadically typed in the args, so if we wrap in []interface{} we can apply them all automatically
var args []interface{}
for _, col := range model.OrderedColumns {
value := model.ColumnValues[col]
// Check whether or not PG can accept the type of value in the model
okPgValue := driver.IsValue(value)
if !okPgValue {
logrus.WithField("model", model).Errorf("PG cannot handle value of this type: %T", value)
return ErrUnsupportedValue(value)
}
args = append(args, value)
}
insertionQuery := GetMemoizedQuery(model)
_, execErr := tx.Exec(insertionQuery, args...) // couldn't pass varying types in bulk with args :: []string
if execErr != nil {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
logrus.Error("failed to rollback ", rollbackErr)
}
return execErr
}
_, logErr := tx.Exec(SetLogTransformedQuery, model.ColumnValues[LogFK])
if logErr != nil {
utils.RollbackAndLogFailure(tx, logErr, "header_sync_logs.transformed")
return logErr
}
}
return tx.Commit()
}
func joinOrderedColumns(columns []ColumnName) string {
var stringColumns []string
for _, columnName := range columns {
stringColumns = append(stringColumns, string(columnName))
}
return strings.Join(stringColumns, ", ")
}

View File

@ -1,205 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package event_test
import (
"fmt"
"math/big"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/libraries/shared/factories/event"
"github.com/vulcanize/vulcanizedb/libraries/shared/test_data"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres/repositories"
"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
"github.com/vulcanize/vulcanizedb/test_config"
)
var _ = Describe("Repository", func() {
var db *postgres.DB
BeforeEach(func() {
db = test_config.NewTestDB(test_config.NewTestNode())
test_config.CleanTestDB(db)
})
Describe("Create", func() {
const createTestEventTableQuery = `CREATE TABLE public.testEvent(
id SERIAL PRIMARY KEY,
header_id INTEGER NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
log_id BIGINT NOT NULL REFERENCES header_sync_logs (id) ON DELETE CASCADE,
variable1 TEXT,
UNIQUE (header_id, log_id)
);`
var (
headerID, logID int64
headerRepository repositories.HeaderRepository
testModel event.InsertionModel
)
BeforeEach(func() {
_, tableErr := db.Exec(createTestEventTableQuery)
Expect(tableErr).NotTo(HaveOccurred())
headerRepository = repositories.NewHeaderRepository(db)
var insertHeaderErr error
headerID, insertHeaderErr = headerRepository.CreateOrUpdateHeader(fakes.FakeHeader)
Expect(insertHeaderErr).NotTo(HaveOccurred())
headerSyncLog := test_data.CreateTestLog(headerID, db)
logID = headerSyncLog.ID
testModel = event.InsertionModel{
SchemaName: "public",
TableName: "testEvent",
OrderedColumns: []event.ColumnName{
event.HeaderFK, event.LogFK, "variable1",
},
ColumnValues: event.ColumnValues{
event.HeaderFK: headerID,
event.LogFK: logID,
"variable1": "value1",
},
}
})
AfterEach(func() {
db.MustExec(`DROP TABLE public.testEvent;`)
})
// Needs to run before the other tests, since those insert keys into the map
It("memoizes queries", func() {
Expect(len(event.ModelToQuery)).To(Equal(0))
event.GetMemoizedQuery(testModel)
Expect(len(event.ModelToQuery)).To(Equal(1))
event.GetMemoizedQuery(testModel)
Expect(len(event.ModelToQuery)).To(Equal(1))
})
It("persists a model to postgres", func() {
createErr := event.Create([]event.InsertionModel{testModel}, db)
Expect(createErr).NotTo(HaveOccurred())
var res TestEvent
dbErr := db.Get(&res, `SELECT log_id, variable1 FROM public.testEvent;`)
Expect(dbErr).NotTo(HaveOccurred())
Expect(res.LogID).To(Equal(fmt.Sprint(testModel.ColumnValues[event.LogFK])))
Expect(res.Variable1).To(Equal(testModel.ColumnValues["variable1"]))
})
Describe("returns errors", func() {
It("for empty model slice", func() {
err := event.Create([]event.InsertionModel{}, db)
Expect(err).To(MatchError("repository got empty model slice"))
})
It("for failed SQL inserts", func() {
header := fakes.GetFakeHeader(1)
headerID, headerErr := headerRepository.CreateOrUpdateHeader(header)
Expect(headerErr).NotTo(HaveOccurred())
brokenModel := event.InsertionModel{
SchemaName: "public",
TableName: "testEvent",
// Wrong name of last column compared to DB, will generate incorrect query
OrderedColumns: []event.ColumnName{
event.HeaderFK, event.LogFK, "variable2",
},
ColumnValues: event.ColumnValues{
event.HeaderFK: headerID,
event.LogFK: logID,
"variable1": "value1",
},
}
// Remove cached queries, or we won't generate a new (incorrect) one
delete(event.ModelToQuery, "publictestEvent")
createErr := event.Create([]event.InsertionModel{brokenModel}, db)
// Remove incorrect query, so other tests won't get it
delete(event.ModelToQuery, "publictestEvent")
Expect(createErr).To(HaveOccurred())
})
It("for unsupported types in ColumnValue", func() {
unsupportedValue := big.NewInt(5)
testModel = event.InsertionModel{
SchemaName: "public",
TableName: "testEvent",
OrderedColumns: []event.ColumnName{
event.HeaderFK, event.LogFK, "variable1",
},
ColumnValues: event.ColumnValues{
event.HeaderFK: headerID,
event.LogFK: logID,
"variable1": unsupportedValue,
},
}
createErr := event.Create([]event.InsertionModel{testModel}, db)
Expect(createErr).To(MatchError(event.ErrUnsupportedValue(unsupportedValue)))
})
})
It("upserts queries with conflicting source", func() {
conflictingModel := event.InsertionModel{
SchemaName: "public",
TableName: "testEvent",
OrderedColumns: []event.ColumnName{
event.HeaderFK, event.LogFK, "variable1",
},
ColumnValues: event.ColumnValues{
event.HeaderFK: headerID,
event.LogFK: logID,
"variable1": "conflictingValue",
},
}
createErr := event.Create([]event.InsertionModel{testModel, conflictingModel}, db)
Expect(createErr).NotTo(HaveOccurred())
var res TestEvent
dbErr := db.Get(&res, `SELECT log_id, variable1 FROM public.testEvent;`)
Expect(dbErr).NotTo(HaveOccurred())
Expect(res.Variable1).To(Equal(conflictingModel.ColumnValues["variable1"]))
})
It("generates correct queries", func() {
actualQuery := event.GenerateInsertionQuery(testModel)
expectedQuery := `INSERT INTO public.testEvent (header_id, log_id, variable1) VALUES($1, $2, $3)
ON CONFLICT (header_id, log_id) DO UPDATE SET header_id = $1, log_id = $2, variable1 = $3;`
Expect(actualQuery).To(Equal(expectedQuery))
})
It("marks log transformed", func() {
createErr := event.Create([]event.InsertionModel{testModel}, db)
Expect(createErr).NotTo(HaveOccurred())
var logTransformed bool
getErr := db.Get(&logTransformed, `SELECT transformed FROM public.header_sync_logs WHERE id = $1`, logID)
Expect(getErr).NotTo(HaveOccurred())
Expect(logTransformed).To(BeTrue())
})
})
})
type TestEvent struct {
LogID string `db:"log_id"`
Variable1 string
}

View File

@ -1,68 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package event
import (
"github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type Transformer struct {
Config transformer.EventTransformerConfig
Converter Converter
Repository Repository
}
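// NewTransformer wires the transformer's converter and repository to the given DB and returns it as an EventTransformer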
func (transformer Transformer) NewTransformer(db *postgres.DB) transformer.EventTransformer {
transformer.Converter.SetDB(db)
transformer.Repository.SetDB(db)
return transformer
}
func (transformer Transformer) Execute(logs []core.HeaderSyncLog) error {
transformerName := transformer.Config.TransformerName
config := transformer.Config
if len(logs) < 1 {
return nil
}
models, err := transformer.Converter.ToModels(config.ContractAbi, logs)
if err != nil {
logrus.Errorf("error converting entities to models in %v: %v", transformerName, err)
return err
}
err = transformer.Repository.Create(models)
if err != nil {
logrus.Errorf("error persisting %v record: %v", transformerName, err)
return err
}
logrus.Debug("Persisted log for " + transformerName)
return nil
}
func (transformer Transformer) GetName() string {
return transformer.Config.TransformerName
}
func (transformer Transformer) GetConfig() transformer.EventTransformerConfig {
return transformer.Config
}

View File

@ -1,107 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package event_test
import (
"math/rand"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/libraries/shared/factories/event"
"github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
"github.com/vulcanize/vulcanizedb/libraries/shared/test_data"
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
)
var _ = Describe("Transformer", func() {
var (
repository mocks.MockEventRepository
converter mocks.MockConverter
t transformer.EventTransformer
headerOne core.Header
config = test_data.GenericTestConfig
logs []core.HeaderSyncLog
)
BeforeEach(func() {
repository = mocks.MockEventRepository{}
converter = mocks.MockConverter{}
t = event.Transformer{
Repository: &repository,
Converter: &converter,
Config: config,
}.NewTransformer(nil)
headerOne = core.Header{ID: rand.Int63(), BlockNumber: rand.Int63()}
logs = []core.HeaderSyncLog{{
ID: 0,
HeaderID: headerOne.ID,
Log: test_data.GenericTestLog(),
Transformed: false,
}}
})
It("sets the db", func() {
Expect(repository.SetDbCalled).To(BeTrue())
})
It("doesn't attempt to convert or persist an empty collection when there are no logs", func() {
err := t.Execute([]core.HeaderSyncLog{})
Expect(err).NotTo(HaveOccurred())
Expect(converter.ToModelsCalledCounter).To(Equal(0))
Expect(repository.CreateCalledCounter).To(Equal(0))
})
It("converts an eth log to a model", func() {
err := t.Execute(logs)
Expect(err).NotTo(HaveOccurred())
Expect(converter.ContractAbi).To(Equal(config.ContractAbi))
Expect(converter.LogsToConvert).To(Equal(logs))
})
It("returns an error if converting to models fails", func() {
converter.ToModelsError = fakes.FakeError
err := t.Execute(logs)
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(fakes.FakeError))
})
It("persists the record", func() {
converter.ModelsToReturn = []event.InsertionModel{test_data.GenericModel}
err := t.Execute(logs)
Expect(err).NotTo(HaveOccurred())
Expect(repository.PassedModels[0]).To(Equal(test_data.GenericModel))
})
It("returns error if persisting the record fails", func() {
repository.SetCreateError(fakes.FakeError)
err := t.Execute(logs)
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(fakes.FakeError))
})
})

View File

@ -1,167 +0,0 @@
# Storage Transformer Example
In the Storage Transformer README, we went over code that needs to be written to add a new storage transformer to VulcanizeDB.
In this document, we'll go over an example contract and discuss how one would go about watching its storage.
## Example Contract
For the purposes of this document, we'll be assuming that we're interested in watching the following contract:
```solidity
pragma solidity ^0.5.1;
contract Contract {
uint256 public num_addresses;
mapping(address => uint) public addresses;
event AddressAdded(
address addr,
uint256 num_addrs
);
constructor() public {
addresses[msg.sender] = 1;
num_addresses = 1;
}
function add_address(address addr) public {
bool exists = addresses[addr] > 0;
addresses[addr]++;
if (!exists) {
emit AddressAdded(addr, ++num_addresses);
}
}
}
```
Disclaimer: this contract has not been audited and is not intended to be modeled or used in production. :)
This contract persists two values in its storage:
1. `num_addresses`: the total number of unique addresses known to the contract.
2. `addresses`: a mapping that records the number of times an address has been added to the contract.
It also emits an event each time a new address is added into the contract's storage.
## Custom Code
In order to monitor the state of this smart contract, we'd need to implement an event transformer, a mappings namespace, and a repository.
We will go through each of these in turn.
### Event Transformer
Given that the contract's storage includes a mapping, `addresses`, we will need to be able to identify the keys to that mapping that exist in the system so that we can recognize contract storage keys that correspond to non-zero values in that mapping.
The simplest way to be aware of keys used in a contract's mapping is to listen for contract events that emit the keys that are used in its mapping(s).
Since this contract includes an event, `AddressAdded`, that is emitted each time a new address is added to the `addresses` mapping, we will want to listen for those events and cache the addresses that map to non-zero values.
Please see the event transformer README for detailed instructions about developing this code.
In short, it should be feasible to recognize `AddressAdded` events on the blockchain and parse them to keep a record of addresses that have been added to the system.
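To make this concrete, here is a minimal sketch of that conversion step, assuming go-ethereum's `abi` package and an ABI fragment containing only the `AddressAdded` event; the `addressAdded` struct and `ExtractAddedAddresses` helper are illustrative names, not part of the shared library:
```golang
package example

import (
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// ABI fragment covering the AddressAdded event only
const addressAddedAbi = `[{"anonymous":false,"inputs":[{"indexed":false,"name":"addr","type":"address"},{"indexed":false,"name":"num_addrs","type":"uint256"}],"name":"AddressAdded","type":"event"}]`

type addressAdded struct {
	Addr     common.Address `abi:"addr"`
	NumAddrs *big.Int       `abi:"num_addrs"`
}

// ExtractAddedAddresses parses AddressAdded logs and returns the addresses used
// as keys in the contract's `addresses` mapping, ready for caching in postgres.
func ExtractAddedAddresses(logs []types.Log) ([]common.Address, error) {
	parsed, parseErr := abi.JSON(strings.NewReader(addressAddedAbi))
	if parseErr != nil {
		return nil, parseErr
	}
	var keys []common.Address
	for _, vLog := range logs {
		var event addressAdded
		// go-ethereum releases of this era expose Unpack with this signature
		if unpackErr := parsed.Unpack(&event, "AddressAdded", vLog.Data); unpackErr != nil {
			return nil, unpackErr
		}
		keys = append(keys, event.Addr)
	}
	return keys, nil
}
```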
### Mappings
If we point an ethereum node at a blockchain hosting this contract and our node is equipped to write out storage changes happening on this contract, we will expect such changes to appear each time `add_address` (which modifies the `addresses` mapping) is called.
Those changes arrive as raw hex storage keys and storage values; to make them useful, we need to know how to recognize and parse them.
Our mappings file should assist us with both of these tasks: the `Lookup` function should recognize raw storage keys and return known metadata about the storage value.
In order to perform this lookup, the mappings file should maintain its own mapping of known storage keys to the corresponding storage value metadata.
This internal mapping should contain the storage key for `num_addresses` as well as a storage key for each `addresses` key known to be associated with a non-zero value.
#### num_addresses
`num_addresses` is the first variable declared on the contract, and it is a simple (non-array, non-mapping) type.
Therefore, we know that its storage key is `0000000000000000000000000000000000000000000000000000000000000000`.
The storage key for non-array and non-mapping variables is (usually*) the variable's declaration index in the contract's storage, left-padded to 32 bytes.
If we see a storage diff being emitted from this contract with this storage key, we know that the `num_addresses` variable has been modified.
In this case, we would expect that the call `mappings.Lookup("0000000000000000000000000000000000000000000000000000000000000000")` would return metadata corresponding to the `num_addresses` variable.
This metadata would probably look something like:
```golang
shared.StorageValueMetadata{
Name: "num_addresses",
Keys: nil,
Type: shared.Uint256,
}
```
<sup>*</sup> Occasionally, multiple variables may be packed into one storage slot, which complicates a direct translation of the index of the variable on the contract to its storage key.
#### addresses
`addresses` is the second variable declared on the contract, but it is a mapping.
Since it is a mapping, the storage key is more complex than `0000000000000000000000000000000000000000000000000000000000000001` (which would be the key for the variable if it were not an array or mapping).
Having a single storage slot for an entire mapping would not work, since there can be an arbitrary number of entries in a mapping, and a single storage value slot is constrained to 32 bytes.
The way that smart contract mappings are maintained in storage (in Solidity) is by creating a new storage key/value pair for each entry in the mapping, where the storage key is a hash of the occupied slot's key concatenated with the mapping's index on the contract.
Given an occupied slot's key, `k`, and a mapping's index on the contract, `i`, we can generate the storage key with the following code:
```golang
func GetMappingStorageKey(k, i string) string {
return common.BytesToHash(crypto.Keccak256(common.FromHex(k + i))).Hex()
}
```
If we were to call the contract's `add_address` function with `0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe`, we would expect to see an `AddressAdded` event emitted, with `0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe` in its payload.
From that event, we would know that there exists in the contract's storage a storage key of:
```golang
GetMappingStorageKey("0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe", "0000000000000000000000000000000000000000000000000000000000000001")
```
Executing the above code results in: `0x0f96a1133cfd5b94c329aa0526b5962bd791dbbfc481ca82f7d4a439e1e9bc40`.
Therefore, the first time `add_address` was called for this address, we would also expect to see a storage diff with a key of `0x0f96a1133cfd5b94c329aa0526b5962bd791dbbfc481ca82f7d4a439e1e9bc40` and a value of `0000000000000000000000000000000000000000000000000000000000000001`.
This would be the indication that in contract storage, the address `0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe` maps to the value 1.
Given that we knew this address was a key in the mapping from our event transformer, we would expect a call to `mappings.Lookup("0x0f96a1133cfd5b94c329aa0526b5962bd791dbbfc481ca82f7d4a439e1e9bc40")` to return metadata corresponding to _this slot_ in the addresses mapping:
```golang
shared.StorageValueMetadata{
Name: "addresses,
Keys: map[Key]string{Address: "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe"},
Type: shared.Uint256,
}
```
### Repository
Once we have recognized a storage diff, we can decode the storage value to the data's known type.
Since the metadata tells us that the above values are `uint256`, we can decode a value like `0000000000000000000000000000000000000000000000000000000000000001` to `1`.
The purpose of the contract-specific repository is to write that value to the database in a way that makes it useful for future queries.
Typically, this involves writing the block hash, block number, decoded value, and any keys in the metadata to a table.
The current repository interface has a generalized `Create` function that can accept any arbitrary storage row along with its metadata.
This is deliberate, to facilitate shared use of the common storage transformer.
An implication of this decision is that the `Create` function typically includes a `switch` statement that selects which table to write to, as well as what data to include, based on the name of the variable as defined in the metadata.
An example implementation of `Create` for our example contract above might look like:
```golang
func (repository AddressStorageRepository) Create(blockNumber int, blockHash string, metadata shared.StorageValueMetadata, value interface{}) error {
switch metadata.Name {
case "num_addresses":
_, err := repository.db.Exec(`INSERT INTO storage.num_addresses (block_hash, block_number, n) VALUES ($1, $2, $3)`,
blockHash, blockNumber, value)
return err
case "addresses":
_, err := repository.db.Exec(`INSERT INTO storage.addresses (block_hash, block_number, address, n) VALUES ($1, $2, $3, $4)`,
blockHash, blockNumber, metadata.Keys[Address], value)
return err
default:
panic(fmt.Sprintf("unrecognized contract storage name: %s", metadata.Name))
}
}
```
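A hypothetical call site for the above, after a diff has been decoded (all literal values here are purely illustrative):
```golang
metadata := shared.StorageValueMetadata{
	Name: "num_addresses",
	Keys: nil,
	Type: shared.Uint256,
}
// blockNumber, blockHash, and the decoded value come from the parsed diff
createErr := repository.Create(4000000, "0xBlockHash", metadata, "2")
if createErr != nil {
	// an error here causes the storage watcher to queue the diff for retry
}
```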
## Summary
With our very simple address storing contract, we would be able to read its storage diffs by implementing an event transformer, a mappings file, and a repository.
The mappings would be able to lookup storage keys reflecting `num_addresses` or any slot in `addresses`, using addresses derived from watching the `AddressAdded` event for the latter.
The repository would be able to persist the value of `num_addresses` or any slot in `addresses`, using metadata returned from the mappings.
The mappings and repository could be plugged into the common storage transformer, enabling us to know the contract's state as it is changing.

View File

@ -1,140 +0,0 @@
# Watching Contract Storage
One approach VulcanizeDB takes to caching and indexing smart contracts is to ingest raw contract storage values.
Assuming that you are running an ethereum node that is writing contract storage changes to a CSV file, VulcanizeDB can parse them and persist the results to postgres.
## Assumptions
The current approach for caching smart contract storage diffs assumes that you are running a node that is writing contract storage diffs to a CSV file.
The CSV file is expected to have 5 columns: contract address, block hash, block number, storage key, storage value.
We have [a branch on vulcanize/parity-ethereum](https://github.com/vulcanize/parity-ethereum/tree/watch-storage-diffs) that enables running a node that writes storage diffs this way.
Looking forward, we would like to isolate this assumption as much as possible.
We may end up needing to read CSV data that is formatted differently, or reading data from a non-CSV source, and we do not want resulting changes to cascade throughout the codebase.
## Shared Code
VulcanizeDB has shared code for continuously reading from the CSV file written by the ethereum node and writing a parsed version of each row to postgres.
### Storage Watcher
The storage watcher is responsible for continuously delegating CSV rows to the appropriate transformer as they are being written by the ethereum node.
It maintains a mapping of contract addresses to transformers, and will ignore storage diff rows for contract addresses that do not have a corresponding transformer.
Storage watchers can be loaded with plugin storage transformers and executed using the `composeAndExecute` command.
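The delegation logic amounts to a map lookup; below is a minimal sketch under the assumptions that rows arrive on a channel and that `shared.StorageDiffRow` carries a `Contract` address field (the real watcher also handles queueing and retries):
```golang
type StorageWatcher struct {
	// populated via AddTransformers, keyed by contract address
	Transformers map[common.Address]transformer.StorageTransformer
}

func (watcher StorageWatcher) Execute(rows chan shared.StorageDiffRow, errs chan error) {
	for {
		select {
		case fetchErr := <-errs:
			logrus.Warn(fetchErr)
		case row := <-rows:
			t, watched := watcher.Transformers[row.Contract]
			if !watched {
				continue // no transformer for this contract; ignore the row
			}
			if executeErr := t.Execute(row); executeErr != nil {
				logrus.Warn(executeErr)
			}
		}
	}
}
```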
### Storage Transformer
The storage transformer is responsible for converting raw contract storage hex values into useful data and writing them to postgres.
The storage transformer depends on contract-specific implementations of code capable of recognizing storage keys and writing the matching (decoded) storage value to disk.
```golang
func (transformer Transformer) Execute(diff shared.StorageDiffRow) error {
metadata, lookupErr := transformer.StorageKeysLookup.Lookup(diff.StorageKey)
if lookupErr != nil {
return lookupErr
}
value, decodeErr := utils.Decode(diff, metadata)
if decodeErr != nil {
return decodeErr
}
return transformer.Repository.Create(diff.BlockHeight, diff.BlockHash.Hex(), metadata, value)
}
```
## Custom Code
In order to watch an additional smart contract, a developer must create three things:
1. StorageKeysLoader - identify keys in the contract's storage trie, providing metadata to describe how associated values should be decoded.
1. Repository - specify how to persist a parsed version of the storage value matching the recognized storage key.
1. Instance - create an instance of the storage transformer that uses your mappings and repository.
### StorageKeysLoader
A `StorageKeysLoader` is used by the `StorageKeysLookup` object on a storage transformer.
```golang
type KeysLoader interface {
LoadMappings() (map[common.Hash]utils.StorageValueMetadata, error)
SetDB(db *postgres.DB)
}
```
When a key is not found, the lookup object refreshes its known keys by calling the loader.
```golang
func (lookup *keysLookup) refreshMappings() error {
var err error
lookup.mappings, err = lookup.loader.LoadMappings()
if err != nil {
return err
}
lookup.mappings = utils.AddHashedKeys(lookup.mappings)
return nil
}
```
A contract-specific implementation of the loader enables the storage transformer to fetch metadata associated with a storage key.
Storage metadata contains: the name of the variable matching the storage key, a raw version of any keys associated with the variable (if the variable is a mapping), and the variable's type.
```golang
type StorageValueMetadata struct {
Name string
Keys map[Key]string
Type ValueType
}
```
The `Keys` field on the metadata is only relevant if the variable is a mapping. For example, in the following Solidity code:
```solidity
pragma solidity ^0.4.0;
contract Contract {
uint x;
mapping(address => uint) y;
}
```
The metadata for variable `x` would not have any associated keys, but the metadata for a storage key associated with `y` would include the address used to specify that key's index in the mapping.
The `SetDB` function is required for the storage key loader to connect to the database.
A database connection may be desired when keys in a mapping variable need to be read from log events (e.g. to lookup what addresses may exist in `y`, above).
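Tying this to the example contract from the storage transformer example document, a minimal loader sketch; the `checked_addresses` table and the `GetMappingStorageKey` helper are assumptions carried over from that example, not shared library code:
```golang
type addressKeysLoader struct {
	db *postgres.DB
}

func (loader *addressKeysLoader) SetDB(db *postgres.DB) {
	loader.db = db
}

func (loader *addressKeysLoader) LoadMappings() (map[common.Hash]utils.StorageValueMetadata, error) {
	mappings := map[common.Hash]utils.StorageValueMetadata{}
	// static key: slot 0 holds num_addresses
	mappings[common.BigToHash(big.NewInt(0))] = utils.GetStorageValueMetadata(
		"num_addresses", nil, utils.Uint256)
	// dynamic keys: one per address observed in AddressAdded events
	var addresses []string
	readErr := loader.db.Select(&addresses, `SELECT address FROM checked_addresses`)
	if readErr != nil {
		return nil, readErr
	}
	for _, address := range addresses {
		key := GetMappingStorageKey(address,
			"0000000000000000000000000000000000000000000000000000000000000001")
		mappings[common.HexToHash(key)] = utils.GetStorageValueMetadata(
			"addresses", map[utils.Key]string{"address": address}, utils.Uint256)
	}
	return mappings, nil
}
```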
### Repository
```golang
type Repository interface {
Create(blockNumber int, blockHash string, metadata shared.StorageValueMetadata, value interface{}) error
SetDB(db *postgres.DB)
}
```
A contract-specific implementation of the repository interface enables the transformer to write the decoded storage value to the appropriate table in postgres.
The `Create` function is expected to recognize and persist a given storage value by the variable's name, as indicated on the row's metadata.
Note: we advise silently discarding duplicates in `Create`, since it's possible to read the same diff several times, and returning an error would trigger the storage watcher to queue that diff for later processing.
The `SetDB` function is required for the repository to connect to the database.
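A sketch of a repository following that advice, using Postgres's `ON CONFLICT DO NOTHING` to discard duplicates silently; the `storage.num_addresses` table is hypothetical:
```golang
type AddressStorageRepository struct {
	db *postgres.DB
}

func (repository *AddressStorageRepository) SetDB(db *postgres.DB) {
	repository.db = db
}

func (repository *AddressStorageRepository) Create(diffID int64, metadata utils.StorageValueMetadata, value interface{}) error {
	switch metadata.Name {
	case "num_addresses":
		// ON CONFLICT DO NOTHING silently discards a diff we have already stored
		_, err := repository.db.Exec(
			`INSERT INTO storage.num_addresses (diff_id, n) VALUES ($1, $2)
			ON CONFLICT DO NOTHING`, diffID, value)
		return err
	default:
		return fmt.Errorf("unrecognized contract storage name: %s", metadata.Name)
	}
}
```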
### Instance
```golang
type Transformer struct {
HashedAddress common.Hash
StorageKeysLookup KeysLookup
Repository Repository
}
```
A new instance of the storage transformer is initialized with the contract-specific keys lookup (wrapping your loader) and repository, as well as the keccak hash of the contract's address.
The hashed address is included so that the watcher can query that value from the transformer in order to build up its mapping of addresses to transformers.
## Summary
To begin watching an additional smart contract, create a new keys loader for looking up storage keys on that contract, a repository for writing the contract's decoded storage values, and initialize a new storage transformer instance with the loader, repository, and contract address.
The new instance, wrapped in an initializer that calls `SetDB` on the lookup and repository, should be passed to the `AddTransformers` function on the storage watcher.
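For the example contract, such an initializer might look like the following sketch, reusing the illustrative `addressKeysLoader` and `AddressStorageRepository` types from above:
```golang
func AddressTransformerInitializer(db *postgres.DB) transformer.StorageTransformer {
	t := storage.Transformer{
		HashedAddress:     utils.HexToKeccak256Hash("0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe"),
		StorageKeysLookup: storage.NewKeysLookup(&addressKeysLoader{}),
		Repository:        &AddressStorageRepository{},
	}
	// NewTransformer calls SetDB on the lookup and repository
	return t.NewTransformer(db)
}
```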

View File

@ -1,28 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package storage
import (
"github.com/ethereum/go-ethereum/common"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type KeysLoader interface {
LoadMappings() (map[common.Hash]utils.StorageValueMetadata, error)
SetDB(db *postgres.DB)
}

View File

@ -1,66 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package storage
import (
"github.com/ethereum/go-ethereum/common"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type KeysLookup interface {
Lookup(key common.Hash) (utils.StorageValueMetadata, error)
SetDB(db *postgres.DB)
}
type keysLookup struct {
loader KeysLoader
mappings map[common.Hash]utils.StorageValueMetadata
}
func NewKeysLookup(loader KeysLoader) KeysLookup {
return &keysLookup{loader: loader, mappings: make(map[common.Hash]utils.StorageValueMetadata)}
}
func (lookup *keysLookup) Lookup(key common.Hash) (utils.StorageValueMetadata, error) {
metadata, ok := lookup.mappings[key]
if !ok {
refreshErr := lookup.refreshMappings()
if refreshErr != nil {
return metadata, refreshErr
}
metadata, ok = lookup.mappings[key]
if !ok {
return metadata, utils.ErrStorageKeyNotFound{Key: key.Hex()}
}
}
return metadata, nil
}
func (lookup *keysLookup) refreshMappings() error {
var err error
lookup.mappings, err = lookup.loader.LoadMappings()
if err != nil {
return err
}
lookup.mappings = utils.AddHashedKeys(lookup.mappings)
return nil
}
func (lookup *keysLookup) SetDB(db *postgres.DB) {
lookup.loader.SetDB(db)
}

View File

@ -1,113 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package storage_test
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/libraries/shared/factories/storage"
"github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
"github.com/vulcanize/vulcanizedb/test_config"
)
var _ = Describe("Storage keys lookup", func() {
var (
fakeMetadata = utils.GetStorageValueMetadata("name", map[utils.Key]string{}, utils.Uint256)
lookup storage.KeysLookup
loader *mocks.MockStorageKeysLoader
)
BeforeEach(func() {
loader = &mocks.MockStorageKeysLoader{}
lookup = storage.NewKeysLookup(loader)
})
Describe("Lookup", func() {
Describe("when key not found", func() {
It("refreshes keys", func() {
loader.StorageKeyMappings = map[common.Hash]utils.StorageValueMetadata{fakes.FakeHash: fakeMetadata}
_, err := lookup.Lookup(fakes.FakeHash)
Expect(err).NotTo(HaveOccurred())
Expect(loader.LoadMappingsCallCount).To(Equal(1))
})
It("returns error if refreshing keys fails", func() {
loader.LoadMappingsError = fakes.FakeError
_, err := lookup.Lookup(fakes.FakeHash)
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(fakes.FakeError))
})
})
Describe("when key found", func() {
BeforeEach(func() {
loader.StorageKeyMappings = map[common.Hash]utils.StorageValueMetadata{fakes.FakeHash: fakeMetadata}
_, err := lookup.Lookup(fakes.FakeHash)
Expect(err).NotTo(HaveOccurred())
Expect(loader.LoadMappingsCallCount).To(Equal(1))
})
It("does not refresh keys", func() {
_, err := lookup.Lookup(fakes.FakeHash)
Expect(err).NotTo(HaveOccurred())
Expect(loader.LoadMappingsCallCount).To(Equal(1))
})
})
It("returns metadata for loaded static key", func() {
loader.StorageKeyMappings = map[common.Hash]utils.StorageValueMetadata{fakes.FakeHash: fakeMetadata}
metadata, err := lookup.Lookup(fakes.FakeHash)
Expect(err).NotTo(HaveOccurred())
Expect(metadata).To(Equal(fakeMetadata))
})
It("returns metadata for hashed version of key (accommodates keys emitted from Geth)", func() {
loader.StorageKeyMappings = map[common.Hash]utils.StorageValueMetadata{fakes.FakeHash: fakeMetadata}
hashedKey := common.BytesToHash(crypto.Keccak256(fakes.FakeHash.Bytes()))
metadata, err := lookup.Lookup(hashedKey)
Expect(err).NotTo(HaveOccurred())
Expect(metadata).To(Equal(fakeMetadata))
})
It("returns key not found error if key not found", func() {
_, err := lookup.Lookup(fakes.FakeHash)
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(utils.ErrStorageKeyNotFound{Key: fakes.FakeHash.Hex()}))
})
})
Describe("SetDB", func() {
It("sets the db on the loader", func() {
lookup.SetDB(test_config.NewTestDB(test_config.NewTestNode()))
Expect(loader.SetDBCalled).To(BeTrue())
})
})
})

View File

@ -1,27 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package storage
import (
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type Repository interface {
Create(diffID int64, metadata utils.StorageValueMetadata, value interface{}) error
SetDB(db *postgres.DB)
}

View File

@ -1,29 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package storage_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestStorage(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Storage Factories Suite")
}

View File

@ -1,52 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package storage
import (
"github.com/ethereum/go-ethereum/common"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type Transformer struct {
HashedAddress common.Hash
StorageKeysLookup KeysLookup
Repository Repository
}
func (transformer Transformer) NewTransformer(db *postgres.DB) transformer.StorageTransformer {
transformer.StorageKeysLookup.SetDB(db)
transformer.Repository.SetDB(db)
return transformer
}
func (transformer Transformer) KeccakContractAddress() common.Hash {
return transformer.HashedAddress
}
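// Execute looks up metadata for the diff's storage key, decodes the storage value according to that metadata, and persists the result via the repository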
func (transformer Transformer) Execute(diff utils.PersistedStorageDiff) error {
metadata, lookupErr := transformer.StorageKeysLookup.Lookup(diff.StorageKey)
if lookupErr != nil {
return lookupErr
}
value, decodeErr := utils.Decode(diff, metadata)
if decodeErr != nil {
return decodeErr
}
return transformer.Repository.Create(diff.ID, metadata, value)
}

View File

@ -1,160 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package storage_test
import (
"math/rand"
"github.com/ethereum/go-ethereum/common"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/libraries/shared/factories/storage"
"github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
)
var _ = Describe("Storage transformer", func() {
var (
storageKeysLookup *mocks.MockStorageKeysLookup
repository *mocks.MockStorageRepository
t storage.Transformer
)
BeforeEach(func() {
storageKeysLookup = &mocks.MockStorageKeysLookup{}
repository = &mocks.MockStorageRepository{}
t = storage.Transformer{
HashedAddress: common.Hash{},
StorageKeysLookup: storageKeysLookup,
Repository: repository,
}
})
It("returns the contract address being watched", func() {
fakeAddress := utils.HexToKeccak256Hash("0x12345")
t.HashedAddress = fakeAddress
Expect(t.KeccakContractAddress()).To(Equal(fakeAddress))
})
It("looks up metadata for storage key", func() {
t.Execute(utils.PersistedStorageDiff{})
Expect(storageKeysLookup.LookupCalled).To(BeTrue())
})
It("returns error if lookup fails", func() {
storageKeysLookup.LookupErr = fakes.FakeError
err := t.Execute(utils.PersistedStorageDiff{})
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(fakes.FakeError))
})
It("creates storage row with decoded data", func() {
fakeMetadata := utils.StorageValueMetadata{Type: utils.Address}
storageKeysLookup.Metadata = fakeMetadata
rawValue := common.HexToAddress("0x12345")
fakeBlockNumber := 123
fakeBlockHash := "0x67890"
fakeRow := utils.PersistedStorageDiff{
ID: rand.Int63(),
StorageDiffInput: utils.StorageDiffInput{
HashedAddress: common.Hash{},
BlockHash: common.HexToHash(fakeBlockHash),
BlockHeight: fakeBlockNumber,
StorageKey: common.Hash{},
StorageValue: rawValue.Hash(),
},
}
err := t.Execute(fakeRow)
Expect(err).NotTo(HaveOccurred())
Expect(repository.PassedDiffID).To(Equal(fakeRow.ID))
Expect(repository.PassedMetadata).To(Equal(fakeMetadata))
Expect(repository.PassedValue.(string)).To(Equal(rawValue.Hex()))
})
It("returns error if creating row fails", func() {
rawValue := common.HexToAddress("0x12345")
fakeMetadata := utils.StorageValueMetadata{Type: utils.Address}
storageKeysLookup.Metadata = fakeMetadata
repository.CreateErr = fakes.FakeError
diff := utils.PersistedStorageDiff{StorageDiffInput: utils.StorageDiffInput{StorageValue: rawValue.Hash()}}
err := t.Execute(diff)
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(fakes.FakeError))
})
Describe("when a storage row contains more than one item packed in storage", func() {
var (
rawValue = common.HexToAddress("000000000000000000000000000000000000000000000002a300000000002a30")
fakeBlockNumber = 123
fakeBlockHash = "0x67890"
packedTypes = make(map[int]utils.ValueType)
)
packedTypes[0] = utils.Uint48
packedTypes[1] = utils.Uint48
var fakeMetadata = utils.StorageValueMetadata{
Name: "",
Keys: nil,
Type: utils.PackedSlot,
PackedTypes: packedTypes,
}
It("passes the decoded data items to the repository", func() {
storageKeysLookup.Metadata = fakeMetadata
fakeRow := utils.PersistedStorageDiff{
ID: rand.Int63(),
StorageDiffInput: utils.StorageDiffInput{
HashedAddress: common.Hash{},
BlockHash: common.HexToHash(fakeBlockHash),
BlockHeight: fakeBlockNumber,
StorageKey: common.Hash{},
StorageValue: rawValue.Hash(),
},
}
err := t.Execute(fakeRow)
Expect(err).NotTo(HaveOccurred())
Expect(repository.PassedDiffID).To(Equal(fakeRow.ID))
Expect(repository.PassedMetadata).To(Equal(fakeMetadata))
expectedPassedValue := make(map[int]string)
expectedPassedValue[0] = "10800"
expectedPassedValue[1] = "172800"
Expect(repository.PassedValue.(map[int]string)).To(Equal(expectedPassedValue))
})
It("returns error if creating a row fails", func() {
storageKeysLookup.Metadata = fakeMetadata
repository.CreateErr = fakes.FakeError
diff := utils.PersistedStorageDiff{StorageDiffInput: utils.StorageDiffInput{StorageValue: rawValue.Hash()}}
err := t.Execute(diff)
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(fakes.FakeError))
})
})
})

View File

@ -1,49 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package fetcher
import (
"strings"
"github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/fs"
)
type CsvTailStorageFetcher struct {
tailer fs.Tailer
}
func NewCsvTailStorageFetcher(tailer fs.Tailer) CsvTailStorageFetcher {
return CsvTailStorageFetcher{tailer: tailer}
}
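// FetchStorageDiffs tails the CSV file, parses each line into a StorageDiffInput, and writes diffs to the out channel and tail/parse errors to errs; it runs until the tailer's line channel is closed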
func (storageFetcher CsvTailStorageFetcher) FetchStorageDiffs(out chan<- utils.StorageDiffInput, errs chan<- error) {
t, tailErr := storageFetcher.tailer.Tail()
if tailErr != nil {
errs <- tailErr
}
logrus.Debug("fetching storage diffs...")
for line := range t.Lines {
diff, parseErr := utils.FromParityCsvRow(strings.Split(line.Text, ","))
if parseErr != nil {
errs <- parseErr
} else {
out <- diff
}
}
}

View File

@ -1,98 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package fetcher_test
import (
"fmt"
"strings"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/hpcloud/tail"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/libraries/shared/fetcher"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
)
var _ = Describe("Csv Tail Storage Fetcher", func() {
var (
errorsChannel chan error
mockTailer *fakes.MockTailer
diffsChannel chan utils.StorageDiffInput
storageFetcher fetcher.CsvTailStorageFetcher
)
BeforeEach(func() {
errorsChannel = make(chan error)
diffsChannel = make(chan utils.StorageDiffInput)
mockTailer = fakes.NewMockTailer()
storageFetcher = fetcher.NewCsvTailStorageFetcher(mockTailer)
})
It("adds error to errors channel if tailing file fails", func(done Done) {
mockTailer.TailErr = fakes.FakeError
go storageFetcher.FetchStorageDiffs(diffsChannel, errorsChannel)
Expect(<-errorsChannel).To(MatchError(fakes.FakeError))
close(done)
})
It("adds parsed csv row to rows channel for storage diff", func(done Done) {
line := getFakeLine()
go storageFetcher.FetchStorageDiffs(diffsChannel, errorsChannel)
mockTailer.Lines <- line
expectedRow, err := utils.FromParityCsvRow(strings.Split(line.Text, ","))
Expect(err).NotTo(HaveOccurred())
Expect(<-diffsChannel).To(Equal(expectedRow))
close(done)
})
It("adds error to errors channel if parsing csv fails", func(done Done) {
line := &tail.Line{Text: "invalid"}
go storageFetcher.FetchStorageDiffs(diffsChannel, errorsChannel)
mockTailer.Lines <- line
Expect(<-errorsChannel).To(HaveOccurred())
select {
case <-diffsChannel:
Fail("value passed to rows channel on error")
default:
Succeed()
}
close(done)
})
})
func getFakeLine() *tail.Line {
address := common.HexToAddress("0x1234567890abcdef")
blockHash := []byte{4, 5, 6}
blockHeight := int64(789)
storageKey := []byte{9, 8, 7}
storageValue := []byte{6, 5, 4}
return &tail.Line{
Text: fmt.Sprintf("%s,%s,%d,%s,%s", common.Bytes2Hex(address.Bytes()), common.Bytes2Hex(blockHash),
blockHeight, common.Bytes2Hex(storageKey), common.Bytes2Hex(storageValue)),
Time: time.Time{},
Err: nil,
}
}

View File

@ -1,35 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package fetcher_test
import (
"io/ioutil"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
log "github.com/sirupsen/logrus"
)
func TestFactories(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Shared Fetcher Suite")
}
var _ = BeforeSuite(func() {
log.SetOutput(ioutil.Discard)
})

View File

@ -1,86 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package fetcher
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff"
"github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
)
const (
PayloadChanBufferSize = 20000 // the max eth sub buffer size
)
type GethRPCStorageFetcher struct {
StatediffPayloadChan chan statediff.Payload
streamer streamer.Streamer
}
func NewGethRPCStorageFetcher(streamer streamer.Streamer) GethRPCStorageFetcher {
return GethRPCStorageFetcher{
StatediffPayloadChan: make(chan statediff.Payload, PayloadChanBufferSize),
streamer: streamer,
}
}
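// FetchStorageDiffs subscribes to geth statediff payloads over RPC, decodes each payload's state object RLP, and emits one StorageDiffInput per storage node to the out channel, sending decode and format errors to errs; it loops indefinitely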
func (fetcher GethRPCStorageFetcher) FetchStorageDiffs(out chan<- utils.StorageDiffInput, errs chan<- error) {
ethStatediffPayloadChan := fetcher.StatediffPayloadChan
clientSubscription, clientSubErr := fetcher.streamer.Stream(ethStatediffPayloadChan, statediff.Params{})
if clientSubErr != nil {
errs <- clientSubErr
panic(fmt.Sprintf("Error creating a geth client subscription: %v", clientSubErr))
}
logrus.Info("Successfully created a geth client subscription: ", clientSubscription)
for {
diff := <-ethStatediffPayloadChan
logrus.Trace("received a statediff")
stateDiff := new(statediff.StateObject)
decodeErr := rlp.DecodeBytes(diff.StateObjectRlp, stateDiff)
if decodeErr != nil {
logrus.Warn("Error decoding state diff into RLP: ", decodeErr)
errs <- decodeErr
}
accounts := utils.GetAccountsFromDiff(*stateDiff)
logrus.Trace(fmt.Sprintf("iterating through %d accounts on stateDiff for block %d", len(accounts), stateDiff.BlockNumber))
for _, account := range accounts {
logrus.Trace(fmt.Sprintf("iterating through %d Storage values on account with key %s", len(account.StorageNodes), common.BytesToHash(account.LeafKey).Hex()))
for _, storage := range account.StorageNodes {
diff, formatErr := utils.FromGethStateDiff(account, stateDiff, storage)
if formatErr != nil {
logrus.Error("failed to format utils.StorageDiff from storage with key: ", common.BytesToHash(storage.LeafKey), "from account with key: ", common.BytesToHash(account.LeafKey))
errs <- formatErr
continue
}
logrus.Trace("adding storage diff to out channel",
"keccak of address: ", diff.HashedAddress.Hex(),
"block height: ", diff.BlockHeight,
"storage key: ", diff.StorageKey.Hex(),
"storage value: ", diff.StorageValue.Hex())
out <- diff
}
}
}
}

View File

@ -1,174 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package fetcher_test
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/libraries/shared/fetcher"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/libraries/shared/test_data"
"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
)
type MockStoragediffStreamer struct {
subscribeError error
PassedPayloadChan chan statediff.Payload
PassedParams statediff.Params
streamPayloads []statediff.Payload
}
func (streamer *MockStoragediffStreamer) Stream(statediffPayloadChan chan statediff.Payload, params statediff.Params) (*rpc.ClientSubscription, error) {
clientSubscription := rpc.ClientSubscription{}
streamer.PassedPayloadChan = statediffPayloadChan
streamer.PassedParams = params
go func() {
for _, payload := range streamer.streamPayloads {
streamer.PassedPayloadChan <- payload
}
}()
return &clientSubscription, streamer.subscribeError
}
func (streamer *MockStoragediffStreamer) SetSubscribeError(err error) {
streamer.subscribeError = err
}
func (streamer *MockStoragediffStreamer) SetPayloads(payloads []statediff.Payload) {
streamer.streamPayloads = payloads
}
var _ = Describe("Geth RPC Storage Fetcher", func() {
var streamer MockStoragediffStreamer
var statediffFetcher fetcher.GethRPCStorageFetcher
var storagediffChan chan utils.StorageDiffInput
var errorChan chan error
BeforeEach(func() {
streamer = MockStoragediffStreamer{}
statediffFetcher = fetcher.NewGethRPCStorageFetcher(&streamer)
storagediffChan = make(chan utils.StorageDiffInput)
errorChan = make(chan error)
})
It("adds errors to error channel if the RPC subscription fails and panics", func(done Done) {
streamer.SetSubscribeError(fakes.FakeError)
go func() {
failedSub := func() {
statediffFetcher.FetchStorageDiffs(storagediffChan, errorChan)
}
Expect(failedSub).To(Panic())
}()
Expect(<-errorChan).To(MatchError(fakes.FakeError))
close(done)
})
It("streams StatediffPayloads from a Geth RPC subscription", func(done Done) {
streamer.SetPayloads([]statediff.Payload{test_data.MockStatediffPayload})
go statediffFetcher.FetchStorageDiffs(storagediffChan, errorChan)
streamedPayload := <-statediffFetcher.StatediffPayloadChan
Expect(streamedPayload).To(Equal(test_data.MockStatediffPayload))
Expect(streamer.PassedPayloadChan).To(Equal(statediffFetcher.StatediffPayloadChan))
close(done)
})
It("adds errors to error channel if decoding the state diff RLP fails", func(done Done) {
badStatediffPayload := statediff.Payload{}
streamer.SetPayloads([]statediff.Payload{badStatediffPayload})
go statediffFetcher.FetchStorageDiffs(storagediffChan, errorChan)
Expect(<-errorChan).To(MatchError("EOF"))
close(done)
})
It("adds parsed statediff payloads to the rows channel", func(done Done) {
streamer.SetPayloads([]statediff.Payload{test_data.MockStatediffPayload})
go statediffFetcher.FetchStorageDiffs(storagediffChan, errorChan)
height := test_data.BlockNumber
intHeight := int(height.Int64())
createdExpectedStorageDiff := utils.StorageDiffInput{
HashedAddress: common.BytesToHash(test_data.ContractLeafKey[:]),
BlockHash: common.HexToHash("0xfa40fbe2d98d98b3363a778d52f2bcd29d6790b9b3f3cab2b167fd12d3550f73"),
BlockHeight: intHeight,
StorageKey: common.BytesToHash(test_data.StorageKey),
StorageValue: common.BytesToHash(test_data.SmallStorageValue),
}
updatedExpectedStorageDiff := utils.StorageDiffInput{
HashedAddress: common.BytesToHash(test_data.AnotherContractLeafKey[:]),
BlockHash: common.HexToHash("0xfa40fbe2d98d98b3363a778d52f2bcd29d6790b9b3f3cab2b167fd12d3550f73"),
BlockHeight: intHeight,
StorageKey: common.BytesToHash(test_data.StorageKey),
StorageValue: common.BytesToHash(test_data.LargeStorageValue),
}
deletedExpectedStorageDiff := utils.StorageDiffInput{
HashedAddress: common.BytesToHash(test_data.AnotherContractLeafKey[:]),
BlockHash: common.HexToHash("0xfa40fbe2d98d98b3363a778d52f2bcd29d6790b9b3f3cab2b167fd12d3550f73"),
BlockHeight: intHeight,
StorageKey: common.BytesToHash(test_data.StorageKey),
StorageValue: common.BytesToHash(test_data.SmallStorageValue),
}
createdStateDiff := <-storagediffChan
updatedStateDiff := <-storagediffChan
deletedStateDiff := <-storagediffChan
Expect(createdStateDiff).To(Equal(createdExpectedStorageDiff))
Expect(updatedStateDiff).To(Equal(updatedExpectedStorageDiff))
Expect(deletedStateDiff).To(Equal(deletedExpectedStorageDiff))
close(done)
})
It("adds errors to error channel if formatting the diff as a StateDiff object fails", func(done Done) {
accountDiffs := test_data.CreatedAccountDiffs
accountDiffs[0].StorageNodes = []statediff.StorageNode{test_data.StorageWithBadValue}
stateDiff := statediff.StateObject{
BlockNumber: test_data.BlockNumber,
BlockHash: common.HexToHash(test_data.BlockHash),
Nodes: accountDiffs,
}
stateDiffRlp, err := rlp.EncodeToBytes(stateDiff)
Expect(err).NotTo(HaveOccurred())
badStatediffPayload := statediff.Payload{
StateObjectRlp: stateDiffRlp,
}
streamer.SetPayloads([]statediff.Payload{badStatediffPayload})
go statediffFetcher.FetchStorageDiffs(storagediffChan, errorChan)
Expect(<-errorChan).To(MatchError("rlp: input contains more than one value"))
close(done)
})
})

View File

@ -1,59 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package fetcher
import (
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
)
type ILogFetcher interface {
FetchLogs(contractAddresses []common.Address, topics []common.Hash, missingHeader core.Header) ([]types.Log, error)
// TODO Extend FetchLogs for doing several blocks at a time
}
type LogFetcher struct {
blockChain core.BlockChain
}
func NewLogFetcher(blockchain core.BlockChain) *LogFetcher {
return &LogFetcher{
blockChain: blockchain,
}
}
// FetchLogs checks all topic0s, on all addresses, fetching matching logs for the given header
func (logFetcher LogFetcher) FetchLogs(addresses []common.Address, topic0s []common.Hash, header core.Header) ([]types.Log, error) {
blockHash := common.HexToHash(header.Hash)
query := ethereum.FilterQuery{
BlockHash: &blockHash,
Addresses: addresses,
// Search for _any_ of the topics in topic0 position; see docs on `FilterQuery`
Topics: [][]common.Hash{topic0s},
}
logs, err := logFetcher.blockChain.GetEthLogsWithCustomQuery(query)
if err != nil {
// TODO review aggregate fetching error handling
return []types.Log{}, err
}
return logs, nil
}
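For reference, a minimal usage sketch of the removed LogFetcher; it reuses the mock chain and fake header from the fakes package (as the tests below do) rather than a real core.BlockChain.

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/common"

	"github.com/vulcanize/vulcanizedb/libraries/shared/fetcher"
	"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
)

func main() {
	// The mock chain stands in for a real core.BlockChain implementation.
	blockChain := fakes.NewMockBlockChain()
	logFetcher := fetcher.NewLogFetcher(blockChain)
	addresses := []common.Address{common.HexToAddress("0xfakeAddress")}
	topic0s := []common.Hash{common.BytesToHash([]byte{1, 2, 3, 4, 5})}
	// Fetches logs for the header's block, matching any topic0 on any address.
	logs, err := logFetcher.FetchLogs(addresses, topic0s, fakes.FakeHeader)
	if err != nil {
		log.Fatalf("fetching logs: %s", err)
	}
	log.Printf("fetched %d logs", len(logs))
}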

View File

@ -1,70 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package fetcher_test
import (
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/libraries/shared/fetcher"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
)
var _ = Describe("LogFetcher", func() {
Describe("FetchLogs", func() {
It("fetches logs based on the given query", func() {
blockChain := fakes.NewMockBlockChain()
logFetcher := fetcher.NewLogFetcher(blockChain)
header := fakes.FakeHeader
addresses := []common.Address{
common.HexToAddress("0xfakeAddress"),
common.HexToAddress("0xanotherFakeAddress"),
}
topicZeros := []common.Hash{common.BytesToHash([]byte{1, 2, 3, 4, 5})}
_, err := logFetcher.FetchLogs(addresses, topicZeros, header)
address1 := common.HexToAddress("0xfakeAddress")
address2 := common.HexToAddress("0xanotherFakeAddress")
Expect(err).NotTo(HaveOccurred())
blockHash := common.HexToHash(header.Hash)
expectedQuery := ethereum.FilterQuery{
BlockHash: &blockHash,
Addresses: []common.Address{address1, address2},
Topics: [][]common.Hash{topicZeros},
}
blockChain.AssertGetEthLogsWithCustomQueryCalledWith(expectedQuery)
})
It("returns an error if fetching the logs fails", func() {
blockChain := fakes.NewMockBlockChain()
blockChain.SetGetEthLogsWithCustomQueryErr(fakes.FakeError)
logFetcher := fetcher.NewLogFetcher(blockChain)
_, err := logFetcher.FetchLogs([]common.Address{}, []common.Hash{}, core.Header{})
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(fakes.FakeError))
})
})
})

View File

@ -1,79 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package fetcher
import (
"fmt"
"github.com/ethereum/go-ethereum/statediff"
"github.com/vulcanize/vulcanizedb/pkg/eth/client"
)
// StateDiffFetcher is the state diff fetching interface
type StateDiffFetcher interface {
FetchStateDiffsAt(blockHeights []uint64) ([]statediff.Payload, error)
}
// BatchClient is an interface to a batch-fetching geth rpc client; created to allow mock insertion
type BatchClient interface {
BatchCall(batch []client.BatchElem) error
}
// stateDiffFetcher is the state diff fetching struct
type stateDiffFetcher struct {
// stateDiffFetcher is thread-safe as long as the underlying client is thread-safe, since it has/modifies no other state
// http.Client is thread-safe
client BatchClient
}
const method = "statediff_stateDiffAt"
// NewStateDiffFetcher returns a new StateDiffFetcher
func NewStateDiffFetcher(bc BatchClient) StateDiffFetcher {
return &stateDiffFetcher{
client: bc,
}
}
// FetchStateDiffsAt fetches the statediff payloads at the given block heights
// Calls StateDiffAt(ctx context.Context, blockNumber uint64) (*Payload, error)
func (fetcher *stateDiffFetcher) FetchStateDiffsAt(blockHeights []uint64) ([]statediff.Payload, error) {
batch := make([]client.BatchElem, 0)
for _, height := range blockHeights {
batch = append(batch, client.BatchElem{
Method: method,
Args: []interface{}{height},
Result: new(statediff.Payload),
})
}
batchErr := fetcher.client.BatchCall(batch)
if batchErr != nil {
return nil, fmt.Errorf("stateDiffFetcher err: %s", batchErr.Error())
}
results := make([]statediff.Payload, 0, len(blockHeights))
for _, batchElem := range batch {
if batchElem.Error != nil {
return nil, fmt.Errorf("stateDiffFetcher err: %s", batchElem.Error.Error())
}
payload, ok := batchElem.Result.(*statediff.Payload)
if !ok {
return nil, fmt.Errorf("stateDiffFetcher err: unexpected result type %T", batchElem.Result)
}
results = append(results, *payload)
}
return results, nil
}
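A minimal usage sketch of the removed stateDiffFetcher, using the BackFillerClient mock and test_data fixtures from the test suite in place of a live batching RPC client pointed at a statediffing geth node.

package main

import (
	"log"

	"github.com/vulcanize/vulcanizedb/libraries/shared/fetcher"
	"github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
	"github.com/vulcanize/vulcanizedb/libraries/shared/test_data"
)

func main() {
	// The mock satisfies BatchClient; a real deployment would wrap an rpc client.
	mc := new(mocks.BackFillerClient)
	if err := mc.SetReturnDiffAt(test_data.BlockNumber.Uint64(), test_data.MockStatediffPayload); err != nil {
		log.Fatal(err)
	}
	stateDiffFetcher := fetcher.NewStateDiffFetcher(mc)
	// One statediff_stateDiffAt call is batched per requested height.
	payloads, err := stateDiffFetcher.FetchStateDiffsAt([]uint64{test_data.BlockNumber.Uint64()})
	if err != nil {
		log.Fatalf("fetching state diffs: %s", err)
	}
	log.Printf("fetched %d statediff payloads", len(payloads))
}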

View File

@ -1,54 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package fetcher_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/libraries/shared/fetcher"
"github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
"github.com/vulcanize/vulcanizedb/libraries/shared/test_data"
)
var _ = Describe("StateDiffFetcher", func() {
Describe("FetchStateDiffsAt", func() {
var (
mc *mocks.BackFillerClient
stateDiffFetcher fetcher.StateDiffFetcher
)
BeforeEach(func() {
mc = new(mocks.BackFillerClient)
setDiffAtErr1 := mc.SetReturnDiffAt(test_data.BlockNumber.Uint64(), test_data.MockStatediffPayload)
Expect(setDiffAtErr1).ToNot(HaveOccurred())
setDiffAtErr2 := mc.SetReturnDiffAt(test_data.BlockNumber2.Uint64(), test_data.MockStatediffPayload2)
Expect(setDiffAtErr2).ToNot(HaveOccurred())
stateDiffFetcher = fetcher.NewStateDiffFetcher(mc)
})
It("Batch calls statediff_stateDiffAt", func() {
blockHeights := []uint64{
test_data.BlockNumber.Uint64(),
test_data.BlockNumber2.Uint64(),
}
stateDiffPayloads, fetchErr := stateDiffFetcher.FetchStateDiffsAt(blockHeights)
Expect(fetchErr).ToNot(HaveOccurred())
Expect(len(stateDiffPayloads)).To(Equal(2))
Expect(stateDiffPayloads[0]).To(Equal(test_data.MockStatediffPayload))
Expect(stateDiffPayloads[1]).To(Equal(test_data.MockStatediffPayload2))
})
})
})

View File

@ -1,21 +0,0 @@
// Copyright 2018 Vulcanize
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fetcher
import "github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
type IStorageFetcher interface {
FetchStorageDiffs(out chan<- utils.StorageDiffInput, errs chan<- error)
}
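For illustration, a toy implementation satisfying this interface; cannedStorageFetcher and the example package name are hypothetical, not types from the repo.

package example

import "github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"

// cannedStorageFetcher satisfies IStorageFetcher by streaming preloaded diffs.
type cannedStorageFetcher struct {
	diffs []utils.StorageDiffInput
}

func (f cannedStorageFetcher) FetchStorageDiffs(out chan<- utils.StorageDiffInput, errs chan<- error) {
	// Real fetchers stream indefinitely from a live source; this one just
	// plays back its fixtures and returns without ever sending an error.
	for _, diff := range f.diffs {
		out <- diff
	}
}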

View File

@ -1,86 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package logs
import (
"errors"
"github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/libraries/shared/chunker"
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore"
)
var (
ErrNoLogs = errors.New("no logs available for transforming")
ErrNoTransformers = errors.New("no event transformers configured in the log delegator")
)
type ILogDelegator interface {
AddTransformer(t transformer.EventTransformer)
DelegateLogs() error
}
type LogDelegator struct {
Chunker chunker.Chunker
LogRepository datastore.HeaderSyncLogRepository
Transformers []transformer.EventTransformer
}
func (delegator *LogDelegator) AddTransformer(t transformer.EventTransformer) {
delegator.Transformers = append(delegator.Transformers, t)
delegator.Chunker.AddConfig(t.GetConfig())
}
func (delegator *LogDelegator) DelegateLogs() error {
if len(delegator.Transformers) < 1 {
return ErrNoTransformers
}
persistedLogs, fetchErr := delegator.LogRepository.GetUntransformedHeaderSyncLogs()
if fetchErr != nil {
logrus.Errorf("error loading logs from db: %s", fetchErr.Error())
return fetchErr
}
if len(persistedLogs) < 1 {
return ErrNoLogs
}
transformErr := delegator.delegateLogs(persistedLogs)
if transformErr != nil {
logrus.Errorf("error transforming logs: %s", transformErr)
return transformErr
}
return nil
}
func (delegator *LogDelegator) delegateLogs(logs []core.HeaderSyncLog) error {
chunkedLogs := delegator.Chunker.ChunkLogs(logs)
for _, t := range delegator.Transformers {
transformerName := t.GetConfig().TransformerName
logChunk := chunkedLogs[transformerName]
err := t.Execute(logChunk)
if err != nil {
logrus.Errorf("%v transformer failed to execute in watcher: %v", transformerName, err)
return err
}
}
return nil
}
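A minimal wiring sketch for the removed LogDelegator, substituting the test-suite mocks for real dependencies.

package main

import (
	"log"

	"github.com/vulcanize/vulcanizedb/libraries/shared/chunker"
	"github.com/vulcanize/vulcanizedb/libraries/shared/logs"
	"github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
	"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
)

func main() {
	// The mock repository stands in for the Postgres-backed HeaderSyncLogRepository.
	delegator := &logs.LogDelegator{
		Chunker:       chunker.NewLogChunker(),
		LogRepository: &fakes.MockHeaderSyncLogRepository{},
	}
	delegator.AddTransformer(&mocks.MockEventTransformer{})
	// DelegateLogs chunks untransformed logs and hands each chunk to the
	// matching transformer; ErrNoLogs just means there is no work yet.
	if err := delegator.DelegateLogs(); err != nil && err != logs.ErrNoLogs {
		log.Fatalf("delegating logs: %s", err)
	}
}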

View File

@ -1,166 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package logs_test
import (
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/libraries/shared/chunker"
"github.com/vulcanize/vulcanizedb/libraries/shared/logs"
"github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
)
var _ = Describe("Log delegator", func() {
Describe("AddTransformer", func() {
It("adds transformers to the delegator", func() {
fakeTransformer := &mocks.MockEventTransformer{}
delegator := logs.LogDelegator{Chunker: chunker.NewLogChunker()}
delegator.AddTransformer(fakeTransformer)
Expect(delegator.Transformers).To(Equal([]transformer.EventTransformer{fakeTransformer}))
})
It("passes transformers' configs to the chunker", func() {
fakeTransformer := &mocks.MockEventTransformer{}
fakeConfig := mocks.FakeTransformerConfig
fakeTransformer.SetTransformerConfig(fakeConfig)
chunker := chunker.NewLogChunker()
delegator := logs.LogDelegator{Chunker: chunker}
delegator.AddTransformer(fakeTransformer)
expectedName := fakeConfig.TransformerName
expectedTopic := common.HexToHash(fakeConfig.Topic)
Expect(chunker.NameToTopic0).To(Equal(map[string]common.Hash{expectedName: expectedTopic}))
expectedAddress := strings.ToLower(fakeConfig.ContractAddresses[0])
Expect(chunker.AddressToNames).To(Equal(map[string][]string{expectedAddress: {expectedName}}))
})
})
Describe("DelegateLogs", func() {
It("returns error if no transformers configured", func() {
delegator := newDelegator(&fakes.MockHeaderSyncLogRepository{})
err := delegator.DelegateLogs()
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(logs.ErrNoTransformers))
})
It("gets untransformed logs", func() {
mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
mockLogRepository.ReturnLogs = []core.HeaderSyncLog{{}}
delegator := newDelegator(mockLogRepository)
delegator.AddTransformer(&mocks.MockEventTransformer{})
err := delegator.DelegateLogs()
Expect(err).NotTo(HaveOccurred())
Expect(mockLogRepository.GetCalled).To(BeTrue())
})
It("returns error if getting untransformed logs fails", func() {
mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
mockLogRepository.GetError = fakes.FakeError
delegator := newDelegator(mockLogRepository)
delegator.AddTransformer(&mocks.MockEventTransformer{})
err := delegator.DelegateLogs()
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(fakes.FakeError))
})
It("returns error that no logs were found if no logs returned", func() {
delegator := newDelegator(&fakes.MockHeaderSyncLogRepository{})
delegator.AddTransformer(&mocks.MockEventTransformer{})
err := delegator.DelegateLogs()
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(logs.ErrNoLogs))
})
It("delegates chunked logs to transformers", func() {
fakeTransformer := &mocks.MockEventTransformer{}
config := mocks.FakeTransformerConfig
fakeTransformer.SetTransformerConfig(config)
fakeGethLog := types.Log{
Address: common.HexToAddress(config.ContractAddresses[0]),
Topics: []common.Hash{common.HexToHash(config.Topic)},
}
fakeHeaderSyncLogs := []core.HeaderSyncLog{{Log: fakeGethLog}}
mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
mockLogRepository.ReturnLogs = fakeHeaderSyncLogs
delegator := newDelegator(mockLogRepository)
delegator.AddTransformer(fakeTransformer)
err := delegator.DelegateLogs()
Expect(err).NotTo(HaveOccurred())
Expect(fakeTransformer.ExecuteWasCalled).To(BeTrue())
Expect(fakeTransformer.PassedLogs).To(Equal(fakeHeaderSyncLogs))
})
It("returns error if transformer returns an error", func() {
mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
mockLogRepository.ReturnLogs = []core.HeaderSyncLog{{}}
delegator := newDelegator(mockLogRepository)
fakeTransformer := &mocks.MockEventTransformer{ExecuteError: fakes.FakeError}
delegator.AddTransformer(fakeTransformer)
err := delegator.DelegateLogs()
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(fakes.FakeError))
})
It("returns nil for error when logs returned and delegated", func() {
fakeTransformer := &mocks.MockEventTransformer{}
config := mocks.FakeTransformerConfig
fakeTransformer.SetTransformerConfig(config)
fakeGethLog := types.Log{
Address: common.HexToAddress(config.ContractAddresses[0]),
Topics: []common.Hash{common.HexToHash(config.Topic)},
}
fakeHeaderSyncLogs := []core.HeaderSyncLog{{Log: fakeGethLog}}
mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
mockLogRepository.ReturnLogs = fakeHeaderSyncLogs
delegator := newDelegator(mockLogRepository)
delegator.AddTransformer(fakeTransformer)
err := delegator.DelegateLogs()
Expect(err).NotTo(HaveOccurred())
})
})
})
func newDelegator(headerSyncLogRepository *fakes.MockHeaderSyncLogRepository) *logs.LogDelegator {
return &logs.LogDelegator{
Chunker: chunker.NewLogChunker(),
LogRepository: headerSyncLogRepository,
}
}

View File

@ -1,154 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package logs
import (
"errors"
"github.com/ethereum/go-ethereum/common"
"github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/libraries/shared/constants"
"github.com/vulcanize/vulcanizedb/libraries/shared/fetcher"
"github.com/vulcanize/vulcanizedb/libraries/shared/transactions"
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore"
)
var (
ErrNoUncheckedHeaders = errors.New("no unchecked headers available for log fetching")
ErrNoWatchedAddresses = errors.New("no watched addresses configured in the log extractor")
)
type ILogExtractor interface {
AddTransformerConfig(config transformer.EventTransformerConfig) error
ExtractLogs(recheckHeaders constants.TransformerExecution) error
}
type LogExtractor struct {
Addresses []common.Address
CheckedHeadersRepository datastore.CheckedHeadersRepository
CheckedLogsRepository datastore.CheckedLogsRepository
Fetcher fetcher.ILogFetcher
LogRepository datastore.HeaderSyncLogRepository
StartingBlock *int64
Syncer transactions.ITransactionsSyncer
Topics []common.Hash
}
// AddTransformerConfig adds additional logs to extract
func (extractor *LogExtractor) AddTransformerConfig(config transformer.EventTransformerConfig) error {
checkedHeadersErr := extractor.updateCheckedHeaders(config)
if checkedHeadersErr != nil {
return checkedHeadersErr
}
if extractor.StartingBlock == nil {
extractor.StartingBlock = &config.StartingBlockNumber
} else if earlierStartingBlockNumber(config.StartingBlockNumber, *extractor.StartingBlock) {
extractor.StartingBlock = &config.StartingBlockNumber
}
addresses := transformer.HexStringsToAddresses(config.ContractAddresses)
extractor.Addresses = append(extractor.Addresses, addresses...)
extractor.Topics = append(extractor.Topics, common.HexToHash(config.Topic))
return nil
}
// ExtractLogs fetches and persists watched logs
func (extractor LogExtractor) ExtractLogs(recheckHeaders constants.TransformerExecution) error {
if len(extractor.Addresses) < 1 {
logrus.Errorf("error extracting logs: %s", ErrNoWatchedAddresses.Error())
return ErrNoWatchedAddresses
}
uncheckedHeaders, uncheckedHeadersErr := extractor.CheckedHeadersRepository.UncheckedHeaders(*extractor.StartingBlock, -1, getCheckCount(recheckHeaders))
if uncheckedHeadersErr != nil {
logrus.Errorf("error fetching missing headers: %s", uncheckedHeadersErr)
return uncheckedHeadersErr
}
if len(uncheckedHeaders) < 1 {
return ErrNoUncheckedHeaders
}
for _, header := range uncheckedHeaders {
logs, fetchLogsErr := extractor.Fetcher.FetchLogs(extractor.Addresses, extractor.Topics, header)
if fetchLogsErr != nil {
logError("error fetching logs for header: %s", fetchLogsErr, header)
return fetchLogsErr
}
if len(logs) > 0 {
transactionsSyncErr := extractor.Syncer.SyncTransactions(header.ID, logs)
if transactionsSyncErr != nil {
logError("error syncing transactions: %s", transactionsSyncErr, header)
return transactionsSyncErr
}
createLogsErr := extractor.LogRepository.CreateHeaderSyncLogs(header.ID, logs)
if createLogsErr != nil {
logError("error persisting logs: %s", createLogsErr, header)
return createLogsErr
}
}
markHeaderCheckedErr := extractor.CheckedHeadersRepository.MarkHeaderChecked(header.ID)
if markHeaderCheckedErr != nil {
logError("error marking header checked: %s", markHeaderCheckedErr, header)
return markHeaderCheckedErr
}
}
return nil
}
func earlierStartingBlockNumber(transformerBlock, watcherBlock int64) bool {
return transformerBlock < watcherBlock
}
func logError(description string, err error, header core.Header) {
logrus.WithFields(logrus.Fields{
"headerId": header.ID,
"headerHash": header.Hash,
"blockNumber": header.BlockNumber,
}).Errorf(description, err.Error())
}
func getCheckCount(recheckHeaders constants.TransformerExecution) int64 {
if recheckHeaders == constants.HeaderUnchecked {
return 1
}
return constants.RecheckHeaderCap
}
func (extractor *LogExtractor) updateCheckedHeaders(config transformer.EventTransformerConfig) error {
alreadyWatchingLog, watchingLogErr := extractor.CheckedLogsRepository.AlreadyWatchingLog(config.ContractAddresses, config.Topic)
if watchingLogErr != nil {
return watchingLogErr
}
if !alreadyWatchingLog {
uncheckHeadersErr := extractor.CheckedHeadersRepository.MarkHeadersUnchecked(config.StartingBlockNumber)
if uncheckHeadersErr != nil {
return uncheckHeadersErr
}
markLogWatchedErr := extractor.CheckedLogsRepository.MarkLogWatched(config.ContractAddresses, config.Topic)
if markLogWatchedErr != nil {
return markLogWatchedErr
}
}
return nil
}
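A minimal wiring sketch for the removed LogExtractor, again using the test-suite mocks in place of the Postgres-backed repositories and a live fetcher/syncer.

package main

import (
	"log"

	"github.com/vulcanize/vulcanizedb/libraries/shared/constants"
	"github.com/vulcanize/vulcanizedb/libraries/shared/logs"
	"github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
	"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
)

func main() {
	extractor := &logs.LogExtractor{
		CheckedHeadersRepository: &fakes.MockCheckedHeadersRepository{},
		CheckedLogsRepository:    &fakes.MockCheckedLogsRepository{},
		Fetcher:                  &mocks.MockLogFetcher{},
		LogRepository:            &fakes.MockHeaderSyncLogRepository{},
		Syncer:                   &fakes.MockTransactionSyncer{},
	}
	config := transformer.EventTransformerConfig{
		TransformerName:     "example",
		ContractAddresses:   []string{fakes.FakeAddress.Hex()},
		Topic:               fakes.FakeHash.Hex(),
		StartingBlockNumber: 0,
	}
	if err := extractor.AddTransformerConfig(config); err != nil {
		log.Fatalf("adding transformer config: %s", err)
	}
	// HeaderUnchecked visits each header once; HeaderRecheck revisits headers
	// up to RecheckHeaderCap times. ErrNoUncheckedHeaders means no work yet.
	if err := extractor.ExtractLogs(constants.HeaderUnchecked); err != nil && err != logs.ErrNoUncheckedHeaders {
		log.Fatalf("extracting logs: %s", err)
	}
}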

View File

@ -1,415 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package logs_test
import (
"math/rand"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/libraries/shared/constants"
"github.com/vulcanize/vulcanizedb/libraries/shared/logs"
"github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
)
var _ = Describe("Log extractor", func() {
var (
checkedHeadersRepository *fakes.MockCheckedHeadersRepository
checkedLogsRepository *fakes.MockCheckedLogsRepository
extractor *logs.LogExtractor
)
BeforeEach(func() {
checkedHeadersRepository = &fakes.MockCheckedHeadersRepository{}
checkedLogsRepository = &fakes.MockCheckedLogsRepository{}
extractor = &logs.LogExtractor{
CheckedHeadersRepository: checkedHeadersRepository,
CheckedLogsRepository: checkedLogsRepository,
Fetcher: &mocks.MockLogFetcher{},
LogRepository: &fakes.MockHeaderSyncLogRepository{},
Syncer: &fakes.MockTransactionSyncer{},
}
})
Describe("AddTransformerConfig", func() {
It("updates extractor's starting block number to earliest available", func() {
earlierStartingBlockNumber := rand.Int63()
laterStartingBlockNumber := earlierStartingBlockNumber + 1
errOne := extractor.AddTransformerConfig(getTransformerConfig(laterStartingBlockNumber))
Expect(errOne).NotTo(HaveOccurred())
errTwo := extractor.AddTransformerConfig(getTransformerConfig(earlierStartingBlockNumber))
Expect(errTwo).NotTo(HaveOccurred())
Expect(*extractor.StartingBlock).To(Equal(earlierStartingBlockNumber))
})
It("adds transformer's addresses to extractor's watched addresses", func() {
addresses := []string{"0xA", "0xB"}
configWithAddresses := transformer.EventTransformerConfig{
ContractAddresses: addresses,
StartingBlockNumber: rand.Int63(),
}
err := extractor.AddTransformerConfig(configWithAddresses)
Expect(err).NotTo(HaveOccurred())
expectedAddresses := transformer.HexStringsToAddresses(addresses)
Expect(extractor.Addresses).To(Equal(expectedAddresses))
})
It("adds transformer's topic to extractor's watched topics", func() {
topic := "0x1"
configWithTopic := transformer.EventTransformerConfig{
ContractAddresses: []string{fakes.FakeAddress.Hex()},
Topic: topic,
StartingBlockNumber: rand.Int63(),
}
err := extractor.AddTransformerConfig(configWithTopic)
Expect(err).NotTo(HaveOccurred())
Expect(extractor.Topics).To(Equal([]common.Hash{common.HexToHash(topic)}))
})
It("returns error if checking whether log has been checked returns error", func() {
checkedLogsRepository.AlreadyWatchingLogError = fakes.FakeError
err := extractor.AddTransformerConfig(getTransformerConfig(rand.Int63()))
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(fakes.FakeError))
})
Describe("when log has previously been checked", func() {
It("does not mark any headers unchecked", func() {
checkedLogsRepository.AlreadyWatchingLogReturn = true
err := extractor.AddTransformerConfig(getTransformerConfig(rand.Int63()))
Expect(err).NotTo(HaveOccurred())
Expect(checkedHeadersRepository.MarkHeadersUncheckedCalled).To(BeFalse())
})
})
Describe("when log has not previously been checked", func() {
BeforeEach(func() {
checkedLogsRepository.AlreadyWatchingLogReturn = false
})
It("marks headers since transformer's starting block number as unchecked", func() {
blockNumber := rand.Int63()
err := extractor.AddTransformerConfig(getTransformerConfig(blockNumber))
Expect(err).NotTo(HaveOccurred())
Expect(checkedHeadersRepository.MarkHeadersUncheckedCalled).To(BeTrue())
Expect(checkedHeadersRepository.MarkHeadersUncheckedStartingBlockNumber).To(Equal(blockNumber))
})
It("returns error if marking headers unchecked returns error", func() {
checkedHeadersRepository.MarkHeadersUncheckedReturnError = fakes.FakeError
err := extractor.AddTransformerConfig(getTransformerConfig(rand.Int63()))
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(fakes.FakeError))
})
It("persists that tranformer's log has been checked", func() {
config := getTransformerConfig(rand.Int63())
err := extractor.AddTransformerConfig(config)
Expect(err).NotTo(HaveOccurred())
Expect(checkedLogsRepository.MarkLogWatchedAddresses).To(Equal(config.ContractAddresses))
Expect(checkedLogsRepository.MarkLogWatchedTopicZero).To(Equal(config.Topic))
})
It("returns error if marking logs checked returns error", func() {
checkedLogsRepository.MarkLogWatchedError = fakes.FakeError
err := extractor.AddTransformerConfig(getTransformerConfig(rand.Int63()))
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(fakes.FakeError))
})
})
})
Describe("ExtractLogs", func() {
It("returns error if no watched addresses configured", func() {
err := extractor.ExtractLogs(constants.HeaderUnchecked)
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(logs.ErrNoWatchedAddresses))
})
Describe("when checking unchecked headers", func() {
It("gets headers since configured starting block with check_count < 1", func() {
mockCheckedHeadersRepository := &fakes.MockCheckedHeadersRepository{}
mockCheckedHeadersRepository.UncheckedHeadersReturnHeaders = []core.Header{{}}
extractor.CheckedHeadersRepository = mockCheckedHeadersRepository
startingBlockNumber := rand.Int63()
extractor.AddTransformerConfig(getTransformerConfig(startingBlockNumber))
err := extractor.ExtractLogs(constants.HeaderUnchecked)
Expect(err).NotTo(HaveOccurred())
Expect(mockCheckedHeadersRepository.UncheckedHeadersStartingBlockNumber).To(Equal(startingBlockNumber))
Expect(mockCheckedHeadersRepository.UncheckedHeadersEndingBlockNumber).To(Equal(int64(-1)))
Expect(mockCheckedHeadersRepository.UncheckedHeadersCheckCount).To(Equal(int64(1)))
})
})
Describe("when rechecking headers", func() {
It("gets headers since configured starting block with check_count < RecheckHeaderCap", func() {
mockCheckedHeadersRepository := &fakes.MockCheckedHeadersRepository{}
mockCheckedHeadersRepository.UncheckedHeadersReturnHeaders = []core.Header{{}}
extractor.CheckedHeadersRepository = mockCheckedHeadersRepository
startingBlockNumber := rand.Int63()
extractor.AddTransformerConfig(getTransformerConfig(startingBlockNumber))
err := extractor.ExtractLogs(constants.HeaderRecheck)
Expect(err).NotTo(HaveOccurred())
Expect(mockCheckedHeadersRepository.UncheckedHeadersStartingBlockNumber).To(Equal(startingBlockNumber))
Expect(mockCheckedHeadersRepository.UncheckedHeadersEndingBlockNumber).To(Equal(int64(-1)))
Expect(mockCheckedHeadersRepository.UncheckedHeadersCheckCount).To(Equal(constants.RecheckHeaderCap))
})
})
It("returns error if getting unchecked headers fails", func() {
addTransformerConfig(extractor)
mockCheckedHeadersRepository := &fakes.MockCheckedHeadersRepository{}
mockCheckedHeadersRepository.UncheckedHeadersReturnError = fakes.FakeError
extractor.CheckedHeadersRepository = mockCheckedHeadersRepository
err := extractor.ExtractLogs(constants.HeaderUnchecked)
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(fakes.FakeError))
})
Describe("when no unchecked headers", func() {
It("does not fetch logs", func() {
addTransformerConfig(extractor)
mockLogFetcher := &mocks.MockLogFetcher{}
extractor.Fetcher = mockLogFetcher
_ = extractor.ExtractLogs(constants.HeaderUnchecked)
Expect(mockLogFetcher.FetchCalled).To(BeFalse())
})
It("returns error that no unchecked headers were found", func() {
addTransformerConfig(extractor)
mockLogFetcher := &mocks.MockLogFetcher{}
extractor.Fetcher = mockLogFetcher
err := extractor.ExtractLogs(constants.HeaderUnchecked)
Expect(err).To(MatchError(logs.ErrNoUncheckedHeaders))
})
})
Describe("when there are unchecked headers", func() {
It("fetches logs for unchecked headers", func() {
addUncheckedHeader(extractor)
config := transformer.EventTransformerConfig{
ContractAddresses: []string{fakes.FakeAddress.Hex()},
Topic: fakes.FakeHash.Hex(),
StartingBlockNumber: rand.Int63(),
}
addTransformerErr := extractor.AddTransformerConfig(config)
Expect(addTransformerErr).NotTo(HaveOccurred())
mockLogFetcher := &mocks.MockLogFetcher{}
extractor.Fetcher = mockLogFetcher
err := extractor.ExtractLogs(constants.HeaderUnchecked)
Expect(err).NotTo(HaveOccurred())
Expect(mockLogFetcher.FetchCalled).To(BeTrue())
expectedTopics := []common.Hash{common.HexToHash(config.Topic)}
Expect(mockLogFetcher.Topics).To(Equal(expectedTopics))
expectedAddresses := transformer.HexStringsToAddresses(config.ContractAddresses)
Expect(mockLogFetcher.ContractAddresses).To(Equal(expectedAddresses))
})
It("returns error if fetching logs fails", func() {
addUncheckedHeader(extractor)
addTransformerConfig(extractor)
mockLogFetcher := &mocks.MockLogFetcher{}
mockLogFetcher.ReturnError = fakes.FakeError
extractor.Fetcher = mockLogFetcher
err := extractor.ExtractLogs(constants.HeaderUnchecked)
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(fakes.FakeError))
})
Describe("when no fetched logs", func() {
It("does not sync transactions", func() {
addUncheckedHeader(extractor)
addTransformerConfig(extractor)
mockTransactionSyncer := &fakes.MockTransactionSyncer{}
extractor.Syncer = mockTransactionSyncer
err := extractor.ExtractLogs(constants.HeaderUnchecked)
Expect(err).NotTo(HaveOccurred())
Expect(mockTransactionSyncer.SyncTransactionsCalled).To(BeFalse())
})
})
Describe("when there are fetched logs", func() {
It("syncs transactions", func() {
addUncheckedHeader(extractor)
addFetchedLog(extractor)
addTransformerConfig(extractor)
mockTransactionSyncer := &fakes.MockTransactionSyncer{}
extractor.Syncer = mockTransactionSyncer
err := extractor.ExtractLogs(constants.HeaderUnchecked)
Expect(err).NotTo(HaveOccurred())
Expect(mockTransactionSyncer.SyncTransactionsCalled).To(BeTrue())
})
It("returns error if syncing transactions fails", func() {
addUncheckedHeader(extractor)
addFetchedLog(extractor)
addTransformerConfig(extractor)
mockTransactionSyncer := &fakes.MockTransactionSyncer{}
mockTransactionSyncer.SyncTransactionsError = fakes.FakeError
extractor.Syncer = mockTransactionSyncer
err := extractor.ExtractLogs(constants.HeaderUnchecked)
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(fakes.FakeError))
})
It("persists fetched logs", func() {
addUncheckedHeader(extractor)
addTransformerConfig(extractor)
fakeLogs := []types.Log{{
Address: common.HexToAddress("0xA"),
Topics: []common.Hash{common.HexToHash("0xA")},
Data: []byte{},
Index: 0,
}}
mockLogFetcher := &mocks.MockLogFetcher{ReturnLogs: fakeLogs}
extractor.Fetcher = mockLogFetcher
mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
extractor.LogRepository = mockLogRepository
err := extractor.ExtractLogs(constants.HeaderUnchecked)
Expect(err).NotTo(HaveOccurred())
Expect(mockLogRepository.PassedLogs).To(Equal(fakeLogs))
})
It("returns error if persisting logs fails", func() {
addUncheckedHeader(extractor)
addFetchedLog(extractor)
addTransformerConfig(extractor)
mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
mockLogRepository.CreateError = fakes.FakeError
extractor.LogRepository = mockLogRepository
err := extractor.ExtractLogs(constants.HeaderUnchecked)
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(fakes.FakeError))
})
})
It("marks header checked", func() {
addFetchedLog(extractor)
addTransformerConfig(extractor)
mockCheckedHeadersRepository := &fakes.MockCheckedHeadersRepository{}
headerID := rand.Int63()
mockCheckedHeadersRepository.UncheckedHeadersReturnHeaders = []core.Header{{ID: headerID}}
extractor.CheckedHeadersRepository = mockCheckedHeadersRepository
err := extractor.ExtractLogs(constants.HeaderUnchecked)
Expect(err).NotTo(HaveOccurred())
Expect(mockCheckedHeadersRepository.MarkHeaderCheckedHeaderID).To(Equal(headerID))
})
It("returns error if marking header checked fails", func() {
addFetchedLog(extractor)
addTransformerConfig(extractor)
mockCheckedHeadersRepository := &fakes.MockCheckedHeadersRepository{}
mockCheckedHeadersRepository.UncheckedHeadersReturnHeaders = []core.Header{{ID: rand.Int63()}}
mockCheckedHeadersRepository.MarkHeaderCheckedReturnError = fakes.FakeError
extractor.CheckedHeadersRepository = mockCheckedHeadersRepository
err := extractor.ExtractLogs(constants.HeaderUnchecked)
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(fakes.FakeError))
})
It("returns nil for error if everything succeeds", func() {
addUncheckedHeader(extractor)
addTransformerConfig(extractor)
err := extractor.ExtractLogs(constants.HeaderUnchecked)
Expect(err).NotTo(HaveOccurred())
})
})
})
})
func addTransformerConfig(extractor *logs.LogExtractor) {
fakeConfig := transformer.EventTransformerConfig{
ContractAddresses: []string{fakes.FakeAddress.Hex()},
Topic: fakes.FakeHash.Hex(),
StartingBlockNumber: rand.Int63(),
}
extractor.AddTransformerConfig(fakeConfig)
}
func addUncheckedHeader(extractor *logs.LogExtractor) {
mockCheckedHeadersRepository := &fakes.MockCheckedHeadersRepository{}
mockCheckedHeadersRepository.UncheckedHeadersReturnHeaders = []core.Header{{}}
extractor.CheckedHeadersRepository = mockCheckedHeadersRepository
}
func addFetchedLog(extractor *logs.LogExtractor) {
mockLogFetcher := &mocks.MockLogFetcher{}
mockLogFetcher.ReturnLogs = []types.Log{{}}
extractor.Fetcher = mockLogFetcher
}
func getTransformerConfig(startingBlockNumber int64) transformer.EventTransformerConfig {
return transformer.EventTransformerConfig{
ContractAddresses: []string{fakes.FakeAddress.Hex()},
Topic: fakes.FakeHash.Hex(),
StartingBlockNumber: startingBlockNumber,
}
}

Some files were not shown because too many files have changed in this diff