Add tests for plugin helpers

- Prevents code analysis from showing functions as unused
- Also address other code analysis issues
This commit is contained in:
Rob Mulholand 2019-02-28 08:59:04 -06:00
parent a806cb2818
commit b6bef49318
17 changed files with 390 additions and 154 deletions

104
README.md
View File

@ -79,7 +79,7 @@ Lastly, ensure that `GOPATH` is defined in your shell. If necessary, `GOPATH` ca
- The default location is: - The default location is:
- Mac: `<full home path>/Library/Application\ Support/io.parity.ethereum/` - Mac: `<full home path>/Library/Application\ Support/io.parity.ethereum/`
- Linux: `<full home path>/local/share/io.parity.ethereum/` - Linux: `<full home path>/local/share/io.parity.ethereum/`
- `levelDbPath` should match Geth's chaindata directory path. - `levelDbPath` should match Geth's chaindata directory path.
- The geth LevelDB chaindata path is printed to the console when you start geth. - The geth LevelDB chaindata path is printed to the console when you start geth.
- The default location is: - The default location is:
@ -112,8 +112,8 @@ Syncs VulcanizeDB with the configured Ethereum node, populating only block heade
This command is useful when you want a minimal baseline from which to track targeted data on the blockchain (e.g. individual smart contract storage values). This command is useful when you want a minimal baseline from which to track targeted data on the blockchain (e.g. individual smart contract storage values).
1. Start Ethereum node 1. Start Ethereum node
1. In a separate terminal start VulcanizeDB: 1. In a separate terminal start VulcanizeDB:
- `./vulcanizedb lightSync --config <config.toml> --starting-block-number <block-number>` - `./vulcanizedb lightSync --config <config.toml> --starting-block-number <block-number>`
## Start full environment in docker by single command ## Start full environment in docker by single command
### Geth Rinkeby ### Geth Rinkeby
@ -150,68 +150,68 @@ If you have full rinkeby chaindata you can move it to `rinkeby_vulcanizedb_geth_
## Contract Watchers ## Contract Watchers
Contract watchers work with a light or full sync vDB to fetch raw ethereum data and execute a set of transformations over them, persisting the output. Contract watchers work with a light or full sync vDB to fetch raw ethereum data and execute a set of transformations over them, persisting the output.
A watcher is composed of at least a fetcher and a transformer or set of transformers, where a fetcher is an interface for retrieving raw Ethereum data from some source (e.g. eth_jsonrpc, IPFS) A watcher is composed of at least a fetcher and a transformer or set of transformers, where a fetcher is an interface for retrieving raw Ethereum data from some source (e.g. eth_jsonrpc, IPFS)
and a transformer is an interface for filtering through that raw Ethereum data to extract, process, and persist data for specific contracts or accounts. and a transformer is an interface for filtering through that raw Ethereum data to extract, process, and persist data for specific contracts or accounts.
### omniWatcher ### omniWatcher
The `omniWatcher` command is a built-in generic contract watcher. It can watch any and all events for a given contract provided the contract's ABI is available. The `omniWatcher` command is a built-in generic contract watcher. It can watch any and all events for a given contract provided the contract's ABI is available.
It also provides some state variable coverage by automating polling of public methods, with some restrictions. It also provides some state variable coverage by automating polling of public methods, with some restrictions.
This command requires a pre-synced (full or light) vulcanizeDB (see above sections) and currently requires the contract ABI be available on etherscan or provided by the user. This command requires a pre-synced (full or light) vulcanizeDB (see above sections) and currently requires the contract ABI be available on etherscan or provided by the user.
To watch all events of a contract using a light synced vDB: To watch all events of a contract using a light synced vDB:
- Execute `./vulcanizedb omniWatcher --config <path to config.toml> --contract-address <contract address>` - Execute `./vulcanizedb omniWatcher --config <path to config.toml> --contract-address <contract address>`
Or if you are using a full synced vDB, change the mode to full: Or if you are using a full synced vDB, change the mode to full:
- Execute `./vulcanizedb omniWatcher --mode full --config <path to config.toml> --contract-address <contract address>` - Execute `./vulcanizedb omniWatcher --mode full --config <path to config.toml> --contract-address <contract address>`
To watch contracts on a network other than mainnet, use the network flag: To watch contracts on a network other than mainnet, use the network flag:
- Execute `./vulcanizedb omniWatcher --config <path to config.toml> --contract-address <contract address> --network <ropsten, kovan, or rinkeby>` - Execute `./vulcanizedb omniWatcher --config <path to config.toml> --contract-address <contract address> --network <ropsten, kovan, or rinkeby>`
To watch events starting at a certain block use the starting block flag: To watch events starting at a certain block use the starting block flag:
- Execute `./vulcanizedb omniWatcher --config <path to config.toml> --contract-address <contract address> --starting-block-number <#>` - Execute `./vulcanizedb omniWatcher --config <path to config.toml> --contract-address <contract address> --starting-block-number <#>`
To watch only specified events use the events flag: To watch only specified events use the events flag:
- Execute `./vulcanizedb omniWatcher --config <path to config.toml> --contract-address <contract address> --events <EventName1> --events <EventName2>` - Execute `./vulcanizedb omniWatcher --config <path to config.toml> --contract-address <contract address> --events <EventName1> --events <EventName2>`
To watch events and poll the specified methods with any addresses and hashes emitted by the watched events utilize the methods flag: To watch events and poll the specified methods with any addresses and hashes emitted by the watched events utilize the methods flag:
- Execute `./vulcanizedb omniWatcher --config <path to config.toml> --contract-address <contract address> --methods <methodName1> --methods <methodName2>` - Execute `./vulcanizedb omniWatcher --config <path to config.toml> --contract-address <contract address> --methods <methodName1> --methods <methodName2>`
To watch specified events and poll the specified method with any addresses and hashes emitted by the watched events: To watch specified events and poll the specified method with any addresses and hashes emitted by the watched events:
- Execute `./vulcanizedb omniWatcher --config <path to config.toml> --contract-address <contract address> --events <EventName1> --events <EventName2> --methods <methodName>` - Execute `./vulcanizedb omniWatcher --config <path to config.toml> --contract-address <contract address> --events <EventName1> --events <EventName2> --methods <methodName>`
To turn on method piping so that values returned from previous method calls are cached and used as arguments in subsequent method calls: To turn on method piping so that values returned from previous method calls are cached and used as arguments in subsequent method calls:
- Execute `./vulcanizedb omniWatcher --config <path to config.toml> --piping true --contract-address <contract address> --events <EventName1> --events <EventName2> --methods <methodName>` - Execute `./vulcanizedb omniWatcher --config <path to config.toml> --piping true --contract-address <contract address> --events <EventName1> --events <EventName2> --methods <methodName>`
To watch all types of events of the contract but only persist the ones that emit one of the filtered-for argument values: To watch all types of events of the contract but only persist the ones that emit one of the filtered-for argument values:
- Execute `./vulcanizedb omniWatcher --config <path to config.toml> --contract-address <contract address> --event-args <arg1> --event-args <arg2>` - Execute `./vulcanizedb omniWatcher --config <path to config.toml> --contract-address <contract address> --event-args <arg1> --event-args <arg2>`
To watch all events of the contract but only poll the specified method with specified argument values (if they are emitted from the watched events): To watch all events of the contract but only poll the specified method with specified argument values (if they are emitted from the watched events):
- Execute `./vulcanizedb omniWatcher --config <path to config.toml> --contract-address <contract address> --methods <methodName> --method-args <arg1> --method-args <arg2>` - Execute `./vulcanizedb omniWatcher --config <path to config.toml> --contract-address <contract address> --methods <methodName> --method-args <arg1> --method-args <arg2>`
#### omniWatcher output #### omniWatcher output
Transformed events and polled method results are committed to Postgres in schemas and tables generated according to the contract abi. Transformed events and polled method results are committed to Postgres in schemas and tables generated according to the contract abi.
Schemas are created for each contract using the naming convention `<sync-type>_<lowercase contract-address>` Schemas are created for each contract using the naming convention `<sync-type>_<lowercase contract-address>`
Under this schema, tables are generated for watched events as `<lowercase event name>_event` and for polled methods as `<lowercase method name>_method` Under this schema, tables are generated for watched events as `<lowercase event name>_event` and for polled methods as `<lowercase method name>_method`
The 'method' and 'event' identifiers are tacked onto the end of the table names to prevent collisions between methods and events of the same lowercase name The 'method' and 'event' identifiers are tacked onto the end of the table names to prevent collisions between methods and events of the same lowercase name
Example: Example:
Running `./vulcanizedb omniWatcher --config <path to config> --starting-block-number=5197514 --contract-address=0x8dd5fbce2f6a956c3022ba3663759011dd51e73e --events=Transfer --events=Mint --methods=balanceOf` Running `./vulcanizedb omniWatcher --config <path to config> --starting-block-number=5197514 --contract-address=0x8dd5fbce2f6a956c3022ba3663759011dd51e73e --events=Transfer --events=Mint --methods=balanceOf`
watches Transfer and Mint events of the TrueUSD contract and polls its balanceOf method using the addresses we find emitted from those events watches Transfer and Mint events of the TrueUSD contract and polls its balanceOf method using the addresses we find emitted from those events
It produces and populates a schema with three tables: It produces and populates a schema with three tables:
`light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.transfer_event` `light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.transfer_event`
`light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.mint_event` `light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.mint_event`
`light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.balanceof_method` `light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.balanceof_method`
Column ids and types for these tables are generated based on the event and method argument names and types and method return types, resulting in tables such as: Column ids and types for these tables are generated based on the event and method argument names and types and method return types, resulting in tables such as:
Table "light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.transfer_event" Table "light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.transfer_event"
| Column | Type | Collation | Nullable | Default | Storage | Stats target | Description | Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
|:----------:|:---------------------:|:---------:|:--------:|:-------------------------------------------------------------------------------------------:|:--------:|:------------:|:-----------:| |:----------:|:---------------------:|:---------:|:--------:|:-------------------------------------------------------------------------------------------:|:--------:|:------------:|:-----------:|
@ -224,9 +224,9 @@ Table "light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.transfer_event"
| from_ | character varying(66) | | not null | | extended | | | | from_ | character varying(66) | | not null | | extended | | |
| to_ | character varying(66) | | not null | | extended | | | | to_ | character varying(66) | | not null | | extended | | |
| value_ | numeric | | not null | | main | | | | value_ | numeric | | not null | | main | | |
Table "light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.balanceof_method"
Table "light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.balanceof_method"
| Column | Type | Collation | Nullable | Default | Storage | Stats target | Description | | Column | Type | Collation | Nullable | Default | Storage | Stats target | Description |
|:----------:|:---------------------:|:---------:|:--------:|:-------------------------------------------------------------------------------------------:|:--------:|:------------:|:-----------:| |:----------:|:---------------------:|:---------:|:--------:|:-------------------------------------------------------------------------------------------:|:--------:|:------------:|:-----------:|
@ -235,32 +235,32 @@ Table "light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.balanceof_method"
| block | integer | | not null | | plain | | | | block | integer | | not null | | plain | | |
| who_ | character varying(66) | | not null | | extended | | | | who_ | character varying(66) | | not null | | extended | | |
| returned | numeric | | not null | | main | | | | returned | numeric | | not null | | main | | |
The addition of '_' after column names is to prevent collisions with reserved Postgres words The addition of '_' after column names is to prevent collisions with reserved Postgres words
### composeAndExecute ### composeAndExecute
The `composeAndExecute` command is used to compose and execute over an arbitrary set of custom transformers. The `composeAndExecute` command is used to compose and execute over an arbitrary set of custom transformers.
This is accomplished by generating a Go plugin which allows our `vulcanizedb` binary to link to external transformers, so This is accomplished by generating a Go plugin which allows our `vulcanizedb` binary to link to external transformers, so
long as they abide by our standard [interfaces](https://github.com/vulcanize/maker-vulcanizedb/tree/compose_and_execute/libraries/shared/transformer). long as they abide by our standard [interfaces](https://github.com/vulcanize/maker-vulcanizedb/tree/compose_and_execute/libraries/shared/transformer).
This command requires Go 1.11+ and [Go plugins](https://golang.org/pkg/plugin/) only work on Unix based systems. This command requires Go 1.11+ and [Go plugins](https://golang.org/pkg/plugin/) only work on Unix based systems.
#### Writing custom transformers #### Writing custom transformers
Storage Transformers Storage Transformers
* [Guide](https://github.com/vulcanize/maker-vulcanizedb/blob/compose_and_execute/libraries/shared/factories/storage/README.md) * [Guide](https://github.com/vulcanize/maker-vulcanizedb/blob/compose_and_execute/libraries/shared/factories/storage/README.md)
* [Example](https://github.com/vulcanize/maker-vulcanizedb/blob/compose_and_execute/libraries/shared/factories/storage/EXAMPLE.md) * [Example](https://github.com/vulcanize/maker-vulcanizedb/blob/compose_and_execute/libraries/shared/factories/storage/EXAMPLE.md)
Event Transformers Event Transformers
* Guide * Guide
* Example * Example
#### composeAndExecute configuration #### composeAndExecute configuration
A .toml config file is specified when executing the command: A .toml config file is specified when executing the command:
`./vulcanizedb composeAndExecute --config=./environments/config_name.toml` `./vulcanizedb composeAndExecute --config=./environments/config_name.toml`
The config provides information for composing a set of transformers: The config provides information for composing a set of transformers:
```toml ```toml
[database] [database]
name = "vulcanize_public" name = "vulcanize_public"
hostname = "localhost" hostname = "localhost"
@ -316,14 +316,14 @@ The config provides information for composing a set of transformers:
- `eth_event` indicates the transformer works with the [event watcher](https://github.com/vulcanize/maker-vulcanizedb/blob/compose_and_execute/libraries/shared/watcher/event_watcher.go) - `eth_event` indicates the transformer works with the [event watcher](https://github.com/vulcanize/maker-vulcanizedb/blob/compose_and_execute/libraries/shared/watcher/event_watcher.go)
that fetches event logs from an ETH node that fetches event logs from an ETH node
- `migrations` is the relative path from `repository` to the db migrations directory for the transformer - `migrations` is the relative path from `repository` to the db migrations directory for the transformer
- Note: If any of the imported transformers need additional config variables those need to be included as well - Note: If any of the imported transformers need additional config variables those need to be included as well
This information is used to write and build a Go plugin which exports the configured transformers. This information is used to write and build a Go plugin which exports the configured transformers.
These transformers are loaded onto their specified watchers and executed. These transformers are loaded onto their specified watchers and executed.
Transformers of different types can be run together in the same command using a single config file or in separate instances using different config files Transformers of different types can be run together in the same command using a single config file or in separate instances using different config files
The general structure of a plugin .go file, and what we would see built with the above config is shown below The general structure of a plugin .go file, and what we would see built with the above config is shown below
```go ```go
package main package main
@ -352,7 +352,7 @@ func (e exporter) Export() []interface1.TransformerInitializer, []interface1.Sto
``` ```
#### Preparing transformer(s) to work as plugins for composeAndExecute #### Preparing transformer(s) to work as plugins for composeAndExecute
To plug in an external transformer we need to: To plug in an external transformer we need to:
* Create a [package](https://github.com/vulcanize/mcd_transformers/blob/staging/transformers/bite/initializer/initializer.go) * Create a [package](https://github.com/vulcanize/mcd_transformers/blob/staging/transformers/bite/initializer/initializer.go)
that exports a variable `TransformerInitializer` or `StorageTransformerInitializer` that are of type [TransformerInitializer](https://github.com/vulcanize/maker-vulcanizedb/blob/compose_and_execute/libraries/shared/transformer/event_transformer.go#L33) that exports a variable `TransformerInitializer` or `StorageTransformerInitializer` that are of type [TransformerInitializer](https://github.com/vulcanize/maker-vulcanizedb/blob/compose_and_execute/libraries/shared/transformer/event_transformer.go#L33)
@ -361,7 +361,7 @@ or [StorageTransformerInitializer](https://github.com/vulcanize/maker-vulcanized
or [storage](https://github.com/vulcanize/maker-vulcanizedb/blob/compose_and_execute/libraries/shared/watcher/storage_watcher.go#L53) watchers or [storage](https://github.com/vulcanize/maker-vulcanizedb/blob/compose_and_execute/libraries/shared/watcher/storage_watcher.go#L53) watchers
* Create db migrations to run against vulcanizeDB so that we can store the transformer output * Create db migrations to run against vulcanizeDB so that we can store the transformer output
* Specify migration locations for each transformer in the config with the `exporter.transformer.migrations` fields * Specify migration locations for each transformer in the config with the `exporter.transformer.migrations` fields
* Do not `goose fix` the transformer migrations * Do not `goose fix` the transformer migrations
To update a plugin repository with changes to the core vulcanizedb repository, replace the vulcanizedb vendored in the plugin repo (`plugin_repo/vendor/github.com/vulcanize/vulcanizedb`) To update a plugin repository with changes to the core vulcanizedb repository, replace the vulcanizedb vendored in the plugin repo (`plugin_repo/vendor/github.com/vulcanize/vulcanizedb`)
with the newly updated version with the newly updated version

View File

@ -2,9 +2,14 @@ package integration
import ( import (
"fmt" "fmt"
"math/rand"
"strings"
"time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/core" "github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres" "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories" "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
@ -12,9 +17,6 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/omni/shared/constants" "github.com/vulcanize/vulcanizedb/pkg/omni/shared/constants"
"github.com/vulcanize/vulcanizedb/pkg/omni/shared/helpers/test_helpers" "github.com/vulcanize/vulcanizedb/pkg/omni/shared/helpers/test_helpers"
"github.com/vulcanize/vulcanizedb/pkg/omni/shared/helpers/test_helpers/mocks" "github.com/vulcanize/vulcanizedb/pkg/omni/shared/helpers/test_helpers/mocks"
"math/rand"
"strings"
"time"
) )
var _ = Describe("Omni full transformer", func() { var _ = Describe("Omni full transformer", func() {

View File

@ -0,0 +1,8 @@
# Shared Tools
## Description
Code that is useful for or used by plugins written on top of VulcanizeDB.
## Note
Much code in this directory may not be used outside of the tests, but don't delete it - it could be used by a plugin.
Renaming and/or deleting functions in this namespace requires a version bump to avoid breaking plugins.

View File

@ -58,9 +58,9 @@ var _ = Describe("Log chunker", func() {
Describe("initialisation", func() { Describe("initialisation", func() {
It("creates lookup maps correctly", func() { It("creates lookup maps correctly", func() {
Expect(chunker.AddressToNames).To(Equal(map[string][]string{ Expect(chunker.AddressToNames).To(Equal(map[string][]string{
"0x00000000000000000000000000000000000000a1": []string{"TransformerA"}, "0x00000000000000000000000000000000000000a1": {"TransformerA"},
"0x00000000000000000000000000000000000000a2": []string{"TransformerA", "TransformerC"}, "0x00000000000000000000000000000000000000a2": {"TransformerA", "TransformerC"},
"0x00000000000000000000000000000000000000b1": []string{"TransformerB"}, "0x00000000000000000000000000000000000000b1": {"TransformerB"},
})) }))
Expect(chunker.NameToTopic0).To(Equal(map[string]common.Hash{ Expect(chunker.NameToTopic0).To(Equal(map[string]common.Hash{

View File

@ -18,5 +18,6 @@ package constants
var DataItemLength = 32 var DataItemLength = 32
// TODO Grab this from DB, since it can change through governance // TODO: Move to Plugin
// TODO: Grab this from DB, since it can change through governance
var TTL = int64(10800) // 60 * 60 * 3 == 10800 seconds == 3 hours var TTL = int64(10800) // 60 * 60 * 3 == 10800 seconds == 3 hours

View File

@ -156,6 +156,7 @@ func CreateNotCheckedSQL(boolColumns []string, recheckHeaders constants.Transfor
return result.String() return result.String()
} }
// TODO: Move to plugin
func GetTicInTx(headerID int64, tx *sql.Tx) (int64, error) { func GetTicInTx(headerID int64, tx *sql.Tx) (int64, error) {
var blockTimestamp int64 var blockTimestamp int64
err := tx.QueryRow(`SELECT block_timestamp FROM public.headers WHERE id = $1;`, headerID).Scan(&blockTimestamp) err := tx.QueryRow(`SELECT block_timestamp FROM public.headers WHERE id = $1;`, headerID).Scan(&blockTimestamp)

View File

@ -19,6 +19,7 @@ package repository_test
import ( import (
"fmt" "fmt"
"math/rand" "math/rand"
"strconv"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -34,7 +35,83 @@ import (
"github.com/vulcanize/vulcanizedb/test_config" "github.com/vulcanize/vulcanizedb/test_config"
) )
var _ = Describe("Repository utilities", func() { var _ = Describe("Repository", func() {
Describe("MarkHeaderChecked", func() {
var (
checkedHeadersColumn string
db *postgres.DB
)
BeforeEach(func() {
db = test_config.NewTestDB(test_config.NewTestNode())
test_config.CleanTestDB(db)
checkedHeadersColumn = "test_column_checked"
_, migrateErr := db.Exec(`ALTER TABLE public.checked_headers
ADD COLUMN ` + checkedHeadersColumn + ` integer`)
Expect(migrateErr).NotTo(HaveOccurred())
})
AfterEach(func() {
_, cleanupMigrateErr := db.Exec(`ALTER TABLE public.checked_headers DROP COLUMN ` + checkedHeadersColumn)
Expect(cleanupMigrateErr).NotTo(HaveOccurred())
})
It("marks passed column as checked for passed header", func() {
headerRepository := repositories.NewHeaderRepository(db)
headerID, headerErr := headerRepository.CreateOrUpdateHeader(fakes.FakeHeader)
Expect(headerErr).NotTo(HaveOccurred())
err := shared.MarkHeaderChecked(headerID, db, checkedHeadersColumn)
Expect(err).NotTo(HaveOccurred())
var checkedCount int
fetchErr := db.Get(&checkedCount, `SELECT `+checkedHeadersColumn+` FROM public.checked_headers LIMIT 1`)
Expect(fetchErr).NotTo(HaveOccurred())
Expect(checkedCount).To(Equal(1))
})
})
Describe("MarkHeaderCheckedInTransaction", func() {
var (
checkedHeadersColumn string
db *postgres.DB
)
BeforeEach(func() {
db = test_config.NewTestDB(test_config.NewTestNode())
test_config.CleanTestDB(db)
checkedHeadersColumn = "test_column_checked"
_, migrateErr := db.Exec(`ALTER TABLE public.checked_headers
ADD COLUMN ` + checkedHeadersColumn + ` integer`)
Expect(migrateErr).NotTo(HaveOccurred())
})
AfterEach(func() {
_, cleanupMigrateErr := db.Exec(`ALTER TABLE public.checked_headers DROP COLUMN ` + checkedHeadersColumn)
Expect(cleanupMigrateErr).NotTo(HaveOccurred())
})
It("marks passed column as checked for passed header within a passed transaction", func() {
headerRepository := repositories.NewHeaderRepository(db)
headerID, headerErr := headerRepository.CreateOrUpdateHeader(fakes.FakeHeader)
Expect(headerErr).NotTo(HaveOccurred())
tx, txErr := db.Begin()
Expect(txErr).NotTo(HaveOccurred())
err := shared.MarkHeaderCheckedInTransaction(headerID, tx, checkedHeadersColumn)
Expect(err).NotTo(HaveOccurred())
commitErr := tx.Commit()
Expect(commitErr).NotTo(HaveOccurred())
var checkedCount int
fetchErr := db.Get(&checkedCount, `SELECT `+checkedHeadersColumn+` FROM public.checked_headers LIMIT 1`)
Expect(fetchErr).NotTo(HaveOccurred())
Expect(checkedCount).To(Equal(1))
})
})
Describe("MissingHeaders", func() { Describe("MissingHeaders", func() {
var ( var (
db *postgres.DB db *postgres.DB
@ -116,6 +193,84 @@ var _ = Describe("Repository utilities", func() {
}) })
}) })
Describe("RecheckHeaders", func() {
var (
checkedHeadersColumn string
db *postgres.DB
headerOneID, headerTwoID, headerThreeID, headerFourID int64
headerOneErr, headerTwoErr, headerThreeErr, headerFourErr error
)
BeforeEach(func() {
db = test_config.NewTestDB(test_config.NewTestNode())
test_config.CleanTestDB(db)
// create header checked column
checkedHeadersColumn = "test_column_checked"
_, migrateErr := db.Exec(`ALTER TABLE public.checked_headers ADD COLUMN ` + checkedHeadersColumn + ` integer`)
Expect(migrateErr).NotTo(HaveOccurred())
// create headers
headerRepository := repositories.NewHeaderRepository(db)
headerOneID, headerOneErr = headerRepository.CreateOrUpdateHeader(fakes.GetFakeHeader(1))
Expect(headerOneErr).NotTo(HaveOccurred())
headerTwoID, headerTwoErr = headerRepository.CreateOrUpdateHeader(fakes.GetFakeHeader(2))
Expect(headerTwoErr).NotTo(HaveOccurred())
headerThreeID, headerThreeErr = headerRepository.CreateOrUpdateHeader(fakes.GetFakeHeader(3))
Expect(headerThreeErr).NotTo(HaveOccurred())
headerFourID, headerFourErr = headerRepository.CreateOrUpdateHeader(fakes.GetFakeHeader(4))
Expect(headerFourErr).NotTo(HaveOccurred())
// mark every header checked at least once, with one fully rechecked (headerThree)
maxCheckCount, intConversionErr := strconv.Atoi(constants.RecheckHeaderCap)
Expect(intConversionErr).NotTo(HaveOccurred())
_, markHeaderOneCheckedErr := db.Exec(
`INSERT INTO public.checked_headers (header_id, `+checkedHeadersColumn+`) VALUES ($1, $2)`,
headerOneID, maxCheckCount)
Expect(markHeaderOneCheckedErr).NotTo(HaveOccurred())
_, markHeaderTwoCheckedErr := db.Exec(
`INSERT INTO public.checked_headers (header_id, `+checkedHeadersColumn+`) VALUES ($1, $2)`,
headerTwoID, maxCheckCount)
Expect(markHeaderTwoCheckedErr).NotTo(HaveOccurred())
_, markHeaderThreeCheckedErr := db.Exec(
`INSERT INTO public.checked_headers (header_id, `+checkedHeadersColumn+`) VALUES ($1, $2)`,
headerThreeID, maxCheckCount+1)
Expect(markHeaderThreeCheckedErr).NotTo(HaveOccurred())
_, markHeaderFourCheckedErr := db.Exec(
`INSERT INTO public.checked_headers (header_id, `+checkedHeadersColumn+`) VALUES ($1, $2)`,
headerFourID, maxCheckCount)
Expect(markHeaderFourCheckedErr).NotTo(HaveOccurred())
})
AfterEach(func() {
_, cleanupMigrateErr := db.Exec(`ALTER TABLE public.checked_headers DROP COLUMN ` + checkedHeadersColumn)
Expect(cleanupMigrateErr).NotTo(HaveOccurred())
})
Describe("when no ending block number (ending block number == -1)", func() {
It("returns all headers since starting block where checked count is less than cap", func() {
headers, err := shared.RecheckHeaders(1, -1, db, checkedHeadersColumn)
Expect(err).NotTo(HaveOccurred())
Expect(len(headers)).To(Equal(3))
Expect(headers[0].Id).To(Or(Equal(headerOneID), Equal(headerTwoID), Equal(headerFourID)))
Expect(headers[1].Id).To(Or(Equal(headerOneID), Equal(headerTwoID), Equal(headerFourID)))
Expect(headers[2].Id).To(Or(Equal(headerOneID), Equal(headerTwoID), Equal(headerFourID)))
})
})
Describe("when ending block number specified", func() {
It("returns headers between starting and ending block where checked count is less than cap", func() {
headers, err := shared.RecheckHeaders(1, 3, db, checkedHeadersColumn)
Expect(err).NotTo(HaveOccurred())
Expect(len(headers)).To(Equal(2))
Expect(headers[0].Id).To(Or(Equal(headerOneID), Equal(headerTwoID)))
Expect(headers[1].Id).To(Or(Equal(headerOneID), Equal(headerTwoID)))
})
})
})
Describe("GetCheckedColumnNames", func() { Describe("GetCheckedColumnNames", func() {
It("gets the column names from checked_headers", func() { It("gets the column names from checked_headers", func() {
db := test_config.NewTestDB(test_config.NewTestNode()) db := test_config.NewTestDB(test_config.NewTestNode())
@ -162,33 +317,9 @@ var _ = Describe("Repository utilities", func() {
func getExpectedColumnNames() []string { func getExpectedColumnNames() []string {
return []string{ return []string{
"price_feeds_checked", "column_1_checked",
"flip_kick_checked", "column_2_checked",
"frob_checked", "column_3_checked",
"tend_checked", "column_4_checked",
"bite_checked",
"dent_checked",
"pit_file_debt_ceiling_checked",
"pit_file_ilk_checked",
"vat_init_checked",
"drip_file_ilk_checked",
"drip_file_repo_checked",
"drip_file_vow_checked",
"deal_checked",
"drip_drip_checked",
"cat_file_chop_lump_checked",
"cat_file_flip_checked",
"cat_file_pit_vow_checked",
"flop_kick_checked",
"vat_move_checked",
"vat_fold_checked",
"vat_heal_checked",
"vat_toll_checked",
"vat_tune_checked",
"vat_grab_checked",
"vat_flux_checked",
"vat_slip_checked",
"vow_flog_checked",
"flap_kick_checked",
} }
} }

View File

@ -0,0 +1,68 @@
package storage_test
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage"
"math/big"
)
var _ = Describe("Mappings", func() {
Describe("GetMapping", func() {
It("returns the storage key for a mapping when passed the mapping's index on the contract and the desired value's key", func() {
// ex. solidity:
// mapping (bytes32 => uint) public amounts
// pass in the index of the mapping on the contract + the bytes32 key for the uint val being looked up
indexOfMappingOnContract := storage.IndexZero
keyForDesiredValueInMapping := "fake_bytes32"
storageKey := storage.GetMapping(indexOfMappingOnContract, keyForDesiredValueInMapping)
expectedStorageKeyBytes := crypto.Keccak256(common.FromHex(keyForDesiredValueInMapping + indexOfMappingOnContract))
expectedStorageKey := common.BytesToHash(expectedStorageKeyBytes)
Expect(storageKey).To(Equal(expectedStorageKey))
})
})
Describe("GetNestedMapping", func() {
It("returns the storage key for a nested mapping when passed the mapping's index on the contract and the desired value's keys", func() {
// ex. solidity:
// mapping (address => mapping (uint => bytes32)) public addressNames
// pass in the index of the mapping on the contract + the address and uint keys for the bytes32 val being looked up
indexOfMappingOnContract := storage.IndexOne
keyForOuterMapping := "fake_address"
keyForInnerMapping := "123"
storageKey := storage.GetNestedMapping(indexOfMappingOnContract, keyForOuterMapping, keyForInnerMapping)
hashedOuterMappingStorageKey := crypto.Keccak256(common.FromHex(keyForOuterMapping + indexOfMappingOnContract))
fullStorageKeyBytes := crypto.Keccak256(common.FromHex(keyForInnerMapping), hashedOuterMappingStorageKey)
expectedStorageKey := common.BytesToHash(fullStorageKeyBytes)
Expect(storageKey).To(Equal(expectedStorageKey))
})
})
Describe("GetIncrementedKey", func() {
It("returns the storage key for later values sharing an index on the contract with other earlier values", func() {
// ex. solidity:
// struct Data {
// uint256 quantity;
// uint256 quality;
// }
// mapping (bytes32 => Data) public itemData;
// pass in the storage key for the zero-indexed value ("quantity") + the number of increments required.
// (For "quality", we must increment the storage key for the corresponding "quantity" by 1).
indexOfMappingOnContract := storage.IndexTwo
keyForDesiredValueInMapping := "fake_bytes32"
storageKeyForFirstPropertyOnStruct := storage.GetMapping(indexOfMappingOnContract, keyForDesiredValueInMapping)
storageKey := storage.GetIncrementedKey(storageKeyForFirstPropertyOnStruct, 1)
incrementedStorageKey := storageKeyForFirstPropertyOnStruct.Big().Add(storageKeyForFirstPropertyOnStruct.Big(), big.NewInt(1))
expectedStorageKey := common.BytesToHash(incrementedStorageKey.Bytes())
Expect(storageKey).To(Equal(expectedStorageKey))
})
})
})

View File

@ -0,0 +1,22 @@
package utils_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
)
var _ = Describe("Storage value metadata getter", func() {
It("returns a storage value metadata instance with corresponding fields assigned", func() {
metadataName := "fake_name"
metadataKeys := map[utils.Key]string{"key": "value"}
metadataType := utils.Uint256
expectedMetadata := utils.StorageValueMetadata{
Name: metadataName,
Keys: metadataKeys,
Type: metadataType,
}
Expect(utils.GetStorageValueMetadata(metadataName, metadataKeys, metadataType)).To(Equal(expectedMetadata))
})
})

View File

@ -49,7 +49,7 @@ var GenericTestConfig = transformer.TransformerConfig{
} }
func randomString(length int) string { func randomString(length int) string {
var seededRand *rand.Rand = rand.New( var seededRand = rand.New(
rand.NewSource(time.Now().UnixNano())) rand.NewSource(time.Now().UnixNano()))
charset := "abcdefghijklmnopqrstuvwxyz1234567890" charset := "abcdefghijklmnopqrstuvwxyz1234567890"
b := make([]byte, length) b := make([]byte, length)

View File

@ -50,8 +50,8 @@ func NewStorageWatcher(tailer fs.Tailer, db *postgres.DB) StorageWatcher {
func (watcher StorageWatcher) AddTransformers(initializers []transformer.StorageTransformerInitializer) { func (watcher StorageWatcher) AddTransformers(initializers []transformer.StorageTransformerInitializer) {
for _, initializer := range initializers { for _, initializer := range initializers {
transformer := initializer(watcher.db) storageTransformer := initializer(watcher.db)
watcher.Transformers[transformer.ContractAddress()] = transformer watcher.Transformers[storageTransformer.ContractAddress()] = storageTransformer
} }
} }
@ -65,12 +65,12 @@ func (watcher StorageWatcher) Execute() error {
if parseErr != nil { if parseErr != nil {
return parseErr return parseErr
} }
transformer, ok := watcher.Transformers[row.Contract] storageTransformer, ok := watcher.Transformers[row.Contract]
if !ok { if !ok {
logrus.Warn(utils.ErrContractNotFound{Contract: row.Contract.Hex()}.Error()) logrus.Warn(utils.ErrContractNotFound{Contract: row.Contract.Hex()}.Error())
continue continue
} }
executeErr := transformer.Execute(row) executeErr := storageTransformer.Execute(row)
if executeErr != nil { if executeErr != nil {
if isKeyNotFound(executeErr) { if isKeyNotFound(executeErr) {
queueErr := watcher.Queue.Add(row) queueErr := watcher.Queue.Add(row)

View File

@ -55,10 +55,10 @@ func (c *converter) Update(info *contract.Contract) {
// Convert the given watched event log into a types.Log for the given event // Convert the given watched event log into a types.Log for the given event
func (c *converter) Convert(watchedEvent core.WatchedEvent, event types.Event) (*types.Log, error) { func (c *converter) Convert(watchedEvent core.WatchedEvent, event types.Event) (*types.Log, error) {
contract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil) boundContract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil)
values := make(map[string]interface{}) values := make(map[string]interface{})
log := helpers.ConvertToLog(watchedEvent) log := helpers.ConvertToLog(watchedEvent)
err := contract.UnpackLogIntoMap(values, event.Name, log) err := boundContract.UnpackLogIntoMap(values, event.Name, log)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -101,60 +101,59 @@ func NewTransformer(network string, BC core.BlockChain, DB *postgres.DB) *Transf
// Loops over all of the addr => filter sets // Loops over all of the addr => filter sets
// Uses parser to pull event info from abi // Uses parser to pull event info from abi
// Use this info to generate event filters // Use this info to generate event filters
func (t *Transformer) Init() error { func (transformer *Transformer) Init() error {
for contractAddr, subset := range t.WatchedEvents { for contractAddr, subset := range transformer.WatchedEvents {
// Get Abi // Get Abi
err := t.Parser.Parse(contractAddr) err := transformer.Parser.Parse(contractAddr)
if err != nil { if err != nil {
return err return err
} }
// Get first block and most recent block number in the header repo // Get first block and most recent block number in the header repo
firstBlock, err := t.BlockRetriever.RetrieveFirstBlock(contractAddr) firstBlock, err := transformer.BlockRetriever.RetrieveFirstBlock(contractAddr)
if err != nil { if err != nil {
return err return err
} }
lastBlock, err := t.BlockRetriever.RetrieveMostRecentBlock() lastBlock, err := transformer.BlockRetriever.RetrieveMostRecentBlock()
if err != nil { if err != nil {
return err return err
} }
// Set to specified range if it falls within the bounds // Set to specified range if it falls within the bounds
if firstBlock < t.ContractStart[contractAddr] { if firstBlock < transformer.ContractStart[contractAddr] {
firstBlock = t.ContractStart[contractAddr] firstBlock = transformer.ContractStart[contractAddr]
} }
// Get contract name if it has one // Get contract name if it has one
var name = new(string) var name = new(string)
t.Poller.FetchContractData(t.Abi(), contractAddr, "name", nil, name, lastBlock) transformer.Poller.FetchContractData(transformer.Abi(), contractAddr, "name", nil, name, lastBlock)
// Remove any potential accidental duplicate inputs in arg filter values // Remove any potential accidental duplicate inputs in arg filter values
eventArgs := map[string]bool{} eventArgs := map[string]bool{}
for _, arg := range t.EventArgs[contractAddr] { for _, arg := range transformer.EventArgs[contractAddr] {
eventArgs[arg] = true eventArgs[arg] = true
} }
methodArgs := map[string]bool{} methodArgs := map[string]bool{}
for _, arg := range t.MethodArgs[contractAddr] { for _, arg := range transformer.MethodArgs[contractAddr] {
methodArgs[arg] = true methodArgs[arg] = true
} }
// Aggregate info into contract object // Aggregate info into contract object
info := contract.Contract{ info := contract.Contract{
Name: *name, Name: *name,
Network: t.Network, Network: transformer.Network,
Address: contractAddr, Address: contractAddr,
Abi: t.Parser.Abi(), Abi: transformer.Parser.Abi(),
ParsedAbi: t.Parser.ParsedAbi(), ParsedAbi: transformer.Parser.ParsedAbi(),
StartingBlock: firstBlock, StartingBlock: firstBlock,
LastBlock: lastBlock, LastBlock: lastBlock,
// TODO: consider whether this duplicated knowledge from t.WatchedEvents Events: transformer.Parser.GetEvents(subset),
Events: t.Parser.GetEvents(subset), Methods: transformer.Parser.GetSelectMethods(transformer.WantedMethods[contractAddr]),
Methods: t.Parser.GetSelectMethods(t.WantedMethods[contractAddr]),
FilterArgs: eventArgs, FilterArgs: eventArgs,
MethodArgs: methodArgs, MethodArgs: methodArgs,
CreateAddrList: t.CreateAddrList[contractAddr], CreateAddrList: transformer.CreateAddrList[contractAddr],
CreateHashList: t.CreateHashList[contractAddr], CreateHashList: transformer.CreateHashList[contractAddr],
Piping: t.Piping[contractAddr], Piping: transformer.Piping[contractAddr],
}.Init() }.Init()
// Use info to create filters // Use info to create filters
@ -165,14 +164,14 @@ func (t *Transformer) Init() error {
// Iterate over filters and push them to the repo using filter repository interface // Iterate over filters and push them to the repo using filter repository interface
for _, filter := range info.Filters { for _, filter := range info.Filters {
err = t.FilterRepository.CreateFilter(filter) err = transformer.FilterRepository.CreateFilter(filter)
if err != nil { if err != nil {
return err return err
} }
} }
// Store contract info for further processing // Store contract info for further processing
t.Contracts[contractAddr] = info transformer.Contracts[contractAddr] = info
} }
return nil return nil
@ -183,18 +182,18 @@ func (t *Transformer) Init() error {
// Uses converter to convert logs into custom log type // Uses converter to convert logs into custom log type
// Persists converted logs into custuom postgres tables // Persists converted logs into custuom postgres tables
// Calls selected methods, using token holder address generated during event log conversion // Calls selected methods, using token holder address generated during event log conversion
func (tr Transformer) Execute() error { func (transformer Transformer) Execute() error {
if len(tr.Contracts) == 0 { if len(transformer.Contracts) == 0 {
return errors.New("error: transformer has no initialized contracts to work with") return errors.New("error: transformer has no initialized contracts to work with")
} }
// Iterate through all internal contracts // Iterate through all internal contracts
for _, con := range tr.Contracts { for _, con := range transformer.Contracts {
// Update converter with current contract // Update converter with current contract
tr.Update(con) transformer.Update(con)
// Iterate through contract filters and get watched event logs // Iterate through contract filters and get watched event logs
for eventSig, filter := range con.Filters { for eventSig, filter := range con.Filters {
watchedEvents, err := tr.GetWatchedEvents(filter.Name) watchedEvents, err := transformer.GetWatchedEvents(filter.Name)
if err != nil { if err != nil {
return err return err
} }
@ -202,7 +201,7 @@ func (tr Transformer) Execute() error {
// Iterate over watched event logs // Iterate over watched event logs
for _, we := range watchedEvents { for _, we := range watchedEvents {
// Convert them to our custom log type // Convert them to our custom log type
cstm, err := tr.Converter.Convert(*we, con.Events[eventSig]) cstm, err := transformer.Converter.Convert(*we, con.Events[eventSig])
if err != nil { if err != nil {
return err return err
} }
@ -212,7 +211,7 @@ func (tr Transformer) Execute() error {
// If log is not empty, immediately persist in repo // If log is not empty, immediately persist in repo
// Run this in seperate goroutine? // Run this in seperate goroutine?
err = tr.PersistLogs([]types.Log{*cstm}, con.Events[eventSig], con.Address, con.Name) err = transformer.PersistLogs([]types.Log{*cstm}, con.Events[eventSig], con.Address, con.Name)
if err != nil { if err != nil {
return err return err
} }
@ -223,7 +222,7 @@ func (tr Transformer) Execute() error {
// poller polls select contract methods // poller polls select contract methods
// and persists the results into custom pg tables // and persists the results into custom pg tables
// Run this in seperate goroutine? // Run this in seperate goroutine?
if err := tr.PollContract(*con); err != nil { if err := transformer.PollContract(*con); err != nil {
return err return err
} }
} }
@ -232,41 +231,41 @@ func (tr Transformer) Execute() error {
} }
// Used to set which contract addresses and which of their events to watch // Used to set which contract addresses and which of their events to watch
func (tr *Transformer) SetEvents(contractAddr string, filterSet []string) { func (transformer *Transformer) SetEvents(contractAddr string, filterSet []string) {
tr.WatchedEvents[strings.ToLower(contractAddr)] = filterSet transformer.WatchedEvents[strings.ToLower(contractAddr)] = filterSet
} }
// Used to set subset of account addresses to watch events for // Used to set subset of account addresses to watch events for
func (tr *Transformer) SetEventArgs(contractAddr string, filterSet []string) { func (transformer *Transformer) SetEventArgs(contractAddr string, filterSet []string) {
tr.EventArgs[strings.ToLower(contractAddr)] = filterSet transformer.EventArgs[strings.ToLower(contractAddr)] = filterSet
} }
// Used to set which contract addresses and which of their methods to call // Used to set which contract addresses and which of their methods to call
func (tr *Transformer) SetMethods(contractAddr string, filterSet []string) { func (transformer *Transformer) SetMethods(contractAddr string, filterSet []string) {
tr.WantedMethods[strings.ToLower(contractAddr)] = filterSet transformer.WantedMethods[strings.ToLower(contractAddr)] = filterSet
} }
// Used to set subset of account addresses to poll methods on // Used to set subset of account addresses to poll methods on
func (tr *Transformer) SetMethodArgs(contractAddr string, filterSet []string) { func (transformer *Transformer) SetMethodArgs(contractAddr string, filterSet []string) {
tr.MethodArgs[strings.ToLower(contractAddr)] = filterSet transformer.MethodArgs[strings.ToLower(contractAddr)] = filterSet
} }
// Used to set the block range to watch for a given address // Used to set the block range to watch for a given address
func (tr *Transformer) SetStartingBlock(contractAddr string, start int64) { func (transformer *Transformer) SetStartingBlock(contractAddr string, start int64) {
tr.ContractStart[strings.ToLower(contractAddr)] = start transformer.ContractStart[strings.ToLower(contractAddr)] = start
} }
// Used to set whether or not to persist an account address list // Used to set whether or not to persist an account address list
func (tr *Transformer) SetCreateAddrList(contractAddr string, on bool) { func (transformer *Transformer) SetCreateAddrList(contractAddr string, on bool) {
tr.CreateAddrList[strings.ToLower(contractAddr)] = on transformer.CreateAddrList[strings.ToLower(contractAddr)] = on
} }
// Used to set whether or not to persist an hash list // Used to set whether or not to persist an hash list
func (tr *Transformer) SetCreateHashList(contractAddr string, on bool) { func (transformer *Transformer) SetCreateHashList(contractAddr string, on bool) {
tr.CreateHashList[strings.ToLower(contractAddr)] = on transformer.CreateHashList[strings.ToLower(contractAddr)] = on
} }
// Used to turn method piping on for a contract // Used to turn method piping on for a contract
func (tr *Transformer) SetPiping(contractAddr string, on bool) { func (transformer *Transformer) SetPiping(contractAddr string, on bool) {
tr.Piping[strings.ToLower(contractAddr)] = on transformer.Piping[strings.ToLower(contractAddr)] = on
} }

View File

@ -54,7 +54,7 @@ func (c *converter) Update(info *contract.Contract) {
// Convert the given watched event log into a types.Log for the given event // Convert the given watched event log into a types.Log for the given event
func (c *converter) Convert(logs []gethTypes.Log, event types.Event, headerID int64) ([]types.Log, error) { func (c *converter) Convert(logs []gethTypes.Log, event types.Event, headerID int64) ([]types.Log, error) {
contract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil) boundContract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil)
returnLogs := make([]types.Log, 0, len(logs)) returnLogs := make([]types.Log, 0, len(logs))
for _, log := range logs { for _, log := range logs {
values := make(map[string]interface{}) values := make(map[string]interface{})
@ -63,7 +63,7 @@ func (c *converter) Convert(logs []gethTypes.Log, event types.Event, headerID in
values[field.Name] = i values[field.Name] = i
} }
err := contract.UnpackLogIntoMap(values, event.Name, log) err := boundContract.UnpackLogIntoMap(values, event.Name, log)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -133,7 +133,7 @@ func (c *converter) Convert(logs []gethTypes.Log, event types.Event, headerID in
// Convert the given watched event logs into types.Logs; returns a map of event names to a slice of their converted logs // Convert the given watched event logs into types.Logs; returns a map of event names to a slice of their converted logs
func (c *converter) ConvertBatch(logs []gethTypes.Log, events map[string]types.Event, headerID int64) (map[string][]types.Log, error) { func (c *converter) ConvertBatch(logs []gethTypes.Log, events map[string]types.Event, headerID int64) (map[string][]types.Log, error) {
contract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil) boundContract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil)
eventsToLogs := make(map[string][]types.Log) eventsToLogs := make(map[string][]types.Log)
for _, event := range events { for _, event := range events {
eventsToLogs[event.Name] = make([]types.Log, 0, len(logs)) eventsToLogs[event.Name] = make([]types.Log, 0, len(logs))
@ -142,7 +142,7 @@ func (c *converter) ConvertBatch(logs []gethTypes.Log, events map[string]types.E
// If the log is of this event type, process it as such // If the log is of this event type, process it as such
if event.Sig() == log.Topics[0] { if event.Sig() == log.Topics[0] {
values := make(map[string]interface{}) values := make(map[string]interface{})
err := contract.UnpackLogIntoMap(values, event.Name, log) err := boundContract.UnpackLogIntoMap(values, event.Name, log)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -72,6 +72,7 @@ var TusdContractAddress = "0x8dd5fbCe2F6a956C3022bA3663759011Dd51e73E"
var EnsContractAddress = "0x314159265dD8dbb310642f98f50C066173C1259b" var EnsContractAddress = "0x314159265dD8dbb310642f98f50C066173C1259b"
var PublicResolverAddress = "0x1da022710dF5002339274AaDEe8D58218e9D6AB5" var PublicResolverAddress = "0x1da022710dF5002339274AaDEe8D58218e9D6AB5"
// TODO: Consider whether these should be moved to plugins
// Contract Owner // Contract Owner
var DaiContractOwner = "0x0000000000000000000000000000000000000000" var DaiContractOwner = "0x0000000000000000000000000000000000000000"
var TusdContractOwner = "0x9978d2d229a69b3aef93420d132ab22b44e3578f" var TusdContractOwner = "0x9978d2d229a69b3aef93420d132ab22b44e3578f"

View File

@ -106,6 +106,7 @@ type Owner struct {
Address string `db:"returned"` Address string `db:"returned"`
} }
// TODO: consider whether this should be moved to libraries/shared
func SetupBC() core.BlockChain { func SetupBC() core.BlockChain {
infuraIPC := "https://mainnet.infura.io/v3/b09888c1113640cc9ab42750ce750c05" infuraIPC := "https://mainnet.infura.io/v3/b09888c1113640cc9ab42750ce750c05"
rawRpcClient, err := rpc.Dial(infuraIPC) rawRpcClient, err := rpc.Dial(infuraIPC)
@ -113,9 +114,9 @@ func SetupBC() core.BlockChain {
rpcClient := client.NewRpcClient(rawRpcClient, infuraIPC) rpcClient := client.NewRpcClient(rawRpcClient, infuraIPC)
ethClient := ethclient.NewClient(rawRpcClient) ethClient := ethclient.NewClient(rawRpcClient)
blockChainClient := client.NewEthClient(ethClient) blockChainClient := client.NewEthClient(ethClient)
node := node.MakeNode(rpcClient) blockChainNode := node.MakeNode(rpcClient)
transactionConverter := rpc2.NewRpcTransactionConverter(ethClient) transactionConverter := rpc2.NewRpcTransactionConverter(ethClient)
blockChain := geth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter) blockChain := geth.NewBlockChain(blockChainClient, rpcClient, blockChainNode, transactionConverter)
return blockChain return blockChain
} }
@ -127,9 +128,9 @@ func SetupDBandBC() (*postgres.DB, core.BlockChain) {
rpcClient := client.NewRpcClient(rawRpcClient, infuraIPC) rpcClient := client.NewRpcClient(rawRpcClient, infuraIPC)
ethClient := ethclient.NewClient(rawRpcClient) ethClient := ethclient.NewClient(rawRpcClient)
blockChainClient := client.NewEthClient(ethClient) blockChainClient := client.NewEthClient(ethClient)
node := node.MakeNode(rpcClient) blockChainNode := node.MakeNode(rpcClient)
transactionConverter := rpc2.NewRpcTransactionConverter(ethClient) transactionConverter := rpc2.NewRpcTransactionConverter(ethClient)
blockChain := geth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter) blockChain := geth.NewBlockChain(blockChainClient, rpcClient, blockChainNode, transactionConverter)
db, err := postgres.NewDB(config.Database{ db, err := postgres.NewDB(config.Database{
Hostname: "localhost", Hostname: "localhost",
@ -188,6 +189,7 @@ func SetupTusdContract(wantedEvents, wantedMethods []string) *contract.Contract
}.Init() }.Init()
} }
// TODO: consider whether this can be moved to plugin or libraries/shared
func SetupENSRepo(vulcanizeLogId *int64, wantedEvents, wantedMethods []string) (*postgres.DB, *contract.Contract) { func SetupENSRepo(vulcanizeLogId *int64, wantedEvents, wantedMethods []string) (*postgres.DB, *contract.Contract) {
db, err := postgres.NewDB(config.Database{ db, err := postgres.NewDB(config.Database{
Hostname: "localhost", Hostname: "localhost",

View File

@ -31,6 +31,7 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/geth/node" "github.com/vulcanize/vulcanizedb/pkg/geth/node"
) )
// TODO: consider whether this should be moved to libraries/shared
func SetupDBandBC() (*postgres.DB, core.BlockChain) { func SetupDBandBC() (*postgres.DB, core.BlockChain) {
infuraIPC := "http://kovan0.vulcanize.io:8545" infuraIPC := "http://kovan0.vulcanize.io:8545"
rawRpcClient, err := rpc.Dial(infuraIPC) rawRpcClient, err := rpc.Dial(infuraIPC)
@ -38,9 +39,9 @@ func SetupDBandBC() (*postgres.DB, core.BlockChain) {
rpcClient := client.NewRpcClient(rawRpcClient, infuraIPC) rpcClient := client.NewRpcClient(rawRpcClient, infuraIPC)
ethClient := ethclient.NewClient(rawRpcClient) ethClient := ethclient.NewClient(rawRpcClient)
blockChainClient := client.NewEthClient(ethClient) blockChainClient := client.NewEthClient(ethClient)
node := node.MakeNode(rpcClient) blockChainNode := node.MakeNode(rpcClient)
transactionConverter := rpc2.NewRpcTransactionConverter(ethClient) transactionConverter := rpc2.NewRpcTransactionConverter(ethClient)
blockChain := geth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter) blockChain := geth.NewBlockChain(blockChainClient, rpcClient, blockChainNode, transactionConverter)
db, err := postgres.NewDB(config.Database{ db, err := postgres.NewDB(config.Database{
Hostname: "localhost", Hostname: "localhost",