Add tests for plugin helpers

- Prevents code analysis from flagging functions as unused
- Also addresses other code analysis issues
Rob Mulholand 2019-02-28 08:59:04 -06:00
parent a806cb2818
commit b6bef49318
17 changed files with 390 additions and 154 deletions

View File

@ -2,9 +2,14 @@ package integration
import ( import (
"fmt" "fmt"
"math/rand"
"strings"
"time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/core" "github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres" "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories" "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
@ -12,9 +17,6 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/omni/shared/constants" "github.com/vulcanize/vulcanizedb/pkg/omni/shared/constants"
"github.com/vulcanize/vulcanizedb/pkg/omni/shared/helpers/test_helpers" "github.com/vulcanize/vulcanizedb/pkg/omni/shared/helpers/test_helpers"
"github.com/vulcanize/vulcanizedb/pkg/omni/shared/helpers/test_helpers/mocks" "github.com/vulcanize/vulcanizedb/pkg/omni/shared/helpers/test_helpers/mocks"
"math/rand"
"strings"
"time"
) )
var _ = Describe("Omni full transformer", func() { var _ = Describe("Omni full transformer", func() {

View File

@ -0,0 +1,8 @@
# Shared Tools
## Description
Code that is useful for, or used by, plugins written on top of VulcanizeDB.
## Note
Much of the code in this directory may not be used outside of the tests, but don't delete it; it could be used by a plugin.
Renaming or deleting functions in this namespace requires a version bump to avoid breaking plugins.
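For illustration, a minimal sketch (not part of this commit) of how an external plugin might depend on one of these helpers. The plugin package, column name, and the import path/alias for the shared repository helpers are assumptions; only the `MarkHeaderChecked` call shape mirrors the tests added in this commit.

```go
package exampleplugin

import (
	shared "github.com/vulcanize/vulcanizedb/libraries/shared/repository" // import path and alias assumed

	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
)

// checkedColumn is a hypothetical column a plugin migration would add to public.checked_headers.
const checkedColumn = "example_event_checked"

// markProcessed records that this plugin has handled a header, calling a helper
// that static analysis would otherwise report as unused within this repository.
func markProcessed(headerID int64, db *postgres.DB) error {
	return shared.MarkHeaderChecked(headerID, db, checkedColumn)
}
```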

View File

@ -58,9 +58,9 @@ var _ = Describe("Log chunker", func() {
Describe("initialisation", func() { Describe("initialisation", func() {
It("creates lookup maps correctly", func() { It("creates lookup maps correctly", func() {
Expect(chunker.AddressToNames).To(Equal(map[string][]string{ Expect(chunker.AddressToNames).To(Equal(map[string][]string{
"0x00000000000000000000000000000000000000a1": []string{"TransformerA"}, "0x00000000000000000000000000000000000000a1": {"TransformerA"},
"0x00000000000000000000000000000000000000a2": []string{"TransformerA", "TransformerC"}, "0x00000000000000000000000000000000000000a2": {"TransformerA", "TransformerC"},
"0x00000000000000000000000000000000000000b1": []string{"TransformerB"}, "0x00000000000000000000000000000000000000b1": {"TransformerB"},
})) }))
Expect(chunker.NameToTopic0).To(Equal(map[string]common.Hash{ Expect(chunker.NameToTopic0).To(Equal(map[string]common.Hash{

View File

@ -18,5 +18,6 @@ package constants
var DataItemLength = 32 var DataItemLength = 32
// TODO Grab this from DB, since it can change through governance // TODO: Move to Plugin
// TODO: Grab this from DB, since it can change through governance
var TTL = int64(10800) // 60 * 60 * 3 == 10800 seconds == 3 hours var TTL = int64(10800) // 60 * 60 * 3 == 10800 seconds == 3 hours

View File

@ -156,6 +156,7 @@ func CreateNotCheckedSQL(boolColumns []string, recheckHeaders constants.Transfor
return result.String() return result.String()
} }
// TODO: Move to plugin
func GetTicInTx(headerID int64, tx *sql.Tx) (int64, error) { func GetTicInTx(headerID int64, tx *sql.Tx) (int64, error) {
var blockTimestamp int64 var blockTimestamp int64
err := tx.QueryRow(`SELECT block_timestamp FROM public.headers WHERE id = $1;`, headerID).Scan(&blockTimestamp) err := tx.QueryRow(`SELECT block_timestamp FROM public.headers WHERE id = $1;`, headerID).Scan(&blockTimestamp)
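As a usage note, a hedged sketch of calling GetTicInTx from plugin code; the surrounding function, the package alias `shared`, and its import path are assumptions rather than part of this change:

```go
package exampleplugin

import (
	shared "github.com/vulcanize/vulcanizedb/libraries/shared/repository" // import path and alias assumed

	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
)

// ticForHeader (hypothetical) fetches the block timestamp ("tic") for a header
// inside a single transaction, e.g. before writing rows derived from that header.
func ticForHeader(db *postgres.DB, headerID int64) (int64, error) {
	tx, txErr := db.Begin()
	if txErr != nil {
		return 0, txErr
	}
	tic, ticErr := shared.GetTicInTx(headerID, tx)
	if ticErr != nil {
		tx.Rollback()
		return 0, ticErr
	}
	return tic, tx.Commit()
}
```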

View File

@ -19,6 +19,7 @@ package repository_test
import ( import (
"fmt" "fmt"
"math/rand" "math/rand"
"strconv"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@ -34,7 +35,83 @@ import (
"github.com/vulcanize/vulcanizedb/test_config" "github.com/vulcanize/vulcanizedb/test_config"
) )
var _ = Describe("Repository utilities", func() { var _ = Describe("Repository", func() {
Describe("MarkHeaderChecked", func() {
var (
checkedHeadersColumn string
db *postgres.DB
)
BeforeEach(func() {
db = test_config.NewTestDB(test_config.NewTestNode())
test_config.CleanTestDB(db)
checkedHeadersColumn = "test_column_checked"
_, migrateErr := db.Exec(`ALTER TABLE public.checked_headers
ADD COLUMN ` + checkedHeadersColumn + ` integer`)
Expect(migrateErr).NotTo(HaveOccurred())
})
AfterEach(func() {
_, cleanupMigrateErr := db.Exec(`ALTER TABLE public.checked_headers DROP COLUMN ` + checkedHeadersColumn)
Expect(cleanupMigrateErr).NotTo(HaveOccurred())
})
It("marks passed column as checked for passed header", func() {
headerRepository := repositories.NewHeaderRepository(db)
headerID, headerErr := headerRepository.CreateOrUpdateHeader(fakes.FakeHeader)
Expect(headerErr).NotTo(HaveOccurred())
err := shared.MarkHeaderChecked(headerID, db, checkedHeadersColumn)
Expect(err).NotTo(HaveOccurred())
var checkedCount int
fetchErr := db.Get(&checkedCount, `SELECT `+checkedHeadersColumn+` FROM public.checked_headers LIMIT 1`)
Expect(fetchErr).NotTo(HaveOccurred())
Expect(checkedCount).To(Equal(1))
})
})
Describe("MarkHeaderCheckedInTransaction", func() {
var (
checkedHeadersColumn string
db *postgres.DB
)
BeforeEach(func() {
db = test_config.NewTestDB(test_config.NewTestNode())
test_config.CleanTestDB(db)
checkedHeadersColumn = "test_column_checked"
_, migrateErr := db.Exec(`ALTER TABLE public.checked_headers
ADD COLUMN ` + checkedHeadersColumn + ` integer`)
Expect(migrateErr).NotTo(HaveOccurred())
})
AfterEach(func() {
_, cleanupMigrateErr := db.Exec(`ALTER TABLE public.checked_headers DROP COLUMN ` + checkedHeadersColumn)
Expect(cleanupMigrateErr).NotTo(HaveOccurred())
})
It("marks passed column as checked for passed header within a passed transaction", func() {
headerRepository := repositories.NewHeaderRepository(db)
headerID, headerErr := headerRepository.CreateOrUpdateHeader(fakes.FakeHeader)
Expect(headerErr).NotTo(HaveOccurred())
tx, txErr := db.Begin()
Expect(txErr).NotTo(HaveOccurred())
err := shared.MarkHeaderCheckedInTransaction(headerID, tx, checkedHeadersColumn)
Expect(err).NotTo(HaveOccurred())
commitErr := tx.Commit()
Expect(commitErr).NotTo(HaveOccurred())
var checkedCount int
fetchErr := db.Get(&checkedCount, `SELECT `+checkedHeadersColumn+` FROM public.checked_headers LIMIT 1`)
Expect(fetchErr).NotTo(HaveOccurred())
Expect(checkedCount).To(Equal(1))
})
})
Describe("MissingHeaders", func() { Describe("MissingHeaders", func() {
var ( var (
db *postgres.DB db *postgres.DB
@ -116,6 +193,84 @@ var _ = Describe("Repository utilities", func() {
}) })
}) })
Describe("RecheckHeaders", func() {
var (
checkedHeadersColumn string
db *postgres.DB
headerOneID, headerTwoID, headerThreeID, headerFourID int64
headerOneErr, headerTwoErr, headerThreeErr, headerFourErr error
)
BeforeEach(func() {
db = test_config.NewTestDB(test_config.NewTestNode())
test_config.CleanTestDB(db)
// create header checked column
checkedHeadersColumn = "test_column_checked"
_, migrateErr := db.Exec(`ALTER TABLE public.checked_headers ADD COLUMN ` + checkedHeadersColumn + ` integer`)
Expect(migrateErr).NotTo(HaveOccurred())
// create headers
headerRepository := repositories.NewHeaderRepository(db)
headerOneID, headerOneErr = headerRepository.CreateOrUpdateHeader(fakes.GetFakeHeader(1))
Expect(headerOneErr).NotTo(HaveOccurred())
headerTwoID, headerTwoErr = headerRepository.CreateOrUpdateHeader(fakes.GetFakeHeader(2))
Expect(headerTwoErr).NotTo(HaveOccurred())
headerThreeID, headerThreeErr = headerRepository.CreateOrUpdateHeader(fakes.GetFakeHeader(3))
Expect(headerThreeErr).NotTo(HaveOccurred())
headerFourID, headerFourErr = headerRepository.CreateOrUpdateHeader(fakes.GetFakeHeader(4))
Expect(headerFourErr).NotTo(HaveOccurred())
// mark every header checked at least once, with one fully rechecked (headerThree)
maxCheckCount, intConversionErr := strconv.Atoi(constants.RecheckHeaderCap)
Expect(intConversionErr).NotTo(HaveOccurred())
_, markHeaderOneCheckedErr := db.Exec(
`INSERT INTO public.checked_headers (header_id, `+checkedHeadersColumn+`) VALUES ($1, $2)`,
headerOneID, maxCheckCount)
Expect(markHeaderOneCheckedErr).NotTo(HaveOccurred())
_, markHeaderTwoCheckedErr := db.Exec(
`INSERT INTO public.checked_headers (header_id, `+checkedHeadersColumn+`) VALUES ($1, $2)`,
headerTwoID, maxCheckCount)
Expect(markHeaderTwoCheckedErr).NotTo(HaveOccurred())
_, markHeaderThreeCheckedErr := db.Exec(
`INSERT INTO public.checked_headers (header_id, `+checkedHeadersColumn+`) VALUES ($1, $2)`,
headerThreeID, maxCheckCount+1)
Expect(markHeaderThreeCheckedErr).NotTo(HaveOccurred())
_, markHeaderFourCheckedErr := db.Exec(
`INSERT INTO public.checked_headers (header_id, `+checkedHeadersColumn+`) VALUES ($1, $2)`,
headerFourID, maxCheckCount)
Expect(markHeaderFourCheckedErr).NotTo(HaveOccurred())
})
AfterEach(func() {
_, cleanupMigrateErr := db.Exec(`ALTER TABLE public.checked_headers DROP COLUMN ` + checkedHeadersColumn)
Expect(cleanupMigrateErr).NotTo(HaveOccurred())
})
Describe("when no ending block number (ending block number == -1)", func() {
It("returns all headers since starting block where checked count is less than cap", func() {
headers, err := shared.RecheckHeaders(1, -1, db, checkedHeadersColumn)
Expect(err).NotTo(HaveOccurred())
Expect(len(headers)).To(Equal(3))
Expect(headers[0].Id).To(Or(Equal(headerOneID), Equal(headerTwoID), Equal(headerFourID)))
Expect(headers[1].Id).To(Or(Equal(headerOneID), Equal(headerTwoID), Equal(headerFourID)))
Expect(headers[2].Id).To(Or(Equal(headerOneID), Equal(headerTwoID), Equal(headerFourID)))
})
})
Describe("when ending block number specified", func() {
It("returns headers between starting and ending block where checked count is less than cap", func() {
headers, err := shared.RecheckHeaders(1, 3, db, checkedHeadersColumn)
Expect(err).NotTo(HaveOccurred())
Expect(len(headers)).To(Equal(2))
Expect(headers[0].Id).To(Or(Equal(headerOneID), Equal(headerTwoID)))
Expect(headers[1].Id).To(Or(Equal(headerOneID), Equal(headerTwoID)))
})
})
})
Describe("GetCheckedColumnNames", func() { Describe("GetCheckedColumnNames", func() {
It("gets the column names from checked_headers", func() { It("gets the column names from checked_headers", func() {
db := test_config.NewTestDB(test_config.NewTestNode()) db := test_config.NewTestDB(test_config.NewTestNode())
@ -162,33 +317,9 @@ var _ = Describe("Repository utilities", func() {
func getExpectedColumnNames() []string { func getExpectedColumnNames() []string {
return []string{ return []string{
"price_feeds_checked", "column_1_checked",
"flip_kick_checked", "column_2_checked",
"frob_checked", "column_3_checked",
"tend_checked", "column_4_checked",
"bite_checked",
"dent_checked",
"pit_file_debt_ceiling_checked",
"pit_file_ilk_checked",
"vat_init_checked",
"drip_file_ilk_checked",
"drip_file_repo_checked",
"drip_file_vow_checked",
"deal_checked",
"drip_drip_checked",
"cat_file_chop_lump_checked",
"cat_file_flip_checked",
"cat_file_pit_vow_checked",
"flop_kick_checked",
"vat_move_checked",
"vat_fold_checked",
"vat_heal_checked",
"vat_toll_checked",
"vat_tune_checked",
"vat_grab_checked",
"vat_flux_checked",
"vat_slip_checked",
"vow_flog_checked",
"flap_kick_checked",
} }
} }

View File

@ -0,0 +1,68 @@
package storage_test
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage"
"math/big"
)
var _ = Describe("Mappings", func() {
Describe("GetMapping", func() {
It("returns the storage key for a mapping when passed the mapping's index on the contract and the desired value's key", func() {
// ex. solidity:
// mapping (bytes32 => uint) public amounts
// pass in the index of the mapping on the contract + the bytes32 key for the uint val being looked up
indexOfMappingOnContract := storage.IndexZero
keyForDesiredValueInMapping := "fake_bytes32"
storageKey := storage.GetMapping(indexOfMappingOnContract, keyForDesiredValueInMapping)
expectedStorageKeyBytes := crypto.Keccak256(common.FromHex(keyForDesiredValueInMapping + indexOfMappingOnContract))
expectedStorageKey := common.BytesToHash(expectedStorageKeyBytes)
Expect(storageKey).To(Equal(expectedStorageKey))
})
})
Describe("GetNestedMapping", func() {
It("returns the storage key for a nested mapping when passed the mapping's index on the contract and the desired value's keys", func() {
// ex. solidity:
// mapping (address => mapping (uint => bytes32)) public addressNames
// pass in the index of the mapping on the contract + the address and uint keys for the bytes32 val being looked up
indexOfMappingOnContract := storage.IndexOne
keyForOuterMapping := "fake_address"
keyForInnerMapping := "123"
storageKey := storage.GetNestedMapping(indexOfMappingOnContract, keyForOuterMapping, keyForInnerMapping)
hashedOuterMappingStorageKey := crypto.Keccak256(common.FromHex(keyForOuterMapping + indexOfMappingOnContract))
fullStorageKeyBytes := crypto.Keccak256(common.FromHex(keyForInnerMapping), hashedOuterMappingStorageKey)
expectedStorageKey := common.BytesToHash(fullStorageKeyBytes)
Expect(storageKey).To(Equal(expectedStorageKey))
})
})
Describe("GetIncrementedKey", func() {
It("returns the storage key for later values sharing an index on the contract with other earlier values", func() {
// ex. solidity:
// struct Data {
// uint256 quantity;
// uint256 quality;
// }
// mapping (bytes32 => Data) public itemData;
// pass in the storage key for the zero-indexed value ("quantity") + the number of increments required.
// (For "quality", we must increment the storage key for the corresponding "quantity" by 1).
indexOfMappingOnContract := storage.IndexTwo
keyForDesiredValueInMapping := "fake_bytes32"
storageKeyForFirstPropertyOnStruct := storage.GetMapping(indexOfMappingOnContract, keyForDesiredValueInMapping)
storageKey := storage.GetIncrementedKey(storageKeyForFirstPropertyOnStruct, 1)
incrementedStorageKey := storageKeyForFirstPropertyOnStruct.Big().Add(storageKeyForFirstPropertyOnStruct.Big(), big.NewInt(1))
expectedStorageKey := common.BytesToHash(incrementedStorageKey.Bytes())
Expect(storageKey).To(Equal(expectedStorageKey))
})
})
})
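The comments in these specs describe how Solidity storage slots for mappings and structs are derived. Below is a short sketch of how a plugin might combine the helpers under test to locate a struct field and describe it for a storage transformer; the function name, metadata name, and key label are hypothetical, while the helper signatures follow their usage in the specs above.

```go
package exampleplugin

import (
	"github.com/ethereum/go-ethereum/common"

	"github.com/vulcanize/vulcanizedb/libraries/shared/storage"
	"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
)

// qualityKeyAndMetadata (hypothetical) derives the storage key for the "quality"
// field of the itemData struct sketched in the comments above and pairs it with
// metadata describing the value stored there.
func qualityKeyAndMetadata(itemKey string) (common.Hash, utils.StorageValueMetadata) {
	// slot for itemData[itemKey].quantity, the struct's first (zero-indexed) field
	quantityKey := storage.GetMapping(storage.IndexTwo, itemKey)
	// "quality" lives one slot after "quantity"
	qualityKey := storage.GetIncrementedKey(quantityKey, 1)
	metadata := utils.GetStorageValueMetadata(
		"item_quality",                        // hypothetical metadata name
		map[utils.Key]string{"item": itemKey}, // hypothetical key label
		utils.Uint256,
	)
	return qualityKey, metadata
}
```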

View File

@ -0,0 +1,22 @@
package utils_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
)
var _ = Describe("Storage value metadata getter", func() {
It("returns a storage value metadata instance with corresponding fields assigned", func() {
metadataName := "fake_name"
metadataKeys := map[utils.Key]string{"key": "value"}
metadataType := utils.Uint256
expectedMetadata := utils.StorageValueMetadata{
Name: metadataName,
Keys: metadataKeys,
Type: metadataType,
}
Expect(utils.GetStorageValueMetadata(metadataName, metadataKeys, metadataType)).To(Equal(expectedMetadata))
})
})

View File

@ -49,7 +49,7 @@ var GenericTestConfig = transformer.TransformerConfig{
} }
func randomString(length int) string { func randomString(length int) string {
var seededRand *rand.Rand = rand.New( var seededRand = rand.New(
rand.NewSource(time.Now().UnixNano())) rand.NewSource(time.Now().UnixNano()))
charset := "abcdefghijklmnopqrstuvwxyz1234567890" charset := "abcdefghijklmnopqrstuvwxyz1234567890"
b := make([]byte, length) b := make([]byte, length)

View File

@ -50,8 +50,8 @@ func NewStorageWatcher(tailer fs.Tailer, db *postgres.DB) StorageWatcher {
func (watcher StorageWatcher) AddTransformers(initializers []transformer.StorageTransformerInitializer) { func (watcher StorageWatcher) AddTransformers(initializers []transformer.StorageTransformerInitializer) {
for _, initializer := range initializers { for _, initializer := range initializers {
transformer := initializer(watcher.db) storageTransformer := initializer(watcher.db)
watcher.Transformers[transformer.ContractAddress()] = transformer watcher.Transformers[storageTransformer.ContractAddress()] = storageTransformer
} }
} }
@ -65,12 +65,12 @@ func (watcher StorageWatcher) Execute() error {
if parseErr != nil { if parseErr != nil {
return parseErr return parseErr
} }
transformer, ok := watcher.Transformers[row.Contract] storageTransformer, ok := watcher.Transformers[row.Contract]
if !ok { if !ok {
logrus.Warn(utils.ErrContractNotFound{Contract: row.Contract.Hex()}.Error()) logrus.Warn(utils.ErrContractNotFound{Contract: row.Contract.Hex()}.Error())
continue continue
} }
executeErr := transformer.Execute(row) executeErr := storageTransformer.Execute(row)
if executeErr != nil { if executeErr != nil {
if isKeyNotFound(executeErr) { if isKeyNotFound(executeErr) {
queueErr := watcher.Queue.Add(row) queueErr := watcher.Queue.Add(row)

View File

@ -55,10 +55,10 @@ func (c *converter) Update(info *contract.Contract) {
// Convert the given watched event log into a types.Log for the given event // Convert the given watched event log into a types.Log for the given event
func (c *converter) Convert(watchedEvent core.WatchedEvent, event types.Event) (*types.Log, error) { func (c *converter) Convert(watchedEvent core.WatchedEvent, event types.Event) (*types.Log, error) {
contract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil) boundContract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil)
values := make(map[string]interface{}) values := make(map[string]interface{})
log := helpers.ConvertToLog(watchedEvent) log := helpers.ConvertToLog(watchedEvent)
err := contract.UnpackLogIntoMap(values, event.Name, log) err := boundContract.UnpackLogIntoMap(values, event.Name, log)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -101,60 +101,59 @@ func NewTransformer(network string, BC core.BlockChain, DB *postgres.DB) *Transf
// Loops over all of the addr => filter sets // Loops over all of the addr => filter sets
// Uses parser to pull event info from abi // Uses parser to pull event info from abi
// Use this info to generate event filters // Use this info to generate event filters
func (t *Transformer) Init() error { func (transformer *Transformer) Init() error {
for contractAddr, subset := range t.WatchedEvents { for contractAddr, subset := range transformer.WatchedEvents {
// Get Abi // Get Abi
err := t.Parser.Parse(contractAddr) err := transformer.Parser.Parse(contractAddr)
if err != nil { if err != nil {
return err return err
} }
// Get first block and most recent block number in the header repo // Get first block and most recent block number in the header repo
firstBlock, err := t.BlockRetriever.RetrieveFirstBlock(contractAddr) firstBlock, err := transformer.BlockRetriever.RetrieveFirstBlock(contractAddr)
if err != nil { if err != nil {
return err return err
} }
lastBlock, err := t.BlockRetriever.RetrieveMostRecentBlock() lastBlock, err := transformer.BlockRetriever.RetrieveMostRecentBlock()
if err != nil { if err != nil {
return err return err
} }
// Set to specified range if it falls within the bounds // Set to specified range if it falls within the bounds
if firstBlock < t.ContractStart[contractAddr] { if firstBlock < transformer.ContractStart[contractAddr] {
firstBlock = t.ContractStart[contractAddr] firstBlock = transformer.ContractStart[contractAddr]
} }
// Get contract name if it has one // Get contract name if it has one
var name = new(string) var name = new(string)
t.Poller.FetchContractData(t.Abi(), contractAddr, "name", nil, name, lastBlock) transformer.Poller.FetchContractData(transformer.Abi(), contractAddr, "name", nil, name, lastBlock)
// Remove any potential accidental duplicate inputs in arg filter values // Remove any potential accidental duplicate inputs in arg filter values
eventArgs := map[string]bool{} eventArgs := map[string]bool{}
for _, arg := range t.EventArgs[contractAddr] { for _, arg := range transformer.EventArgs[contractAddr] {
eventArgs[arg] = true eventArgs[arg] = true
} }
methodArgs := map[string]bool{} methodArgs := map[string]bool{}
for _, arg := range t.MethodArgs[contractAddr] { for _, arg := range transformer.MethodArgs[contractAddr] {
methodArgs[arg] = true methodArgs[arg] = true
} }
// Aggregate info into contract object // Aggregate info into contract object
info := contract.Contract{ info := contract.Contract{
Name: *name, Name: *name,
Network: t.Network, Network: transformer.Network,
Address: contractAddr, Address: contractAddr,
Abi: t.Parser.Abi(), Abi: transformer.Parser.Abi(),
ParsedAbi: t.Parser.ParsedAbi(), ParsedAbi: transformer.Parser.ParsedAbi(),
StartingBlock: firstBlock, StartingBlock: firstBlock,
LastBlock: lastBlock, LastBlock: lastBlock,
// TODO: consider whether this duplicated knowledge from t.WatchedEvents Events: transformer.Parser.GetEvents(subset),
Events: t.Parser.GetEvents(subset), Methods: transformer.Parser.GetSelectMethods(transformer.WantedMethods[contractAddr]),
Methods: t.Parser.GetSelectMethods(t.WantedMethods[contractAddr]),
FilterArgs: eventArgs, FilterArgs: eventArgs,
MethodArgs: methodArgs, MethodArgs: methodArgs,
CreateAddrList: t.CreateAddrList[contractAddr], CreateAddrList: transformer.CreateAddrList[contractAddr],
CreateHashList: t.CreateHashList[contractAddr], CreateHashList: transformer.CreateHashList[contractAddr],
Piping: t.Piping[contractAddr], Piping: transformer.Piping[contractAddr],
}.Init() }.Init()
// Use info to create filters // Use info to create filters
@ -165,14 +164,14 @@ func (t *Transformer) Init() error {
// Iterate over filters and push them to the repo using filter repository interface // Iterate over filters and push them to the repo using filter repository interface
for _, filter := range info.Filters { for _, filter := range info.Filters {
err = t.FilterRepository.CreateFilter(filter) err = transformer.FilterRepository.CreateFilter(filter)
if err != nil { if err != nil {
return err return err
} }
} }
// Store contract info for further processing // Store contract info for further processing
t.Contracts[contractAddr] = info transformer.Contracts[contractAddr] = info
} }
return nil return nil
@ -183,18 +182,18 @@ func (t *Transformer) Init() error {
// Uses converter to convert logs into custom log type // Uses converter to convert logs into custom log type
// Persists converted logs into custom postgres tables // Persists converted logs into custom postgres tables
// Calls selected methods, using token holder address generated during event log conversion // Calls selected methods, using token holder address generated during event log conversion
func (tr Transformer) Execute() error { func (transformer Transformer) Execute() error {
if len(tr.Contracts) == 0 { if len(transformer.Contracts) == 0 {
return errors.New("error: transformer has no initialized contracts to work with") return errors.New("error: transformer has no initialized contracts to work with")
} }
// Iterate through all internal contracts // Iterate through all internal contracts
for _, con := range tr.Contracts { for _, con := range transformer.Contracts {
// Update converter with current contract // Update converter with current contract
tr.Update(con) transformer.Update(con)
// Iterate through contract filters and get watched event logs // Iterate through contract filters and get watched event logs
for eventSig, filter := range con.Filters { for eventSig, filter := range con.Filters {
watchedEvents, err := tr.GetWatchedEvents(filter.Name) watchedEvents, err := transformer.GetWatchedEvents(filter.Name)
if err != nil { if err != nil {
return err return err
} }
@ -202,7 +201,7 @@ func (tr Transformer) Execute() error {
// Iterate over watched event logs // Iterate over watched event logs
for _, we := range watchedEvents { for _, we := range watchedEvents {
// Convert them to our custom log type // Convert them to our custom log type
cstm, err := tr.Converter.Convert(*we, con.Events[eventSig]) cstm, err := transformer.Converter.Convert(*we, con.Events[eventSig])
if err != nil { if err != nil {
return err return err
} }
@ -212,7 +211,7 @@ func (tr Transformer) Execute() error {
// If log is not empty, immediately persist in repo // If log is not empty, immediately persist in repo
// Run this in separate goroutine? // Run this in separate goroutine?
err = tr.PersistLogs([]types.Log{*cstm}, con.Events[eventSig], con.Address, con.Name) err = transformer.PersistLogs([]types.Log{*cstm}, con.Events[eventSig], con.Address, con.Name)
if err != nil { if err != nil {
return err return err
} }
@ -223,7 +222,7 @@ func (tr Transformer) Execute() error {
// poller polls select contract methods // poller polls select contract methods
// and persists the results into custom pg tables // and persists the results into custom pg tables
// Run this in separate goroutine? // Run this in separate goroutine?
if err := tr.PollContract(*con); err != nil { if err := transformer.PollContract(*con); err != nil {
return err return err
} }
} }
@ -232,41 +231,41 @@ func (tr Transformer) Execute() error {
} }
// Used to set which contract addresses and which of their events to watch // Used to set which contract addresses and which of their events to watch
func (tr *Transformer) SetEvents(contractAddr string, filterSet []string) { func (transformer *Transformer) SetEvents(contractAddr string, filterSet []string) {
tr.WatchedEvents[strings.ToLower(contractAddr)] = filterSet transformer.WatchedEvents[strings.ToLower(contractAddr)] = filterSet
} }
// Used to set subset of account addresses to watch events for // Used to set subset of account addresses to watch events for
func (tr *Transformer) SetEventArgs(contractAddr string, filterSet []string) { func (transformer *Transformer) SetEventArgs(contractAddr string, filterSet []string) {
tr.EventArgs[strings.ToLower(contractAddr)] = filterSet transformer.EventArgs[strings.ToLower(contractAddr)] = filterSet
} }
// Used to set which contract addresses and which of their methods to call // Used to set which contract addresses and which of their methods to call
func (tr *Transformer) SetMethods(contractAddr string, filterSet []string) { func (transformer *Transformer) SetMethods(contractAddr string, filterSet []string) {
tr.WantedMethods[strings.ToLower(contractAddr)] = filterSet transformer.WantedMethods[strings.ToLower(contractAddr)] = filterSet
} }
// Used to set subset of account addresses to poll methods on // Used to set subset of account addresses to poll methods on
func (tr *Transformer) SetMethodArgs(contractAddr string, filterSet []string) { func (transformer *Transformer) SetMethodArgs(contractAddr string, filterSet []string) {
tr.MethodArgs[strings.ToLower(contractAddr)] = filterSet transformer.MethodArgs[strings.ToLower(contractAddr)] = filterSet
} }
// Used to set the block range to watch for a given address // Used to set the block range to watch for a given address
func (tr *Transformer) SetStartingBlock(contractAddr string, start int64) { func (transformer *Transformer) SetStartingBlock(contractAddr string, start int64) {
tr.ContractStart[strings.ToLower(contractAddr)] = start transformer.ContractStart[strings.ToLower(contractAddr)] = start
} }
// Used to set whether or not to persist an account address list // Used to set whether or not to persist an account address list
func (tr *Transformer) SetCreateAddrList(contractAddr string, on bool) { func (transformer *Transformer) SetCreateAddrList(contractAddr string, on bool) {
tr.CreateAddrList[strings.ToLower(contractAddr)] = on transformer.CreateAddrList[strings.ToLower(contractAddr)] = on
} }
// Used to set whether or not to persist an hash list // Used to set whether or not to persist an hash list
func (tr *Transformer) SetCreateHashList(contractAddr string, on bool) { func (transformer *Transformer) SetCreateHashList(contractAddr string, on bool) {
tr.CreateHashList[strings.ToLower(contractAddr)] = on transformer.CreateHashList[strings.ToLower(contractAddr)] = on
} }
// Used to turn method piping on for a contract // Used to turn method piping on for a contract
func (tr *Transformer) SetPiping(contractAddr string, on bool) { func (transformer *Transformer) SetPiping(contractAddr string, on bool) {
tr.Piping[strings.ToLower(contractAddr)] = on transformer.Piping[strings.ToLower(contractAddr)] = on
} }

View File

@ -54,7 +54,7 @@ func (c *converter) Update(info *contract.Contract) {
// Convert the given watched event log into a types.Log for the given event // Convert the given watched event log into a types.Log for the given event
func (c *converter) Convert(logs []gethTypes.Log, event types.Event, headerID int64) ([]types.Log, error) { func (c *converter) Convert(logs []gethTypes.Log, event types.Event, headerID int64) ([]types.Log, error) {
contract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil) boundContract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil)
returnLogs := make([]types.Log, 0, len(logs)) returnLogs := make([]types.Log, 0, len(logs))
for _, log := range logs { for _, log := range logs {
values := make(map[string]interface{}) values := make(map[string]interface{})
@ -63,7 +63,7 @@ func (c *converter) Convert(logs []gethTypes.Log, event types.Event, headerID in
values[field.Name] = i values[field.Name] = i
} }
err := contract.UnpackLogIntoMap(values, event.Name, log) err := boundContract.UnpackLogIntoMap(values, event.Name, log)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -133,7 +133,7 @@ func (c *converter) Convert(logs []gethTypes.Log, event types.Event, headerID in
// Convert the given watched event logs into types.Logs; returns a map of event names to a slice of their converted logs // Convert the given watched event logs into types.Logs; returns a map of event names to a slice of their converted logs
func (c *converter) ConvertBatch(logs []gethTypes.Log, events map[string]types.Event, headerID int64) (map[string][]types.Log, error) { func (c *converter) ConvertBatch(logs []gethTypes.Log, events map[string]types.Event, headerID int64) (map[string][]types.Log, error) {
contract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil) boundContract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil)
eventsToLogs := make(map[string][]types.Log) eventsToLogs := make(map[string][]types.Log)
for _, event := range events { for _, event := range events {
eventsToLogs[event.Name] = make([]types.Log, 0, len(logs)) eventsToLogs[event.Name] = make([]types.Log, 0, len(logs))
@ -142,7 +142,7 @@ func (c *converter) ConvertBatch(logs []gethTypes.Log, events map[string]types.E
// If the log is of this event type, process it as such // If the log is of this event type, process it as such
if event.Sig() == log.Topics[0] { if event.Sig() == log.Topics[0] {
values := make(map[string]interface{}) values := make(map[string]interface{})
err := contract.UnpackLogIntoMap(values, event.Name, log) err := boundContract.UnpackLogIntoMap(values, event.Name, log)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -72,6 +72,7 @@ var TusdContractAddress = "0x8dd5fbCe2F6a956C3022bA3663759011Dd51e73E"
var EnsContractAddress = "0x314159265dD8dbb310642f98f50C066173C1259b" var EnsContractAddress = "0x314159265dD8dbb310642f98f50C066173C1259b"
var PublicResolverAddress = "0x1da022710dF5002339274AaDEe8D58218e9D6AB5" var PublicResolverAddress = "0x1da022710dF5002339274AaDEe8D58218e9D6AB5"
// TODO: Consider whether these should be moved to plugins
// Contract Owner // Contract Owner
var DaiContractOwner = "0x0000000000000000000000000000000000000000" var DaiContractOwner = "0x0000000000000000000000000000000000000000"
var TusdContractOwner = "0x9978d2d229a69b3aef93420d132ab22b44e3578f" var TusdContractOwner = "0x9978d2d229a69b3aef93420d132ab22b44e3578f"

View File

@ -106,6 +106,7 @@ type Owner struct {
Address string `db:"returned"` Address string `db:"returned"`
} }
// TODO: consider whether this should be moved to libraries/shared
func SetupBC() core.BlockChain { func SetupBC() core.BlockChain {
infuraIPC := "https://mainnet.infura.io/v3/b09888c1113640cc9ab42750ce750c05" infuraIPC := "https://mainnet.infura.io/v3/b09888c1113640cc9ab42750ce750c05"
rawRpcClient, err := rpc.Dial(infuraIPC) rawRpcClient, err := rpc.Dial(infuraIPC)
@ -113,9 +114,9 @@ func SetupBC() core.BlockChain {
rpcClient := client.NewRpcClient(rawRpcClient, infuraIPC) rpcClient := client.NewRpcClient(rawRpcClient, infuraIPC)
ethClient := ethclient.NewClient(rawRpcClient) ethClient := ethclient.NewClient(rawRpcClient)
blockChainClient := client.NewEthClient(ethClient) blockChainClient := client.NewEthClient(ethClient)
node := node.MakeNode(rpcClient) blockChainNode := node.MakeNode(rpcClient)
transactionConverter := rpc2.NewRpcTransactionConverter(ethClient) transactionConverter := rpc2.NewRpcTransactionConverter(ethClient)
blockChain := geth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter) blockChain := geth.NewBlockChain(blockChainClient, rpcClient, blockChainNode, transactionConverter)
return blockChain return blockChain
} }
@ -127,9 +128,9 @@ func SetupDBandBC() (*postgres.DB, core.BlockChain) {
rpcClient := client.NewRpcClient(rawRpcClient, infuraIPC) rpcClient := client.NewRpcClient(rawRpcClient, infuraIPC)
ethClient := ethclient.NewClient(rawRpcClient) ethClient := ethclient.NewClient(rawRpcClient)
blockChainClient := client.NewEthClient(ethClient) blockChainClient := client.NewEthClient(ethClient)
node := node.MakeNode(rpcClient) blockChainNode := node.MakeNode(rpcClient)
transactionConverter := rpc2.NewRpcTransactionConverter(ethClient) transactionConverter := rpc2.NewRpcTransactionConverter(ethClient)
blockChain := geth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter) blockChain := geth.NewBlockChain(blockChainClient, rpcClient, blockChainNode, transactionConverter)
db, err := postgres.NewDB(config.Database{ db, err := postgres.NewDB(config.Database{
Hostname: "localhost", Hostname: "localhost",
@ -188,6 +189,7 @@ func SetupTusdContract(wantedEvents, wantedMethods []string) *contract.Contract
}.Init() }.Init()
} }
// TODO: consider whether this can be moved to plugin or libraries/shared
func SetupENSRepo(vulcanizeLogId *int64, wantedEvents, wantedMethods []string) (*postgres.DB, *contract.Contract) { func SetupENSRepo(vulcanizeLogId *int64, wantedEvents, wantedMethods []string) (*postgres.DB, *contract.Contract) {
db, err := postgres.NewDB(config.Database{ db, err := postgres.NewDB(config.Database{
Hostname: "localhost", Hostname: "localhost",

View File

@ -31,6 +31,7 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/geth/node" "github.com/vulcanize/vulcanizedb/pkg/geth/node"
) )
// TODO: consider whether this should be moved to libraries/shared
func SetupDBandBC() (*postgres.DB, core.BlockChain) { func SetupDBandBC() (*postgres.DB, core.BlockChain) {
infuraIPC := "http://kovan0.vulcanize.io:8545" infuraIPC := "http://kovan0.vulcanize.io:8545"
rawRpcClient, err := rpc.Dial(infuraIPC) rawRpcClient, err := rpc.Dial(infuraIPC)
@ -38,9 +39,9 @@ func SetupDBandBC() (*postgres.DB, core.BlockChain) {
rpcClient := client.NewRpcClient(rawRpcClient, infuraIPC) rpcClient := client.NewRpcClient(rawRpcClient, infuraIPC)
ethClient := ethclient.NewClient(rawRpcClient) ethClient := ethclient.NewClient(rawRpcClient)
blockChainClient := client.NewEthClient(ethClient) blockChainClient := client.NewEthClient(ethClient)
node := node.MakeNode(rpcClient) blockChainNode := node.MakeNode(rpcClient)
transactionConverter := rpc2.NewRpcTransactionConverter(ethClient) transactionConverter := rpc2.NewRpcTransactionConverter(ethClient)
blockChain := geth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter) blockChain := geth.NewBlockChain(blockChainClient, rpcClient, blockChainNode, transactionConverter)
db, err := postgres.NewDB(config.Database{ db, err := postgres.NewDB(config.Database{
Hostname: "localhost", Hostname: "localhost",