diff --git a/README.md b/README.md index 6685af83..1299d4e3 100644 --- a/README.md +++ b/README.md @@ -79,7 +79,7 @@ Lastly, ensure that `GOPATH` is defined in your shell. If necessary, `GOPATH` ca - The default location is: - Mac: `/Library/Application\ Support/io.parity.ethereum/` - Linux: `/local/share/io.parity.ethereum/` - + - `levelDbPath` should match Geth's chaindata directory path. - The geth LevelDB chaindata path is printed to the console when you start geth. - The default location is: @@ -112,8 +112,8 @@ Syncs VulcanizeDB with the configured Ethereum node, populating only block heade This command is useful when you want a minimal baseline from which to track targeted data on the blockchain (e.g. individual smart contract storage values). 1. Start Ethereum node 1. In a separate terminal start VulcanizeDB: - - `./vulcanizedb lightSync --config --starting-block-number ` - + - `./vulcanizedb lightSync --config --starting-block-number ` + ## Start full environment in docker by single command ### Geth Rinkeby @@ -150,68 +150,68 @@ If you have full rinkeby chaindata you can move it to `rinkeby_vulcanizedb_geth_ ## Contract Watchers Contract watchers work with a light or full sync vDB to fetch raw ethereum data and execute a set of transformations over them, persisting the output. - + A watcher is composed of at least a fetcher and a transformer or set of transformers, where a fetcher is an interface for retrieving raw Ethereum data from some source (e.g. eth_jsonrpc, IPFS) and a transformer is an interface for filtering through that raw Ethereum data to extract, process, and persist data for specific contracts or accounts. -### omniWatcher +### omniWatcher The `omniWatcher` command is a built-in generic contract watcher. It can watch any and all events for a given contract provided the contract's ABI is available. -It also provides some state variable coverage by automating polling of public methods, with some restrictions. 
+It also provides some state variable coverage by automating polling of public methods, with some restrictions. This command requires a pre-synced (full or light) vulcanizeDB (see above sections) and currently requires the contract ABI be available on etherscan or provided by the user. - -To watch all events of a contract using a light synced vDB: - - Execute `./vulcanizedb omniWatcher --config --contract-address ` - -Or if you are using a full synced vDB, change the mode to full: - - Execute `./vulcanizedb omniWatcher --mode full --config --contract-address ` - -To watch contracts on a network other than mainnet, use the network flag: + +To watch all events of a contract using a light synced vDB: + - Execute `./vulcanizedb omniWatcher --config --contract-address ` + +Or if you are using a full synced vDB, change the mode to full: + - Execute `./vulcanizedb omniWatcher --mode full --config --contract-address ` + +To watch contracts on a network other than mainnet, use the network flag: - Execute `./vulcanizedb omniWatcher --config --contract-address --network ` - + To watch events starting at a certain block use the starting block flag: - Execute `./vulcanizedb omniWatcher --config --contract-address --starting-block-number <#>` - -To watch only specified events use the events flag: + +To watch only specified events use the events flag: - Execute `./vulcanizedb omniWatcher --config --contract-address --events --events ` - + To watch events and poll the specified methods with any addresses and hashes emitted by the watched events utilize the methods flag: - Execute `./vulcanizedb omniWatcher --config --contract-address --methods --methods ` - -To watch specified events and poll the specified method with any addresses and hashes emitted by the watched events: + +To watch specified events and poll the specified method with any addresses and hashes emitted by the watched events: - Execute `./vulcanizedb omniWatcher --config --contract-address --events --events 
--methods ` - + To turn on method piping so that values returned from previous method calls are cached and used as arguments in subsequent method calls: - Execute `./vulcanizedb omniWatcher --config --piping true --contract-address --events --events --methods ` - -To watch all types of events of the contract but only persist the ones that emit one of the filtered-for argument values: + +To watch all types of events of the contract but only persist the ones that emit one of the filtered-for argument values: - Execute `./vulcanizedb omniWatcher --config --contract-address --event-args --event-args ` - + To watch all events of the contract but only poll the specified method with specified argument values (if they are emitted from the watched events): - Execute `./vulcanizedb omniWatcher --config --contract-address --methods --method-args --method-args ` -#### omniWatcher output +#### omniWatcher output Transformed events and polled method results are committed to Postgres in schemas and tables generated according to the contract abi. 
-Schemas are created for each contract using the naming convention `_` +Schemas are created for each contract using the naming convention `_` Under this schema, tables are generated for watched events as `_event` and for polled methods as `_method` The 'method' and 'event' identifiers are tacked onto the end of the table names to prevent collisions between methods and events of the same lowercase name -Example: +Example: Running `./vulcanizedb omniWatcher --config --starting-block-number=5197514 --contract-address=0x8dd5fbce2f6a956c3022ba3663759011dd51e73e --events=Transfer --events=Mint --methods=balanceOf` watches Transfer and Mint events of the TrueUSD contract and polls its balanceOf method using the addresses we find emitted from those events -It produces and populates a schema with three tables: +It produces and populates a schema with three tables: -`light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.transfer_event` -`light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.mint_event` -`light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.balanceof_method` +`light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.transfer_event` +`light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.mint_event` +`light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.balanceof_method` Column ids and types for these tables are generated based on the event and method argument names and types and method return types, resulting in tables such as: -Table "light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.transfer_event" +Table "light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.transfer_event" | Column | Type | Collation | Nullable | Default | Storage | Stats target | Description |:----------:|:---------------------:|:---------:|:--------:|:-------------------------------------------------------------------------------------------:|:--------:|:------------:|:-----------:| @@ -224,9 +224,9 @@ Table "light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.transfer_event" | from_ | character varying(66) | | not null | | 
extended | | | | to_ | character varying(66) | | not null | | extended | | | | value_ | numeric | | not null | | main | | | - -Table "light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.balanceof_method" + +Table "light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.balanceof_method" | Column | Type | Collation | Nullable | Default | Storage | Stats target | Description | |:----------:|:---------------------:|:---------:|:--------:|:-------------------------------------------------------------------------------------------:|:--------:|:------------:|:-----------:| @@ -235,32 +235,32 @@ Table "light_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e.balanceof_method" | block | integer | | not null | | plain | | | | who_ | character varying(66) | | not null | | extended | | | | returned | numeric | | not null | | main | | | - -The addition of '_' after table names is to prevent collisions with reserved Postgres words + +The addition of '_' after table names is to prevent collisions with reserved Postgres words -### composeAndExecute +### composeAndExecute The `composeAndExecute` command is used to compose and execute over an arbitrary set of custom transformers. This is accomplished by generating a Go pluggin which allows our `vulcanizedb` binary to link to external transformers, so long as they abide by our standard [interfaces](https://github.com/vulcanize/maker-vulcanizedb/tree/compose_and_execute/libraries/shared/transformer). -This command requires Go 1.11+ and [Go plugins](https://golang.org/pkg/plugin/) only work on Unix based systems. +This command requires Go 1.11+ and [Go plugins](https://golang.org/pkg/plugin/) only work on Unix based systems. 
-#### Writing custom transformers -Storage Transformers +#### Writing custom transformers +Storage Transformers * [Guide](https://github.com/vulcanize/maker-vulcanizedb/blob/compose_and_execute/libraries/shared/factories/storage/README.md) * [Example](https://github.com/vulcanize/maker-vulcanizedb/blob/compose_and_execute/libraries/shared/factories/storage/EXAMPLE.md) - -Event Transformers + +Event Transformers * Guide * Example - + #### composeAndExecute configuration -A .toml config file is specified when executing the command: -`./vulcanizedb composeAndExecute --config=./environments/config_name.toml` +A .toml config file is specified when executing the command: +`./vulcanizedb composeAndExecute --config=./environments/config_name.toml` -The config provides information for composing a set of transformers: +The config provides information for composing a set of transformers: -```toml +```toml [database] name = "vulcanize_public" hostname = "localhost" @@ -316,14 +316,14 @@ The config provides information for composing a set of transformers: - `eth_event` indicates the transformer works with the [event watcher](https://github.com/vulcanize/maker-vulcanizedb/blob/compose_and_execute/libraries/shared/watcher/event_watcher.go) that fetches event logs from an ETH node - `migrations` is the relative path from `repository` to the db migrations directory for the transformer -- Note: If any of the imported transformers need additional config variables those need to be included as well +- Note: If any of the imported transformers need additional config variables those need to be included as well This information is used to write and build a Go plugin which exports the configured transformers. -These transformers are loaded onto their specified watchers and executed. +These transformers are loaded onto their specified watchers and executed. 
Transformers of different types can be run together in the same command using a single config file or in separate instances using different config files -The general structure of a plugin .go file, and what we would see built with the above config is shown below +The general structure of a plugin .go file, and what we would see built with the above config is shown below ```go package main @@ -352,7 +352,7 @@ func (e exporter) Export() []interface1.TransformerInitializer, []interface1.Sto ``` #### Preparing transformer(s) to work as pluggins for composeAndExecute -To plug in an external transformer we need to: +To plug in an external transformer we need to: * Create a [package](https://github.com/vulcanize/mcd_transformers/blob/staging/transformers/bite/initializer/initializer.go) that exports a variable `TransformerInitializer` or `StorageTransformerInitializer` that are of type [TransformerInitializer](https://github.com/vulcanize/maker-vulcanizedb/blob/compose_and_execute/libraries/shared/transformer/event_transformer.go#L33) @@ -361,7 +361,7 @@ or [StorageTransformerInitializer](https://github.com/vulcanize/maker-vulcanized or [storage](https://github.com/vulcanize/maker-vulcanizedb/blob/compose_and_execute/libraries/shared/watcher/storage_watcher.go#L53) watchers * Create db migrations to run against vulcanizeDB so that we can store the transformer output * Specify migration locations for each transformer in the config with the `exporter.transformer.migrations` fields - * Do not `goose fix` the transformer migrations + * Do not `goose fix` the transformer migrations To update a plugin repository with changes to the core vulcanizedb repository, replace the vulcanizedb vendored in the plugin repo (`plugin_repo/vendor/github.com/vulcanize/vulcanizedb`) with the newly updated version diff --git a/integration_test/omni_full_transformer.go b/integration_test/omni_full_transformer.go index fcbfdde6..5346584d 100644 --- a/integration_test/omni_full_transformer.go +++ 
b/integration_test/omni_full_transformer.go @@ -2,9 +2,14 @@ package integration import ( "fmt" + "math/rand" + "strings" + "time" + "github.com/ethereum/go-ethereum/common" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "github.com/vulcanize/vulcanizedb/pkg/core" "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres" "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories" @@ -12,9 +17,6 @@ import ( "github.com/vulcanize/vulcanizedb/pkg/omni/shared/constants" "github.com/vulcanize/vulcanizedb/pkg/omni/shared/helpers/test_helpers" "github.com/vulcanize/vulcanizedb/pkg/omni/shared/helpers/test_helpers/mocks" - "math/rand" - "strings" - "time" ) var _ = Describe("Omni full transformer", func() { diff --git a/libraries/shared/README.md b/libraries/shared/README.md new file mode 100644 index 00000000..be4f8ac2 --- /dev/null +++ b/libraries/shared/README.md @@ -0,0 +1,8 @@ +# Shared Tools + +## Description +Code that is useful for or used by plugins written on top of VulcanizeDB. + +## Note +Much code in this directory may not be used outside of the tests, but don't delete it - it could be used by a plugin. +Renaming and/or deleting functions in this namespace requires a version bump to avoid breaking plugins. 
\ No newline at end of file diff --git a/libraries/shared/chunker/log_chunker_test.go b/libraries/shared/chunker/log_chunker_test.go index 9a724485..27e73c24 100644 --- a/libraries/shared/chunker/log_chunker_test.go +++ b/libraries/shared/chunker/log_chunker_test.go @@ -58,9 +58,9 @@ var _ = Describe("Log chunker", func() { Describe("initialisation", func() { It("creates lookup maps correctly", func() { Expect(chunker.AddressToNames).To(Equal(map[string][]string{ - "0x00000000000000000000000000000000000000a1": []string{"TransformerA"}, - "0x00000000000000000000000000000000000000a2": []string{"TransformerA", "TransformerC"}, - "0x00000000000000000000000000000000000000b1": []string{"TransformerB"}, + "0x00000000000000000000000000000000000000a1": {"TransformerA"}, + "0x00000000000000000000000000000000000000a2": {"TransformerA", "TransformerC"}, + "0x00000000000000000000000000000000000000b1": {"TransformerB"}, })) Expect(chunker.NameToTopic0).To(Equal(map[string]common.Hash{ diff --git a/libraries/shared/constants/data.go b/libraries/shared/constants/data.go index e55ae76c..1e87e5ee 100644 --- a/libraries/shared/constants/data.go +++ b/libraries/shared/constants/data.go @@ -18,5 +18,6 @@ package constants var DataItemLength = 32 -// TODO Grab this from DB, since it can change through governance +// TODO: Move to Plugin +// TODO: Grab this from DB, since it can change through governance var TTL = int64(10800) // 60 * 60 * 3 == 10800 seconds == 3 hours diff --git a/libraries/shared/repository/repository.go b/libraries/shared/repository/repository.go index 04d9ea4c..fe8aaa4f 100644 --- a/libraries/shared/repository/repository.go +++ b/libraries/shared/repository/repository.go @@ -156,6 +156,7 @@ func CreateNotCheckedSQL(boolColumns []string, recheckHeaders constants.Transfor return result.String() } +// TODO: Move to plugin func GetTicInTx(headerID int64, tx *sql.Tx) (int64, error) { var blockTimestamp int64 err := tx.QueryRow(`SELECT block_timestamp FROM public.headers 
WHERE id = $1;`, headerID).Scan(&blockTimestamp) diff --git a/libraries/shared/repository/repository_utility_test.go b/libraries/shared/repository/repository_test.go similarity index 51% rename from libraries/shared/repository/repository_utility_test.go rename to libraries/shared/repository/repository_test.go index 09791ec0..a3e0dbbf 100644 --- a/libraries/shared/repository/repository_utility_test.go +++ b/libraries/shared/repository/repository_test.go @@ -19,6 +19,7 @@ package repository_test import ( "fmt" "math/rand" + "strconv" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -34,7 +35,83 @@ import ( "github.com/vulcanize/vulcanizedb/test_config" ) -var _ = Describe("Repository utilities", func() { +var _ = Describe("Repository", func() { + Describe("MarkHeaderChecked", func() { + var ( + checkedHeadersColumn string + db *postgres.DB + ) + + BeforeEach(func() { + db = test_config.NewTestDB(test_config.NewTestNode()) + test_config.CleanTestDB(db) + + checkedHeadersColumn = "test_column_checked" + _, migrateErr := db.Exec(`ALTER TABLE public.checked_headers + ADD COLUMN ` + checkedHeadersColumn + ` integer`) + Expect(migrateErr).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _, cleanupMigrateErr := db.Exec(`ALTER TABLE public.checked_headers DROP COLUMN ` + checkedHeadersColumn) + Expect(cleanupMigrateErr).NotTo(HaveOccurred()) + }) + + It("marks passed column as checked for passed header", func() { + headerRepository := repositories.NewHeaderRepository(db) + headerID, headerErr := headerRepository.CreateOrUpdateHeader(fakes.FakeHeader) + Expect(headerErr).NotTo(HaveOccurred()) + + err := shared.MarkHeaderChecked(headerID, db, checkedHeadersColumn) + + Expect(err).NotTo(HaveOccurred()) + var checkedCount int + fetchErr := db.Get(&checkedCount, `SELECT `+checkedHeadersColumn+` FROM public.checked_headers LIMIT 1`) + Expect(fetchErr).NotTo(HaveOccurred()) + Expect(checkedCount).To(Equal(1)) + }) + }) + + Describe("MarkHeaderCheckedInTransaction", 
func() { + var ( + checkedHeadersColumn string + db *postgres.DB + ) + + BeforeEach(func() { + db = test_config.NewTestDB(test_config.NewTestNode()) + test_config.CleanTestDB(db) + + checkedHeadersColumn = "test_column_checked" + _, migrateErr := db.Exec(`ALTER TABLE public.checked_headers + ADD COLUMN ` + checkedHeadersColumn + ` integer`) + Expect(migrateErr).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _, cleanupMigrateErr := db.Exec(`ALTER TABLE public.checked_headers DROP COLUMN ` + checkedHeadersColumn) + Expect(cleanupMigrateErr).NotTo(HaveOccurred()) + }) + + It("marks passed column as checked for passed header within a passed transaction", func() { + headerRepository := repositories.NewHeaderRepository(db) + headerID, headerErr := headerRepository.CreateOrUpdateHeader(fakes.FakeHeader) + Expect(headerErr).NotTo(HaveOccurred()) + tx, txErr := db.Begin() + Expect(txErr).NotTo(HaveOccurred()) + + err := shared.MarkHeaderCheckedInTransaction(headerID, tx, checkedHeadersColumn) + + Expect(err).NotTo(HaveOccurred()) + commitErr := tx.Commit() + Expect(commitErr).NotTo(HaveOccurred()) + var checkedCount int + fetchErr := db.Get(&checkedCount, `SELECT `+checkedHeadersColumn+` FROM public.checked_headers LIMIT 1`) + Expect(fetchErr).NotTo(HaveOccurred()) + Expect(checkedCount).To(Equal(1)) + }) + }) + Describe("MissingHeaders", func() { var ( db *postgres.DB @@ -116,6 +193,84 @@ var _ = Describe("Repository utilities", func() { }) }) + Describe("RecheckHeaders", func() { + var ( + checkedHeadersColumn string + db *postgres.DB + headerOneID, headerTwoID, headerThreeID, headerFourID int64 + headerOneErr, headerTwoErr, headerThreeErr, headerFourErr error + ) + + BeforeEach(func() { + db = test_config.NewTestDB(test_config.NewTestNode()) + test_config.CleanTestDB(db) + + // create header checked column + checkedHeadersColumn = "test_column_checked" + _, migrateErr := db.Exec(`ALTER TABLE public.checked_headers ADD COLUMN ` + checkedHeadersColumn + ` integer`) + 
Expect(migrateErr).NotTo(HaveOccurred()) + + // create headers + headerRepository := repositories.NewHeaderRepository(db) + headerOneID, headerOneErr = headerRepository.CreateOrUpdateHeader(fakes.GetFakeHeader(1)) + Expect(headerOneErr).NotTo(HaveOccurred()) + headerTwoID, headerTwoErr = headerRepository.CreateOrUpdateHeader(fakes.GetFakeHeader(2)) + Expect(headerTwoErr).NotTo(HaveOccurred()) + headerThreeID, headerThreeErr = headerRepository.CreateOrUpdateHeader(fakes.GetFakeHeader(3)) + Expect(headerThreeErr).NotTo(HaveOccurred()) + headerFourID, headerFourErr = headerRepository.CreateOrUpdateHeader(fakes.GetFakeHeader(4)) + Expect(headerFourErr).NotTo(HaveOccurred()) + + // mark every header checked at least once, with one fully rechecked (headerThree) + maxCheckCount, intConversionErr := strconv.Atoi(constants.RecheckHeaderCap) + Expect(intConversionErr).NotTo(HaveOccurred()) + _, markHeaderOneCheckedErr := db.Exec( + `INSERT INTO public.checked_headers (header_id, `+checkedHeadersColumn+`) VALUES ($1, $2)`, + headerOneID, maxCheckCount) + Expect(markHeaderOneCheckedErr).NotTo(HaveOccurred()) + _, markHeaderTwoCheckedErr := db.Exec( + `INSERT INTO public.checked_headers (header_id, `+checkedHeadersColumn+`) VALUES ($1, $2)`, + headerTwoID, maxCheckCount) + Expect(markHeaderTwoCheckedErr).NotTo(HaveOccurred()) + _, markHeaderThreeCheckedErr := db.Exec( + `INSERT INTO public.checked_headers (header_id, `+checkedHeadersColumn+`) VALUES ($1, $2)`, + headerThreeID, maxCheckCount+1) + Expect(markHeaderThreeCheckedErr).NotTo(HaveOccurred()) + _, markHeaderFourCheckedErr := db.Exec( + `INSERT INTO public.checked_headers (header_id, `+checkedHeadersColumn+`) VALUES ($1, $2)`, + headerFourID, maxCheckCount) + Expect(markHeaderFourCheckedErr).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _, cleanupMigrateErr := db.Exec(`ALTER TABLE public.checked_headers DROP COLUMN ` + checkedHeadersColumn) + Expect(cleanupMigrateErr).NotTo(HaveOccurred()) + }) + + Describe("when 
no ending block number (ending block number == -1)", func() { + It("returns all headers since starting block where checked count is less than cap", func() { + headers, err := shared.RecheckHeaders(1, -1, db, checkedHeadersColumn) + + Expect(err).NotTo(HaveOccurred()) + Expect(len(headers)).To(Equal(3)) + Expect(headers[0].Id).To(Or(Equal(headerOneID), Equal(headerTwoID), Equal(headerFourID))) + Expect(headers[1].Id).To(Or(Equal(headerOneID), Equal(headerTwoID), Equal(headerFourID))) + Expect(headers[2].Id).To(Or(Equal(headerOneID), Equal(headerTwoID), Equal(headerFourID))) + }) + }) + + Describe("when ending block number specified", func() { + It("returns headers between starting and ending block where checked count is less than cap", func() { + headers, err := shared.RecheckHeaders(1, 3, db, checkedHeadersColumn) + + Expect(err).NotTo(HaveOccurred()) + Expect(len(headers)).To(Equal(2)) + Expect(headers[0].Id).To(Or(Equal(headerOneID), Equal(headerTwoID))) + Expect(headers[1].Id).To(Or(Equal(headerOneID), Equal(headerTwoID))) + }) + }) + }) + Describe("GetCheckedColumnNames", func() { It("gets the column names from checked_headers", func() { db := test_config.NewTestDB(test_config.NewTestNode()) @@ -162,33 +317,9 @@ var _ = Describe("Repository utilities", func() { func getExpectedColumnNames() []string { return []string{ - "price_feeds_checked", - "flip_kick_checked", - "frob_checked", - "tend_checked", - "bite_checked", - "dent_checked", - "pit_file_debt_ceiling_checked", - "pit_file_ilk_checked", - "vat_init_checked", - "drip_file_ilk_checked", - "drip_file_repo_checked", - "drip_file_vow_checked", - "deal_checked", - "drip_drip_checked", - "cat_file_chop_lump_checked", - "cat_file_flip_checked", - "cat_file_pit_vow_checked", - "flop_kick_checked", - "vat_move_checked", - "vat_fold_checked", - "vat_heal_checked", - "vat_toll_checked", - "vat_tune_checked", - "vat_grab_checked", - "vat_flux_checked", - "vat_slip_checked", - "vow_flog_checked", - 
"flap_kick_checked", + "column_1_checked", + "column_2_checked", + "column_3_checked", + "column_4_checked", } } diff --git a/libraries/shared/storage/mappings_test.go b/libraries/shared/storage/mappings_test.go new file mode 100644 index 00000000..9cc271e3 --- /dev/null +++ b/libraries/shared/storage/mappings_test.go @@ -0,0 +1,68 @@ +package storage_test + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/vulcanize/vulcanizedb/libraries/shared/storage" + "math/big" +) + +var _ = Describe("Mappings", func() { + Describe("GetMapping", func() { + It("returns the storage key for a mapping when passed the mapping's index on the contract and the desired value's key", func() { + // ex. solidity: + // mapping (bytes32 => uint) public amounts + // pass in the index of the mapping on the contract + the bytes32 key for the uint val being looked up + indexOfMappingOnContract := storage.IndexZero + keyForDesiredValueInMapping := "fake_bytes32" + + storageKey := storage.GetMapping(indexOfMappingOnContract, keyForDesiredValueInMapping) + + expectedStorageKeyBytes := crypto.Keccak256(common.FromHex(keyForDesiredValueInMapping + indexOfMappingOnContract)) + expectedStorageKey := common.BytesToHash(expectedStorageKeyBytes) + Expect(storageKey).To(Equal(expectedStorageKey)) + }) + }) + + Describe("GetNestedMapping", func() { + It("returns the storage key for a nested mapping when passed the mapping's index on the contract and the desired value's keys", func() { + // ex. 
solidity: + // mapping (address => mapping (uint => bytes32)) public addressNames + // pass in the index of the mapping on the contract + the address and uint keys for the bytes32 val being looked up + indexOfMappingOnContract := storage.IndexOne + keyForOuterMapping := "fake_address" + keyForInnerMapping := "123" + + storageKey := storage.GetNestedMapping(indexOfMappingOnContract, keyForOuterMapping, keyForInnerMapping) + + hashedOuterMappingStorageKey := crypto.Keccak256(common.FromHex(keyForOuterMapping + indexOfMappingOnContract)) + fullStorageKeyBytes := crypto.Keccak256(common.FromHex(keyForInnerMapping), hashedOuterMappingStorageKey) + expectedStorageKey := common.BytesToHash(fullStorageKeyBytes) + Expect(storageKey).To(Equal(expectedStorageKey)) + }) + }) + + Describe("GetIncrementedKey", func() { + It("returns the storage key for later values sharing an index on the contract with other earlier values", func() { + // ex. solidity: + // struct Data { + // uint256 quantity; + // uint256 quality; + // } + // mapping (bytes32 => Data) public itemData; + // pass in the storage key for the zero-indexed value ("quantity") + the number of increments required. + // (For "quality", we must increment the storage key for the corresponding "quantity" by 1). 
+ indexOfMappingOnContract := storage.IndexTwo + keyForDesiredValueInMapping := "fake_bytes32" + storageKeyForFirstPropertyOnStruct := storage.GetMapping(indexOfMappingOnContract, keyForDesiredValueInMapping) + + storageKey := storage.GetIncrementedKey(storageKeyForFirstPropertyOnStruct, 1) + + incrementedStorageKey := storageKeyForFirstPropertyOnStruct.Big().Add(storageKeyForFirstPropertyOnStruct.Big(), big.NewInt(1)) + expectedStorageKey := common.BytesToHash(incrementedStorageKey.Bytes()) + Expect(storageKey).To(Equal(expectedStorageKey)) + }) + }) +}) diff --git a/libraries/shared/storage/utils/value_test.go b/libraries/shared/storage/utils/value_test.go new file mode 100644 index 00000000..6af20499 --- /dev/null +++ b/libraries/shared/storage/utils/value_test.go @@ -0,0 +1,22 @@ +package utils_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils" +) + +var _ = Describe("Storage value metadata getter", func() { + It("returns a storage value metadata instance with corresponding fields assigned", func() { + metadataName := "fake_name" + metadataKeys := map[utils.Key]string{"key": "value"} + metadataType := utils.Uint256 + + expectedMetadata := utils.StorageValueMetadata{ + Name: metadataName, + Keys: metadataKeys, + Type: metadataType, + } + Expect(utils.GetStorageValueMetadata(metadataName, metadataKeys, metadataType)).To(Equal(expectedMetadata)) + }) +}) diff --git a/libraries/shared/test_data/generic.go b/libraries/shared/test_data/generic.go index d4a34a7a..9dd53ad7 100644 --- a/libraries/shared/test_data/generic.go +++ b/libraries/shared/test_data/generic.go @@ -49,7 +49,7 @@ var GenericTestConfig = transformer.TransformerConfig{ } func randomString(length int) string { - var seededRand *rand.Rand = rand.New( + var seededRand = rand.New( rand.NewSource(time.Now().UnixNano())) charset := "abcdefghijklmnopqrstuvwxyz1234567890" b := make([]byte, length) diff --git 
a/libraries/shared/watcher/storage_watcher.go b/libraries/shared/watcher/storage_watcher.go index 87d078a0..8b871e5b 100644 --- a/libraries/shared/watcher/storage_watcher.go +++ b/libraries/shared/watcher/storage_watcher.go @@ -50,8 +50,8 @@ func NewStorageWatcher(tailer fs.Tailer, db *postgres.DB) StorageWatcher { func (watcher StorageWatcher) AddTransformers(initializers []transformer.StorageTransformerInitializer) { for _, initializer := range initializers { - transformer := initializer(watcher.db) - watcher.Transformers[transformer.ContractAddress()] = transformer + storageTransformer := initializer(watcher.db) + watcher.Transformers[storageTransformer.ContractAddress()] = storageTransformer } } @@ -65,12 +65,12 @@ func (watcher StorageWatcher) Execute() error { if parseErr != nil { return parseErr } - transformer, ok := watcher.Transformers[row.Contract] + storageTransformer, ok := watcher.Transformers[row.Contract] if !ok { logrus.Warn(utils.ErrContractNotFound{Contract: row.Contract.Hex()}.Error()) continue } - executeErr := transformer.Execute(row) + executeErr := storageTransformer.Execute(row) if executeErr != nil { if isKeyNotFound(executeErr) { queueErr := watcher.Queue.Add(row) diff --git a/pkg/omni/full/converter/converter.go b/pkg/omni/full/converter/converter.go index dd5b9725..cabba692 100644 --- a/pkg/omni/full/converter/converter.go +++ b/pkg/omni/full/converter/converter.go @@ -55,10 +55,10 @@ func (c *converter) Update(info *contract.Contract) { // Convert the given watched event log into a types.Log for the given event func (c *converter) Convert(watchedEvent core.WatchedEvent, event types.Event) (*types.Log, error) { - contract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil) + boundContract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil) values := make(map[string]interface{}) log := helpers.ConvertToLog(watchedEvent) - 
err := contract.UnpackLogIntoMap(values, event.Name, log) + err := boundContract.UnpackLogIntoMap(values, event.Name, log) if err != nil { return nil, err } diff --git a/pkg/omni/full/transformer/transformer.go b/pkg/omni/full/transformer/transformer.go index c4f6cedf..7e8e1528 100644 --- a/pkg/omni/full/transformer/transformer.go +++ b/pkg/omni/full/transformer/transformer.go @@ -101,60 +101,59 @@ func NewTransformer(network string, BC core.BlockChain, DB *postgres.DB) *Transf // Loops over all of the addr => filter sets // Uses parser to pull event info from abi // Use this info to generate event filters -func (t *Transformer) Init() error { - for contractAddr, subset := range t.WatchedEvents { +func (transformer *Transformer) Init() error { + for contractAddr, subset := range transformer.WatchedEvents { // Get Abi - err := t.Parser.Parse(contractAddr) + err := transformer.Parser.Parse(contractAddr) if err != nil { return err } // Get first block and most recent block number in the header repo - firstBlock, err := t.BlockRetriever.RetrieveFirstBlock(contractAddr) + firstBlock, err := transformer.BlockRetriever.RetrieveFirstBlock(contractAddr) if err != nil { return err } - lastBlock, err := t.BlockRetriever.RetrieveMostRecentBlock() + lastBlock, err := transformer.BlockRetriever.RetrieveMostRecentBlock() if err != nil { return err } // Set to specified range if it falls within the bounds - if firstBlock < t.ContractStart[contractAddr] { - firstBlock = t.ContractStart[contractAddr] + if firstBlock < transformer.ContractStart[contractAddr] { + firstBlock = transformer.ContractStart[contractAddr] } // Get contract name if it has one var name = new(string) - t.Poller.FetchContractData(t.Abi(), contractAddr, "name", nil, name, lastBlock) + transformer.Poller.FetchContractData(transformer.Abi(), contractAddr, "name", nil, name, lastBlock) // Remove any potential accidental duplicate inputs in arg filter values eventArgs := map[string]bool{} - for _, arg := range 
t.EventArgs[contractAddr] { + for _, arg := range transformer.EventArgs[contractAddr] { eventArgs[arg] = true } methodArgs := map[string]bool{} - for _, arg := range t.MethodArgs[contractAddr] { + for _, arg := range transformer.MethodArgs[contractAddr] { methodArgs[arg] = true } // Aggregate info into contract object info := contract.Contract{ - Name: *name, - Network: t.Network, - Address: contractAddr, - Abi: t.Parser.Abi(), - ParsedAbi: t.Parser.ParsedAbi(), - StartingBlock: firstBlock, - LastBlock: lastBlock, - // TODO: consider whether this duplicated knowledge from t.WatchedEvents - Events: t.Parser.GetEvents(subset), - Methods: t.Parser.GetSelectMethods(t.WantedMethods[contractAddr]), + Name: *name, + Network: transformer.Network, + Address: contractAddr, + Abi: transformer.Parser.Abi(), + ParsedAbi: transformer.Parser.ParsedAbi(), + StartingBlock: firstBlock, + LastBlock: lastBlock, + Events: transformer.Parser.GetEvents(subset), + Methods: transformer.Parser.GetSelectMethods(transformer.WantedMethods[contractAddr]), FilterArgs: eventArgs, MethodArgs: methodArgs, - CreateAddrList: t.CreateAddrList[contractAddr], - CreateHashList: t.CreateHashList[contractAddr], - Piping: t.Piping[contractAddr], + CreateAddrList: transformer.CreateAddrList[contractAddr], + CreateHashList: transformer.CreateHashList[contractAddr], + Piping: transformer.Piping[contractAddr], }.Init() // Use info to create filters @@ -165,14 +164,14 @@ func (t *Transformer) Init() error { // Iterate over filters and push them to the repo using filter repository interface for _, filter := range info.Filters { - err = t.FilterRepository.CreateFilter(filter) + err = transformer.FilterRepository.CreateFilter(filter) if err != nil { return err } } // Store contract info for further processing - t.Contracts[contractAddr] = info + transformer.Contracts[contractAddr] = info } return nil @@ -183,18 +182,18 @@ func (t *Transformer) Init() error { // Uses converter to convert logs into custom log type // 
Persists converted logs into custuom postgres tables // Calls selected methods, using token holder address generated during event log conversion -func (tr Transformer) Execute() error { - if len(tr.Contracts) == 0 { +func (transformer Transformer) Execute() error { + if len(transformer.Contracts) == 0 { return errors.New("error: transformer has no initialized contracts to work with") } // Iterate through all internal contracts - for _, con := range tr.Contracts { + for _, con := range transformer.Contracts { // Update converter with current contract - tr.Update(con) + transformer.Update(con) // Iterate through contract filters and get watched event logs for eventSig, filter := range con.Filters { - watchedEvents, err := tr.GetWatchedEvents(filter.Name) + watchedEvents, err := transformer.GetWatchedEvents(filter.Name) if err != nil { return err } @@ -202,7 +201,7 @@ func (tr Transformer) Execute() error { // Iterate over watched event logs for _, we := range watchedEvents { // Convert them to our custom log type - cstm, err := tr.Converter.Convert(*we, con.Events[eventSig]) + cstm, err := transformer.Converter.Convert(*we, con.Events[eventSig]) if err != nil { return err } @@ -212,7 +211,7 @@ func (tr Transformer) Execute() error { // If log is not empty, immediately persist in repo // Run this in seperate goroutine? - err = tr.PersistLogs([]types.Log{*cstm}, con.Events[eventSig], con.Address, con.Name) + err = transformer.PersistLogs([]types.Log{*cstm}, con.Events[eventSig], con.Address, con.Name) if err != nil { return err } @@ -223,7 +222,7 @@ func (tr Transformer) Execute() error { // poller polls select contract methods // and persists the results into custom pg tables // Run this in seperate goroutine? 
- if err := tr.PollContract(*con); err != nil { + if err := transformer.PollContract(*con); err != nil { return err } } @@ -232,41 +231,41 @@ func (tr Transformer) Execute() error { } // Used to set which contract addresses and which of their events to watch -func (tr *Transformer) SetEvents(contractAddr string, filterSet []string) { - tr.WatchedEvents[strings.ToLower(contractAddr)] = filterSet +func (transformer *Transformer) SetEvents(contractAddr string, filterSet []string) { + transformer.WatchedEvents[strings.ToLower(contractAddr)] = filterSet } // Used to set subset of account addresses to watch events for -func (tr *Transformer) SetEventArgs(contractAddr string, filterSet []string) { - tr.EventArgs[strings.ToLower(contractAddr)] = filterSet +func (transformer *Transformer) SetEventArgs(contractAddr string, filterSet []string) { + transformer.EventArgs[strings.ToLower(contractAddr)] = filterSet } // Used to set which contract addresses and which of their methods to call -func (tr *Transformer) SetMethods(contractAddr string, filterSet []string) { - tr.WantedMethods[strings.ToLower(contractAddr)] = filterSet +func (transformer *Transformer) SetMethods(contractAddr string, filterSet []string) { + transformer.WantedMethods[strings.ToLower(contractAddr)] = filterSet } // Used to set subset of account addresses to poll methods on -func (tr *Transformer) SetMethodArgs(contractAddr string, filterSet []string) { - tr.MethodArgs[strings.ToLower(contractAddr)] = filterSet +func (transformer *Transformer) SetMethodArgs(contractAddr string, filterSet []string) { + transformer.MethodArgs[strings.ToLower(contractAddr)] = filterSet } // Used to set the block range to watch for a given address -func (tr *Transformer) SetStartingBlock(contractAddr string, start int64) { - tr.ContractStart[strings.ToLower(contractAddr)] = start +func (transformer *Transformer) SetStartingBlock(contractAddr string, start int64) { + transformer.ContractStart[strings.ToLower(contractAddr)] = 
start } // Used to set whether or not to persist an account address list -func (tr *Transformer) SetCreateAddrList(contractAddr string, on bool) { - tr.CreateAddrList[strings.ToLower(contractAddr)] = on +func (transformer *Transformer) SetCreateAddrList(contractAddr string, on bool) { + transformer.CreateAddrList[strings.ToLower(contractAddr)] = on } // Used to set whether or not to persist an hash list -func (tr *Transformer) SetCreateHashList(contractAddr string, on bool) { - tr.CreateHashList[strings.ToLower(contractAddr)] = on +func (transformer *Transformer) SetCreateHashList(contractAddr string, on bool) { + transformer.CreateHashList[strings.ToLower(contractAddr)] = on } // Used to turn method piping on for a contract -func (tr *Transformer) SetPiping(contractAddr string, on bool) { - tr.Piping[strings.ToLower(contractAddr)] = on +func (transformer *Transformer) SetPiping(contractAddr string, on bool) { + transformer.Piping[strings.ToLower(contractAddr)] = on } diff --git a/pkg/omni/light/converter/converter.go b/pkg/omni/light/converter/converter.go index 3deba288..e8a91ceb 100644 --- a/pkg/omni/light/converter/converter.go +++ b/pkg/omni/light/converter/converter.go @@ -54,7 +54,7 @@ func (c *converter) Update(info *contract.Contract) { // Convert the given watched event log into a types.Log for the given event func (c *converter) Convert(logs []gethTypes.Log, event types.Event, headerID int64) ([]types.Log, error) { - contract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil) + boundContract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil) returnLogs := make([]types.Log, 0, len(logs)) for _, log := range logs { values := make(map[string]interface{}) @@ -63,7 +63,7 @@ func (c *converter) Convert(logs []gethTypes.Log, event types.Event, headerID in values[field.Name] = i } - err := contract.UnpackLogIntoMap(values, event.Name, log) + 
err := boundContract.UnpackLogIntoMap(values, event.Name, log) if err != nil { return nil, err } @@ -133,7 +133,7 @@ func (c *converter) Convert(logs []gethTypes.Log, event types.Event, headerID in // Convert the given watched event logs into types.Logs; returns a map of event names to a slice of their converted logs func (c *converter) ConvertBatch(logs []gethTypes.Log, events map[string]types.Event, headerID int64) (map[string][]types.Log, error) { - contract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil) + boundContract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil) eventsToLogs := make(map[string][]types.Log) for _, event := range events { eventsToLogs[event.Name] = make([]types.Log, 0, len(logs)) @@ -142,7 +142,7 @@ func (c *converter) ConvertBatch(logs []gethTypes.Log, events map[string]types.E // If the log is of this event type, process it as such if event.Sig() == log.Topics[0] { values := make(map[string]interface{}) - err := contract.UnpackLogIntoMap(values, event.Name, log) + err := boundContract.UnpackLogIntoMap(values, event.Name, log) if err != nil { return nil, err } diff --git a/pkg/omni/shared/constants/constants.go b/pkg/omni/shared/constants/constants.go index 594e0fce..e5a9edfe 100644 --- a/pkg/omni/shared/constants/constants.go +++ b/pkg/omni/shared/constants/constants.go @@ -72,6 +72,7 @@ var TusdContractAddress = "0x8dd5fbCe2F6a956C3022bA3663759011Dd51e73E" var EnsContractAddress = "0x314159265dD8dbb310642f98f50C066173C1259b" var PublicResolverAddress = "0x1da022710dF5002339274AaDEe8D58218e9D6AB5" +// TODO: Consider whether these should be moved to plugins // Contract Owner var DaiContractOwner = "0x0000000000000000000000000000000000000000" var TusdContractOwner = "0x9978d2d229a69b3aef93420d132ab22b44e3578f" diff --git a/pkg/omni/shared/helpers/test_helpers/database.go 
b/pkg/omni/shared/helpers/test_helpers/database.go index 1b9558b4..5b81dc83 100644 --- a/pkg/omni/shared/helpers/test_helpers/database.go +++ b/pkg/omni/shared/helpers/test_helpers/database.go @@ -106,6 +106,7 @@ type Owner struct { Address string `db:"returned"` } +// TODO: consider whether this should be moved to libraries/shared func SetupBC() core.BlockChain { infuraIPC := "https://mainnet.infura.io/v3/b09888c1113640cc9ab42750ce750c05" rawRpcClient, err := rpc.Dial(infuraIPC) @@ -113,9 +114,9 @@ func SetupBC() core.BlockChain { rpcClient := client.NewRpcClient(rawRpcClient, infuraIPC) ethClient := ethclient.NewClient(rawRpcClient) blockChainClient := client.NewEthClient(ethClient) - node := node.MakeNode(rpcClient) + blockChainNode := node.MakeNode(rpcClient) transactionConverter := rpc2.NewRpcTransactionConverter(ethClient) - blockChain := geth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter) + blockChain := geth.NewBlockChain(blockChainClient, rpcClient, blockChainNode, transactionConverter) return blockChain } @@ -127,9 +128,9 @@ func SetupDBandBC() (*postgres.DB, core.BlockChain) { rpcClient := client.NewRpcClient(rawRpcClient, infuraIPC) ethClient := ethclient.NewClient(rawRpcClient) blockChainClient := client.NewEthClient(ethClient) - node := node.MakeNode(rpcClient) + blockChainNode := node.MakeNode(rpcClient) transactionConverter := rpc2.NewRpcTransactionConverter(ethClient) - blockChain := geth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter) + blockChain := geth.NewBlockChain(blockChainClient, rpcClient, blockChainNode, transactionConverter) db, err := postgres.NewDB(config.Database{ Hostname: "localhost", @@ -188,6 +189,7 @@ func SetupTusdContract(wantedEvents, wantedMethods []string) *contract.Contract }.Init() } +// TODO: consider whether this can be moved to plugin or libraries/shared func SetupENSRepo(vulcanizeLogId *int64, wantedEvents, wantedMethods []string) (*postgres.DB, *contract.Contract) { db, 
err := postgres.NewDB(config.Database{ Hostname: "localhost", diff --git a/pkg/plugin/test_helpers/database.go b/pkg/plugin/test_helpers/database.go index 8edea9c2..846e331a 100644 --- a/pkg/plugin/test_helpers/database.go +++ b/pkg/plugin/test_helpers/database.go @@ -31,6 +31,7 @@ import ( "github.com/vulcanize/vulcanizedb/pkg/geth/node" ) +// TODO: consider whether this should be moved to libraries/shared func SetupDBandBC() (*postgres.DB, core.BlockChain) { infuraIPC := "http://kovan0.vulcanize.io:8545" rawRpcClient, err := rpc.Dial(infuraIPC) @@ -38,9 +39,9 @@ func SetupDBandBC() (*postgres.DB, core.BlockChain) { rpcClient := client.NewRpcClient(rawRpcClient, infuraIPC) ethClient := ethclient.NewClient(rawRpcClient) blockChainClient := client.NewEthClient(ethClient) - node := node.MakeNode(rpcClient) + blockChainNode := node.MakeNode(rpcClient) transactionConverter := rpc2.NewRpcTransactionConverter(ethClient) - blockChain := geth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter) + blockChain := geth.NewBlockChain(blockChainClient, rpcClient, blockChainNode, transactionConverter) db, err := postgres.NewDB(config.Database{ Hostname: "localhost",