major refactor part 2: remove cold import, full sync, generalize node table

This commit is contained in:
Ian Norden 2020-02-10 09:00:55 -06:00
parent 0f765df12c
commit ca273a026d
171 changed files with 196 additions and 6330 deletions

View File

@ -110,7 +110,7 @@ As mentioned above, VulcanizeDB's processes can be split into three categories:
### Data syncing
To provide data for transformations, raw Ethereum data must first be synced into VulcanizeDB.
This is accomplished through the use of the `headerSync`, `fullSync`, or `coldImport` commands.
This is accomplished through the use of the `headerSync` command.
These commands are described in detail [here](documentation/data-syncing.md).
### Data transformation

View File

@ -1,97 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/vulcanize/vulcanizedb/pkg/eth/cold_import"
"github.com/vulcanize/vulcanizedb/pkg/eth/converters/cold_db"
vulcCommon "github.com/vulcanize/vulcanizedb/pkg/eth/converters/common"
"github.com/vulcanize/vulcanizedb/pkg/eth/crypto"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/ethereum"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres/repositories"
"github.com/vulcanize/vulcanizedb/pkg/fs"
"github.com/vulcanize/vulcanizedb/utils"
)
// coldImportCmd syncs VulcanizeDB directly from a stopped Geth node's
// LevelDB files, bypassing RPC entirely. Registered on rootCmd in init.
var coldImportCmd = &cobra.Command{
	Use:   "coldImport",
	Short: "Sync vulcanize from a cold instance of LevelDB.",
	Long: `Populate core vulcanize db data directly out of LevelDB, rather than over rpc calls. For example:
./vulcanizedb coldImport -s 0 -e 5000000
Geth must be synced over all of the desired blocks and must not be running in order to execute this command.`,
	Run: func(cmd *cobra.Command, args []string) {
		// Tag every log line with the subcommand name actually invoked.
		subCommand = cmd.CalledAs()
		logWithCommand = *log.WithField("SubCommand", subCommand)
		coldImport()
	},
}
// init registers coldImportCmd and its block-range flags on the root command.
func init() {
	rootCmd.AddCommand(coldImportCmd)
	// -s/-e bound the import range; --all overrides both with 0..head
	// (see coldImport). All three bind to package-level vars shared by cmd.
	coldImportCmd.Flags().Int64VarP(&startingBlockNumber, "starting-block-number", "s", 0, "BlockNumber for first block to cold import.")
	coldImportCmd.Flags().Int64VarP(&endingBlockNumber, "ending-block-number", "e", 5500000, "BlockNumber for last block to cold import.")
	coldImportCmd.Flags().BoolVarP(&syncAll, "all", "a", false, "Option to sync all missing blocks.")
}
// coldImport populates Postgres directly from a cold (not running) Geth
// LevelDB instance. It validates the requested block range against the head
// of the cold DB, resolves node identity from the data directory, then runs
// the importer over [startingBlockNumber, endingBlockNumber].
// Every failure path exits the process via logWithCommand.Fatal.
func coldImport() {
	// init eth db
	ethDBConfig := ethereum.CreateDatabaseConfig(ethereum.Level, levelDbPath)
	ethDB, err := ethereum.CreateDatabase(ethDBConfig)
	if err != nil {
		logWithCommand.Fatal("Error connecting to ethereum db: ", err)
	}
	mostRecentBlockNumberInDb := ethDB.GetHeadBlockNumber()
	// --all overrides any -s/-e flag values with the full 0..head range.
	if syncAll {
		startingBlockNumber = 0
		endingBlockNumber = mostRecentBlockNumberInDb
	}
	// Range sanity checks: cannot import an inverted range or blocks the
	// cold DB has not synced yet.
	if endingBlockNumber < startingBlockNumber {
		logWithCommand.Fatal("Ending block number must be greater than starting block number for cold import.")
	}
	if endingBlockNumber > mostRecentBlockNumberInDb {
		logWithCommand.Fatal("Ending block number is greater than most recent block in db: ", mostRecentBlockNumberInDb)
	}
	// init pg db
	// Node identity is built from the genesis hash plus data read from the
	// LevelDB path (presumably the node key file — confirm against
	// cold_import.NewColdImportNodeBuilder); it scopes rows in Postgres.
	genesisBlock := ethDB.GetBlockHash(0)
	reader := fs.FsReader{}
	parser := crypto.EthPublicKeyParser{}
	nodeBuilder := cold_import.NewColdImportNodeBuilder(reader, parser)
	coldNode, err := nodeBuilder.GetNode(genesisBlock, levelDbPath)
	if err != nil {
		logWithCommand.Fatal("Error getting node: ", err)
	}
	pgDB := utils.LoadPostgres(databaseConfig, coldNode)
	// init cold importer deps
	blockRepository := repositories.NewBlockRepository(&pgDB)
	receiptRepository := repositories.FullSyncReceiptRepository{DB: &pgDB}
	transactionConverter := cold_db.NewColdDbTransactionConverter()
	blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
	// init and execute cold importer
	coldImporter := cold_import.NewColdImporter(ethDB, blockRepository, receiptRepository, blockConverter)
	err = coldImporter.Execute(startingBlockNumber, endingBlockNumber, coldNode.ID)
	if err != nil {
		logWithCommand.Fatal("Error executing cold import: ", err)
	}
}

View File

@ -25,7 +25,6 @@ import (
st "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/pkg/config"
ft "github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/full/transformer"
ht "github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/header/transformer"
"github.com/vulcanize/vulcanizedb/utils"
)
@ -99,14 +98,7 @@ func contractWatcher() {
var t st.ContractTransformer
con := config.ContractConfig{}
con.PrepConfig()
switch mode {
case "header":
t = ht.NewTransformer(con, blockChain, &db)
case "full":
t = ft.NewTransformer(con, blockChain, &db)
default:
logWithCommand.Fatal("Invalid mode")
}
err := t.Init()
if err != nil {
@ -123,5 +115,4 @@ func contractWatcher() {
func init() {
rootCmd.AddCommand(contractWatcherCmd)
contractWatcherCmd.Flags().StringVarP(&mode, "mode", "o", "header", "'header' or 'full' mode to work with either header synced or fully synced vDB (default is header)")
}

View File

@ -1,105 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres/repositories"
"github.com/vulcanize/vulcanizedb/pkg/eth/history"
"github.com/vulcanize/vulcanizedb/utils"
)
// fullSyncCmd represents the fullSync command
// fullSyncCmd runs the continuous full sync: blocks, transactions, receipts,
// and logs are pulled from a local Ethereum node into Postgres.
// Registered on rootCmd in init.
var fullSyncCmd = &cobra.Command{
	Use:   "fullSync",
	Short: "Syncs VulcanizeDB with local ethereum node",
	Long: `Syncs VulcanizeDB with local ethereum node. Populates
Postgres with blocks, transactions, receipts, and logs.
./vulcanizedb fullSync --starting-block-number 0 --config public.toml
Expects ethereum node to be running and requires a .toml config:
[database]
name = "vulcanize_public"
hostname = "localhost"
port = 5432
[client]
ipcPath = "/Users/user/Library/Ethereum/geth.ipc"
`,
	Run: func(cmd *cobra.Command, args []string) {
		// Tag every log line with the subcommand name actually invoked.
		subCommand = cmd.CalledAs()
		logWithCommand = *log.WithField("SubCommand", subCommand)
		fullSync()
	},
}
// init registers fullSyncCmd and its starting-block flag on the root command.
func init() {
	rootCmd.AddCommand(fullSyncCmd)
	// Binds to the package-level startingBlockNumber shared across commands.
	fullSyncCmd.Flags().Int64VarP(&startingBlockNumber, "starting-block-number", "s", 0, "Block number to start syncing from")
}
// backFillAllBlocks populates any blocks missing from startingBlockNumber
// onward, then reports the number populated on the channel so the caller can
// schedule another pass. Errors are logged but do not abort the send.
func backFillAllBlocks(blockchain core.BlockChain, blockRepository datastore.BlockRepository, missingBlocksPopulated chan int, startingBlockNumber int64) {
	count, populateErr := history.PopulateMissingBlocks(blockchain, blockRepository, startingBlockNumber)
	if populateErr != nil {
		logWithCommand.Error("backfillAllBlocks: error in populateMissingBlocks: ", populateErr)
	}
	missingBlocksPopulated <- count
}
// fullSync continuously syncs full block data (blocks, transactions,
// receipts, logs) from the configured Ethereum node into Postgres.
// It backfills missing blocks from startingBlockNumber in a background
// goroutine and re-validates the most recent validationWindow blocks on
// every pollingInterval tick. Runs until the process is terminated.
func fullSync() {
	ticker := time.NewTicker(pollingInterval)
	defer ticker.Stop()
	blockChain := getBlockChain()
	lastBlock, err := blockChain.LastBlock()
	if err != nil {
		// Fatal, not Error: lastBlock is dereferenced immediately below, so
		// continuing after a failed fetch would panic on a nil *big.Int.
		logWithCommand.Fatal("fullSync: Error getting last block: ", err)
	}
	if lastBlock.Int64() == 0 {
		logWithCommand.Fatal("geth initial: state sync not finished")
	}
	if startingBlockNumber > lastBlock.Int64() {
		logWithCommand.Fatal("fullSync: starting block number > current block number")
	}
	db := utils.LoadPostgres(databaseConfig, blockChain.Node())
	blockRepository := repositories.NewBlockRepository(&db)
	validator := history.NewBlockValidator(blockChain, blockRepository, validationWindow)
	missingBlocksPopulated := make(chan int)
	go backFillAllBlocks(blockChain, blockRepository, missingBlocksPopulated, startingBlockNumber)
	for {
		select {
		case <-ticker.C:
			// Periodically re-check the trailing window of blocks in case of
			// reorgs; validation errors are logged but the loop continues.
			window, err := validator.ValidateBlocks()
			if err != nil {
				logWithCommand.Error("fullSync: error in validateBlocks: ", err)
			}
			logWithCommand.Debug(window.GetString())
		case <-missingBlocksPopulated:
			// Previous backfill pass finished; immediately start another to
			// pick up blocks that arrived in the meantime.
			go backFillAllBlocks(blockChain, blockRepository, missingBlocksPopulated, startingBlockNumber)
		}
	}
}

View File

@ -1,23 +0,0 @@
-- +goose Up
-- Full-sync block store: one row per Ethereum block imported.
-- Reserved words (number, size, time) are quoted.
CREATE TABLE public.eth_blocks (
  id SERIAL PRIMARY KEY,
  difficulty BIGINT,
  extra_data VARCHAR,
  gas_limit BIGINT,
  gas_used BIGINT,
  hash VARCHAR(66),
  miner VARCHAR(42),
  nonce VARCHAR(20),
  "number" BIGINT,
  parent_hash VARCHAR(66),
  reward NUMERIC,
  uncles_reward NUMERIC,
  "size" VARCHAR,
  "time" BIGINT,
  is_final BOOLEAN,
  uncle_hash VARCHAR(66)
);
-- +goose Down
DROP TABLE public.eth_blocks;

View File

@ -3,7 +3,7 @@ CREATE TABLE nodes (
id SERIAL PRIMARY KEY,
client_name VARCHAR,
genesis_block VARCHAR(66),
network_id NUMERIC,
network_id VARCHAR,
node_id VARCHAR(128),
CONSTRAINT node_uc UNIQUE (genesis_block, network_id, node_id)
);

View File

@ -1,18 +0,0 @@
-- +goose Up
-- Transactions captured by the full sync; rows are deleted with their block
-- via the cascading FK. "value" is quoted (reserved word).
CREATE TABLE full_sync_transactions (
  id SERIAL PRIMARY KEY,
  block_id INTEGER NOT NULL REFERENCES eth_blocks(id) ON DELETE CASCADE,
  gas_limit NUMERIC,
  gas_price NUMERIC,
  hash VARCHAR(66),
  input_data BYTEA,
  nonce NUMERIC,
  raw BYTEA,
  tx_from VARCHAR(66),
  tx_index INTEGER,
  tx_to VARCHAR(66),
  "value" NUMERIC
);
-- +goose Down
DROP TABLE full_sync_transactions;

View File

@ -1,6 +0,0 @@
-- +goose Up
-- Speeds block lookups by number (non-unique: multiple nodes may store the
-- same block number).
CREATE INDEX number_index ON eth_blocks (number);
-- +goose Down
DROP INDEX number_index;

View File

@ -7,7 +7,7 @@ CREATE TABLE public.headers
raw JSONB,
block_timestamp NUMERIC,
check_count INTEGER NOT NULL DEFAULT 0,
eth_node_id INTEGER NOT NULL REFERENCES eth_nodes (id) ON DELETE CASCADE,
node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE,
eth_node_fingerprint VARCHAR(128),
UNIQUE (block_number, hash, eth_node_fingerprint)
);

View File

@ -1,10 +0,0 @@
-- +goose Up
-- Contracts registered for watching; contract_hash is unique per contract
-- and the ABI is stored alongside for event decoding.
CREATE TABLE watched_contracts
(
  contract_id SERIAL PRIMARY KEY,
  contract_abi json,
  contract_hash VARCHAR(66) UNIQUE
);
-- +goose Down
DROP TABLE watched_contracts;

View File

@ -1,11 +0,0 @@
-- +goose Up
-- Attach each block to the node that sourced it; blocks cascade-delete with
-- their node.
-- NOTE(review): ADD COLUMN ... NOT NULL without a DEFAULT fails when
-- eth_blocks already contains rows — presumably this ran against an empty
-- table; confirm migration ordering.
ALTER TABLE eth_blocks
  ADD COLUMN node_id INTEGER NOT NULL,
  ADD CONSTRAINT node_fk
    FOREIGN KEY (node_id)
    REFERENCES nodes (id)
    ON DELETE CASCADE;
-- +goose Down
ALTER TABLE eth_blocks
  DROP COLUMN node_id;

View File

@ -1,18 +0,0 @@
-- +goose Up
-- Raw event logs from the full sync; topic0-topic3 hold the log's topics and
-- are matched against log_filters by the watched_event_logs view.
CREATE TABLE full_sync_logs
(
  id SERIAL PRIMARY KEY,
  block_number BIGINT,
  address VARCHAR(66),
  tx_hash VARCHAR(66),
  index BIGINT,
  topic0 VARCHAR(66),
  topic1 VARCHAR(66),
  topic2 VARCHAR(66),
  topic3 VARCHAR(66),
  data TEXT
);
-- +goose Down
DROP TABLE full_sync_logs;

View File

@ -1,7 +0,0 @@
-- +goose Up
-- A given node may store each block number at most once.
ALTER TABLE eth_blocks
  ADD CONSTRAINT node_id_block_number_uc UNIQUE (number, node_id);
-- +goose Down
ALTER TABLE eth_blocks
  DROP CONSTRAINT node_id_block_number_uc;

View File

@ -1,5 +0,0 @@
-- +goose Up
-- Supports the FK join from transactions back to their block.
CREATE INDEX block_id_index ON full_sync_transactions (block_id);
-- +goose Down
DROP INDEX block_id_index;

View File

@ -1,5 +0,0 @@
-- +goose Up
-- Supports the FK join from blocks back to their source node.
CREATE INDEX node_id_index ON eth_blocks (node_id);
-- +goose Down
DROP INDEX node_id_index;

View File

@ -1,5 +0,0 @@
-- +goose Up
-- Speeds queries for transactions by recipient address.
CREATE INDEX tx_to_index ON full_sync_transactions(tx_to);
-- +goose Down
DROP INDEX tx_to_index;

View File

@ -1,5 +0,0 @@
-- +goose Up
-- Speeds queries for transactions by sender address.
CREATE INDEX tx_from_index ON full_sync_transactions(tx_from);
-- +goose Down
DROP INDEX tx_from_index;

View File

@ -6,6 +6,7 @@ CREATE TABLE eth.header_cids (
parent_hash VARCHAR(66) NOT NULL,
cid TEXT NOT NULL,
td BIGINT,
node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE,
UNIQUE (block_number, block_hash)
);

View File

@ -1,16 +0,0 @@
-- +goose Up
-- Receipts for full-sync transactions; cascade-deleted with their
-- transaction or contract address row.
CREATE TABLE full_sync_receipts
(
  id SERIAL PRIMARY KEY,
  transaction_id INTEGER NOT NULL REFERENCES full_sync_transactions (id) ON DELETE CASCADE,
  contract_address_id INTEGER NOT NULL REFERENCES addresses (id) ON DELETE CASCADE,
  cumulative_gas_used NUMERIC,
  gas_used NUMERIC,
  state_root VARCHAR(66),
  status INTEGER,
  tx_hash VARCHAR(66)
);
-- +goose Down
DROP TABLE full_sync_receipts;

View File

@ -1,5 +0,0 @@
-- +goose Up
-- Supports the FK join from receipts back to their transaction.
CREATE INDEX transaction_id_index ON full_sync_receipts (transaction_id);
-- +goose Down
DROP INDEX transaction_id_index;

View File

@ -1,17 +0,0 @@
-- +goose Up
-- Link logs to their receipt. Added as a nullable column plus a separate FK
-- so existing log rows remain valid; logs cascade-delete with the receipt.
ALTER TABLE full_sync_logs
  ADD COLUMN receipt_id INT;
ALTER TABLE full_sync_logs
  ADD CONSTRAINT receipts_fk
    FOREIGN KEY (receipt_id)
    REFERENCES full_sync_receipts (id)
    ON DELETE CASCADE;
-- +goose Down
ALTER TABLE full_sync_logs
  DROP CONSTRAINT receipts_fk;
ALTER TABLE full_sync_logs
  DROP COLUMN receipt_id;

View File

@ -1,16 +0,0 @@
-- +goose Up
-- Named log filters; watched_event_logs matches these against full_sync_logs.
CREATE TABLE log_filters (
  id SERIAL,
  name VARCHAR NOT NULL CHECK (name <> ''),
  from_block BIGINT CHECK (from_block >= 0),
  -- NOTE(review): the CHECK below references from_block, not to_block — a
  -- historical typo. Postgres auto-names it log_filters_from_block_check1,
  -- and a later migration depends on that exact name to drop it and add the
  -- intended to_block check. Do not "fix" this migration in place.
  to_block BIGINT CHECK (from_block >= 0),
  address VARCHAR(66),
  topic0 VARCHAR(66),
  topic1 VARCHAR(66),
  topic2 VARCHAR(66),
  topic3 VARCHAR(66),
  CONSTRAINT name_uc UNIQUE (name)
);
-- +goose Down
DROP TABLE log_filters;

View File

@ -1,32 +0,0 @@
-- +goose Up
-- block_stats exposes the min/max block numbers present in full_sync_logs,
-- used below to default open-ended filter ranges.
CREATE VIEW block_stats AS
  SELECT max(block_number) AS max_block,
         min(block_number) AS min_block
  FROM full_sync_logs;
-- watched_event_logs joins each named filter to the logs matching its
-- address, block range (NULL bounds default to the full synced range via
-- block_stats), and any pinned topics (a NULL filter topic is a wildcard).
CREATE VIEW watched_event_logs AS
  SELECT log_filters.name,
         full_sync_logs.id,
         block_number,
         full_sync_logs.address,
         tx_hash,
         index,
         full_sync_logs.topic0,
         full_sync_logs.topic1,
         full_sync_logs.topic2,
         full_sync_logs.topic3,
         data,
         receipt_id
  FROM log_filters
    CROSS JOIN block_stats
    JOIN full_sync_logs ON full_sync_logs.address = log_filters.address
      AND full_sync_logs.block_number >= coalesce(log_filters.from_block, block_stats.min_block)
      AND full_sync_logs.block_number <= coalesce(log_filters.to_block, block_stats.max_block)
  WHERE (log_filters.topic0 = full_sync_logs.topic0 OR log_filters.topic0 ISNULL)
    AND (log_filters.topic1 = full_sync_logs.topic1 OR log_filters.topic1 ISNULL)
    AND (log_filters.topic2 = full_sync_logs.topic2 OR log_filters.topic2 ISNULL)
    AND (log_filters.topic3 = full_sync_logs.topic3 OR log_filters.topic3 ISNULL);
-- +goose Down
DROP VIEW watched_event_logs;
DROP VIEW block_stats;

View File

@ -1,14 +0,0 @@
-- +goose Up
-- The original log_filters DDL checked from_block twice (a typo), so
-- Postgres auto-named the duplicate constraint log_filters_from_block_check1.
-- Replace it with the intended check on to_block.
ALTER TABLE log_filters
  DROP CONSTRAINT log_filters_from_block_check1;
ALTER TABLE log_filters
  ADD CONSTRAINT log_filters_to_block_check CHECK (to_block >= 0);
-- +goose Down
-- Restores the original (misnamed) constraint so the schema round-trips.
ALTER TABLE log_filters
  DROP CONSTRAINT log_filters_to_block_check;
ALTER TABLE log_filters
  ADD CONSTRAINT log_filters_from_block_check1 CHECK (to_block >= 0);

View File

@ -7,6 +7,7 @@ CREATE TABLE btc.header_cids (
cid TEXT NOT NULL,
timestamp NUMERIC NOT NULL,
bits BIGINT NOT NULL,
node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE,
UNIQUE (block_number, block_hash)
);

View File

@ -1,43 +0,0 @@
-- +goose Up
-- Rename nodes -> eth_nodes and node_id -> eth_node_id, recreating the
-- unique and FK constraints under eth_-prefixed names so the node table is
-- explicitly Ethereum-scoped.
ALTER TABLE public.nodes RENAME TO eth_nodes;
ALTER TABLE public.eth_nodes RENAME COLUMN node_id TO eth_node_id;
ALTER TABLE public.eth_nodes DROP CONSTRAINT node_uc;
ALTER TABLE public.eth_nodes
  ADD CONSTRAINT eth_node_uc UNIQUE (genesis_block, network_id, eth_node_id);
ALTER TABLE public.eth_blocks RENAME COLUMN node_id TO eth_node_id;
ALTER TABLE public.eth_blocks DROP CONSTRAINT node_id_block_number_uc;
ALTER TABLE public.eth_blocks
  ADD CONSTRAINT eth_node_id_block_number_uc UNIQUE (number, eth_node_id);
-- The FK is dropped and re-added so it tracks the renamed column/table.
ALTER TABLE public.eth_blocks DROP CONSTRAINT node_fk;
ALTER TABLE public.eth_blocks
  ADD CONSTRAINT node_fk
    FOREIGN KEY (eth_node_id) REFERENCES eth_nodes (id) ON DELETE CASCADE;
-- +goose Down
-- Exact inverse: restore the original un-prefixed names and constraints.
ALTER TABLE public.eth_nodes
  RENAME TO nodes;
ALTER TABLE public.nodes
  RENAME COLUMN eth_node_id TO node_id;
ALTER TABLE public.nodes
  DROP CONSTRAINT eth_node_uc;
ALTER TABLE public.nodes
  ADD CONSTRAINT node_uc UNIQUE (genesis_block, network_id, node_id);
ALTER TABLE public.eth_blocks RENAME COLUMN eth_node_id TO node_id;
ALTER TABLE public.eth_blocks DROP CONSTRAINT eth_node_id_block_number_uc;
ALTER TABLE public.eth_blocks
  ADD CONSTRAINT node_id_block_number_uc UNIQUE (number, node_id);
ALTER TABLE public.eth_blocks DROP CONSTRAINT node_fk;
ALTER TABLE public.eth_blocks
  ADD CONSTRAINT node_fk
    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE;

View File

@ -1,44 +0,0 @@
-- +goose Up
-- Re-point receipts from transactions to blocks: backfill block_id from the
-- owning transaction, enforce NOT NULL, add the cascading FK, then drop
-- transaction_id.
ALTER TABLE full_sync_receipts
  ADD COLUMN block_id INT;
UPDATE full_sync_receipts
SET block_id = (
  SELECT block_id FROM full_sync_transactions WHERE full_sync_transactions.id = full_sync_receipts.transaction_id
);
ALTER TABLE full_sync_receipts
  ALTER COLUMN block_id SET NOT NULL;
ALTER TABLE full_sync_receipts
  ADD CONSTRAINT eth_blocks_fk
    FOREIGN KEY (block_id)
    REFERENCES eth_blocks (id)
    ON DELETE CASCADE;
ALTER TABLE full_sync_receipts
  DROP COLUMN transaction_id;
-- +goose Down
-- Recover transaction_id by matching on tx_hash.
-- NOTE(review): hash is not declared unique on full_sync_transactions, so
-- this scalar subquery errors if the same hash appears on multiple rows —
-- confirm hash uniqueness is guaranteed upstream before relying on the Down.
ALTER TABLE full_sync_receipts
  ADD COLUMN transaction_id INT;
CREATE INDEX transaction_id_index ON full_sync_receipts (transaction_id);
UPDATE full_sync_receipts
SET transaction_id = (
  SELECT id FROM full_sync_transactions WHERE full_sync_transactions.hash = full_sync_receipts.tx_hash
);
ALTER TABLE full_sync_receipts
  ALTER COLUMN transaction_id SET NOT NULL;
ALTER TABLE full_sync_receipts
  ADD CONSTRAINT transaction_fk
    FOREIGN KEY (transaction_id)
    REFERENCES full_sync_transactions (id)
    ON DELETE CASCADE;
ALTER TABLE full_sync_receipts
  DROP COLUMN block_id;

View File

@ -1,16 +0,0 @@
-- +goose Up
-- Denormalize the node fingerprint (eth_nodes.eth_node_id) onto each block
-- so blocks can be attributed without joining eth_nodes: add nullable,
-- backfill from the FK, then enforce NOT NULL.
ALTER TABLE eth_blocks
  ADD COLUMN eth_node_fingerprint VARCHAR(128);
UPDATE eth_blocks
SET eth_node_fingerprint = (
  SELECT eth_node_id FROM eth_nodes WHERE eth_nodes.id = eth_blocks.eth_node_id
);
ALTER TABLE eth_blocks
  ALTER COLUMN eth_node_fingerprint SET NOT NULL;
-- +goose Down
ALTER TABLE eth_blocks
  DROP COLUMN eth_node_fingerprint;

View File

@ -1,16 +0,0 @@
-- +goose Up
-- Uncle (ommer) blocks, unique per (block, hash) and cascade-deleted with
-- the referencing canonical block or the source node.
CREATE TABLE public.uncles (
  id SERIAL PRIMARY KEY,
  hash VARCHAR(66) NOT NULL,
  block_id INTEGER NOT NULL REFERENCES eth_blocks (id) ON DELETE CASCADE,
  reward NUMERIC NOT NULL,
  miner VARCHAR(42) NOT NULL,
  raw JSONB,
  block_timestamp NUMERIC,
  eth_node_id INTEGER NOT NULL REFERENCES eth_nodes (id) ON DELETE CASCADE,
  eth_node_fingerprint VARCHAR(128),
  UNIQUE (block_id, hash)
);
-- +goose Down
DROP TABLE public.uncles;

View File

@ -43,7 +43,8 @@ CREATE TABLE btc.header_cids (
parent_hash character varying(66) NOT NULL,
cid text NOT NULL,
"timestamp" numeric NOT NULL,
bits bigint NOT NULL
bits bigint NOT NULL,
node_id integer NOT NULL
);
@ -182,7 +183,8 @@ CREATE TABLE eth.header_cids (
block_hash character varying(66) NOT NULL,
parent_hash character varying(66) NOT NULL,
cid text NOT NULL,
td bigint
td bigint,
node_id integer NOT NULL
);
@ -407,35 +409,6 @@ CREATE SEQUENCE public.addresses_id_seq
ALTER SEQUENCE public.addresses_id_seq OWNED BY public.addresses.id;
--
-- Name: full_sync_logs; Type: TABLE; Schema: public; Owner: -
--
CREATE TABLE public.full_sync_logs (
id integer NOT NULL,
block_number bigint,
address character varying(66),
tx_hash character varying(66),
index bigint,
topic0 character varying(66),
topic1 character varying(66),
topic2 character varying(66),
topic3 character varying(66),
data text,
receipt_id integer
);
--
-- Name: block_stats; Type: VIEW; Schema: public; Owner: -
--
CREATE VIEW public.block_stats AS
SELECT max(full_sync_logs.block_number) AS max_block,
min(full_sync_logs.block_number) AS min_block
FROM public.full_sync_logs;
--
-- Name: blocks; Type: TABLE; Schema: public; Owner: -
--
@ -476,161 +449,6 @@ CREATE SEQUENCE public.checked_headers_id_seq
ALTER SEQUENCE public.checked_headers_id_seq OWNED BY public.checked_headers.id;
--
-- Name: eth_blocks; Type: TABLE; Schema: public; Owner: -
--
CREATE TABLE public.eth_blocks (
id integer NOT NULL,
difficulty bigint,
extra_data character varying,
gas_limit bigint,
gas_used bigint,
hash character varying(66),
miner character varying(42),
nonce character varying(20),
number bigint,
parent_hash character varying(66),
reward numeric,
uncles_reward numeric,
size character varying,
"time" bigint,
is_final boolean,
uncle_hash character varying(66),
eth_node_id integer NOT NULL,
eth_node_fingerprint character varying(128) NOT NULL
);
--
-- Name: eth_blocks_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
CREATE SEQUENCE public.eth_blocks_id_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
--
-- Name: eth_blocks_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
ALTER SEQUENCE public.eth_blocks_id_seq OWNED BY public.eth_blocks.id;
--
-- Name: eth_nodes; Type: TABLE; Schema: public; Owner: -
--
CREATE TABLE public.eth_nodes (
id integer NOT NULL,
client_name character varying,
genesis_block character varying(66),
network_id numeric,
eth_node_id character varying(128)
);
--
-- Name: full_sync_logs_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
CREATE SEQUENCE public.full_sync_logs_id_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
--
-- Name: full_sync_logs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
ALTER SEQUENCE public.full_sync_logs_id_seq OWNED BY public.full_sync_logs.id;
--
-- Name: full_sync_receipts; Type: TABLE; Schema: public; Owner: -
--
CREATE TABLE public.full_sync_receipts (
id integer NOT NULL,
contract_address_id integer NOT NULL,
cumulative_gas_used numeric,
gas_used numeric,
state_root character varying(66),
status integer,
tx_hash character varying(66),
block_id integer NOT NULL
);
--
-- Name: full_sync_receipts_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
CREATE SEQUENCE public.full_sync_receipts_id_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
--
-- Name: full_sync_receipts_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
ALTER SEQUENCE public.full_sync_receipts_id_seq OWNED BY public.full_sync_receipts.id;
--
-- Name: full_sync_transactions; Type: TABLE; Schema: public; Owner: -
--
CREATE TABLE public.full_sync_transactions (
id integer NOT NULL,
block_id integer NOT NULL,
gas_limit numeric,
gas_price numeric,
hash character varying(66),
input_data bytea,
nonce numeric,
raw bytea,
tx_from character varying(66),
tx_index integer,
tx_to character varying(66),
value numeric
);
--
-- Name: full_sync_transactions_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
CREATE SEQUENCE public.full_sync_transactions_id_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
--
-- Name: full_sync_transactions_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
ALTER SEQUENCE public.full_sync_transactions_id_seq OWNED BY public.full_sync_transactions.id;
--
-- Name: goose_db_version; Type: TABLE; Schema: public; Owner: -
--
@ -792,7 +610,7 @@ CREATE TABLE public.headers (
raw jsonb,
block_timestamp numeric,
check_count integer DEFAULT 0 NOT NULL,
eth_node_id integer NOT NULL,
node_id integer NOT NULL,
eth_node_fingerprint character varying(128)
);
@ -818,45 +636,18 @@ ALTER SEQUENCE public.headers_id_seq OWNED BY public.headers.id;
--
-- Name: log_filters; Type: TABLE; Schema: public; Owner: -
-- Name: nodes; Type: TABLE; Schema: public; Owner: -
--
CREATE TABLE public.log_filters (
CREATE TABLE public.nodes (
id integer NOT NULL,
name character varying NOT NULL,
from_block bigint,
to_block bigint,
address character varying(66),
topic0 character varying(66),
topic1 character varying(66),
topic2 character varying(66),
topic3 character varying(66),
CONSTRAINT log_filters_from_block_check CHECK ((from_block >= 0)),
CONSTRAINT log_filters_name_check CHECK (((name)::text <> ''::text)),
CONSTRAINT log_filters_to_block_check CHECK ((to_block >= 0))
client_name character varying,
genesis_block character varying(66),
network_id character varying,
node_id character varying(128)
);
--
-- Name: log_filters_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
CREATE SEQUENCE public.log_filters_id_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
--
-- Name: log_filters_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
ALTER SEQUENCE public.log_filters_id_seq OWNED BY public.log_filters.id;
--
-- Name: nodes_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
@ -874,7 +665,7 @@ CREATE SEQUENCE public.nodes_id_seq
-- Name: nodes_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
ALTER SEQUENCE public.nodes_id_seq OWNED BY public.eth_nodes.id;
ALTER SEQUENCE public.nodes_id_seq OWNED BY public.nodes.id;
--
@ -941,97 +732,6 @@ CREATE SEQUENCE public.storage_diff_id_seq
ALTER SEQUENCE public.storage_diff_id_seq OWNED BY public.storage_diff.id;
--
-- Name: uncles; Type: TABLE; Schema: public; Owner: -
--
CREATE TABLE public.uncles (
id integer NOT NULL,
hash character varying(66) NOT NULL,
block_id integer NOT NULL,
reward numeric NOT NULL,
miner character varying(42) NOT NULL,
raw jsonb,
block_timestamp numeric,
eth_node_id integer NOT NULL,
eth_node_fingerprint character varying(128)
);
--
-- Name: uncles_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
CREATE SEQUENCE public.uncles_id_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
--
-- Name: uncles_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
ALTER SEQUENCE public.uncles_id_seq OWNED BY public.uncles.id;
--
-- Name: watched_contracts; Type: TABLE; Schema: public; Owner: -
--
CREATE TABLE public.watched_contracts (
contract_id integer NOT NULL,
contract_abi json,
contract_hash character varying(66)
);
--
-- Name: watched_contracts_contract_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
CREATE SEQUENCE public.watched_contracts_contract_id_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
--
-- Name: watched_contracts_contract_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
ALTER SEQUENCE public.watched_contracts_contract_id_seq OWNED BY public.watched_contracts.contract_id;
--
-- Name: watched_event_logs; Type: VIEW; Schema: public; Owner: -
--
CREATE VIEW public.watched_event_logs AS
SELECT log_filters.name,
full_sync_logs.id,
full_sync_logs.block_number,
full_sync_logs.address,
full_sync_logs.tx_hash,
full_sync_logs.index,
full_sync_logs.topic0,
full_sync_logs.topic1,
full_sync_logs.topic2,
full_sync_logs.topic3,
full_sync_logs.data,
full_sync_logs.receipt_id
FROM ((public.log_filters
CROSS JOIN public.block_stats)
JOIN public.full_sync_logs ON ((((full_sync_logs.address)::text = (log_filters.address)::text) AND (full_sync_logs.block_number >= COALESCE(log_filters.from_block, block_stats.min_block)) AND (full_sync_logs.block_number <= COALESCE(log_filters.to_block, block_stats.max_block)))))
WHERE ((((log_filters.topic0)::text = (full_sync_logs.topic0)::text) OR (log_filters.topic0 IS NULL)) AND (((log_filters.topic1)::text = (full_sync_logs.topic1)::text) OR (log_filters.topic1 IS NULL)) AND (((log_filters.topic2)::text = (full_sync_logs.topic2)::text) OR (log_filters.topic2 IS NULL)) AND (((log_filters.topic3)::text = (full_sync_logs.topic3)::text) OR (log_filters.topic3 IS NULL)));
--
-- Name: watched_logs; Type: TABLE; Schema: public; Owner: -
--
@ -1147,41 +847,6 @@ ALTER TABLE ONLY public.addresses ALTER COLUMN id SET DEFAULT nextval('public.ad
ALTER TABLE ONLY public.checked_headers ALTER COLUMN id SET DEFAULT nextval('public.checked_headers_id_seq'::regclass);
--
-- Name: eth_blocks id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.eth_blocks ALTER COLUMN id SET DEFAULT nextval('public.eth_blocks_id_seq'::regclass);
--
-- Name: eth_nodes id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.eth_nodes ALTER COLUMN id SET DEFAULT nextval('public.nodes_id_seq'::regclass);
--
-- Name: full_sync_logs id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.full_sync_logs ALTER COLUMN id SET DEFAULT nextval('public.full_sync_logs_id_seq'::regclass);
--
-- Name: full_sync_receipts id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.full_sync_receipts ALTER COLUMN id SET DEFAULT nextval('public.full_sync_receipts_id_seq'::regclass);
--
-- Name: full_sync_transactions id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.full_sync_transactions ALTER COLUMN id SET DEFAULT nextval('public.full_sync_transactions_id_seq'::regclass);
--
-- Name: goose_db_version id; Type: DEFAULT; Schema: public; Owner: -
--
@ -1218,10 +883,10 @@ ALTER TABLE ONLY public.headers ALTER COLUMN id SET DEFAULT nextval('public.head
--
-- Name: log_filters id; Type: DEFAULT; Schema: public; Owner: -
-- Name: nodes id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.log_filters ALTER COLUMN id SET DEFAULT nextval('public.log_filters_id_seq'::regclass);
ALTER TABLE ONLY public.nodes ALTER COLUMN id SET DEFAULT nextval('public.nodes_id_seq'::regclass);
--
@ -1238,20 +903,6 @@ ALTER TABLE ONLY public.queued_storage ALTER COLUMN id SET DEFAULT nextval('publ
ALTER TABLE ONLY public.storage_diff ALTER COLUMN id SET DEFAULT nextval('public.storage_diff_id_seq'::regclass);
--
-- Name: uncles id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.uncles ALTER COLUMN id SET DEFAULT nextval('public.uncles_id_seq'::regclass);
--
-- Name: watched_contracts contract_id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.watched_contracts ALTER COLUMN contract_id SET DEFAULT nextval('public.watched_contracts_contract_id_seq'::regclass);
--
-- Name: watched_logs id; Type: DEFAULT; Schema: public; Owner: -
--
@ -1451,54 +1102,6 @@ ALTER TABLE ONLY public.checked_headers
ADD CONSTRAINT checked_headers_pkey PRIMARY KEY (id);
--
-- Name: eth_blocks eth_blocks_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.eth_blocks
ADD CONSTRAINT eth_blocks_pkey PRIMARY KEY (id);
--
-- Name: eth_blocks eth_node_id_block_number_uc; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.eth_blocks
ADD CONSTRAINT eth_node_id_block_number_uc UNIQUE (number, eth_node_id);
--
-- Name: eth_nodes eth_node_uc; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.eth_nodes
ADD CONSTRAINT eth_node_uc UNIQUE (genesis_block, network_id, eth_node_id);
--
-- Name: full_sync_logs full_sync_logs_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.full_sync_logs
ADD CONSTRAINT full_sync_logs_pkey PRIMARY KEY (id);
--
-- Name: full_sync_receipts full_sync_receipts_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.full_sync_receipts
ADD CONSTRAINT full_sync_receipts_pkey PRIMARY KEY (id);
--
-- Name: full_sync_transactions full_sync_transactions_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.full_sync_transactions
ADD CONSTRAINT full_sync_transactions_pkey PRIMARY KEY (id);
--
-- Name: goose_db_version goose_db_version_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
@ -1572,18 +1175,18 @@ ALTER TABLE ONLY public.headers
--
-- Name: log_filters name_uc; Type: CONSTRAINT; Schema: public; Owner: -
-- Name: nodes node_uc; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.log_filters
ADD CONSTRAINT name_uc UNIQUE (name);
ALTER TABLE ONLY public.nodes
ADD CONSTRAINT node_uc UNIQUE (genesis_block, network_id, node_id);
--
-- Name: eth_nodes nodes_pkey; Type: CONSTRAINT; Schema: public; Owner: -
-- Name: nodes nodes_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.eth_nodes
ALTER TABLE ONLY public.nodes
ADD CONSTRAINT nodes_pkey PRIMARY KEY (id);
@ -1619,38 +1222,6 @@ ALTER TABLE ONLY public.storage_diff
ADD CONSTRAINT storage_diff_pkey PRIMARY KEY (id);
--
-- Name: uncles uncles_block_id_hash_key; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.uncles
ADD CONSTRAINT uncles_block_id_hash_key UNIQUE (block_id, hash);
--
-- Name: uncles uncles_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.uncles
ADD CONSTRAINT uncles_pkey PRIMARY KEY (id);
--
-- Name: watched_contracts watched_contracts_contract_hash_key; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.watched_contracts
ADD CONSTRAINT watched_contracts_contract_hash_key UNIQUE (contract_hash);
--
-- Name: watched_contracts watched_contracts_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.watched_contracts
ADD CONSTRAINT watched_contracts_pkey PRIMARY KEY (contract_id);
--
-- Name: watched_logs watched_logs_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
@ -1659,13 +1230,6 @@ ALTER TABLE ONLY public.watched_logs
ADD CONSTRAINT watched_logs_pkey PRIMARY KEY (id);
--
-- Name: block_id_index; Type: INDEX; Schema: public; Owner: -
--
CREATE INDEX block_id_index ON public.full_sync_transactions USING btree (block_id);
--
-- Name: header_sync_receipts_header; Type: INDEX; Schema: public; Owner: -
--
@ -1709,31 +1273,11 @@ CREATE INDEX headers_block_timestamp ON public.headers USING btree (block_timest
--
-- Name: node_id_index; Type: INDEX; Schema: public; Owner: -
-- Name: header_cids header_cids_node_id_fkey; Type: FK CONSTRAINT; Schema: btc; Owner: -
--
CREATE INDEX node_id_index ON public.eth_blocks USING btree (eth_node_id);
--
-- Name: number_index; Type: INDEX; Schema: public; Owner: -
--
CREATE INDEX number_index ON public.eth_blocks USING btree (number);
--
-- Name: tx_from_index; Type: INDEX; Schema: public; Owner: -
--
CREATE INDEX tx_from_index ON public.full_sync_transactions USING btree (tx_from);
--
-- Name: tx_to_index; Type: INDEX; Schema: public; Owner: -
--
CREATE INDEX tx_to_index ON public.full_sync_transactions USING btree (tx_to);
ALTER TABLE ONLY btc.header_cids
ADD CONSTRAINT header_cids_node_id_fkey FOREIGN KEY (node_id) REFERENCES public.nodes(id) ON DELETE CASCADE;
--
@ -1768,6 +1312,14 @@ ALTER TABLE ONLY btc.tx_outputs
ADD CONSTRAINT tx_outputs_tx_id_fkey FOREIGN KEY (tx_id) REFERENCES btc.transaction_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
--
-- Name: header_cids header_cids_node_id_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: -
--
ALTER TABLE ONLY eth.header_cids
ADD CONSTRAINT header_cids_node_id_fkey FOREIGN KEY (node_id) REFERENCES public.nodes(id) ON DELETE CASCADE;
--
-- Name: receipt_cids receipt_cids_tx_id_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: -
--
@ -1816,30 +1368,6 @@ ALTER TABLE ONLY public.checked_headers
ADD CONSTRAINT checked_headers_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.headers(id) ON DELETE CASCADE;
--
-- Name: full_sync_receipts eth_blocks_fk; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.full_sync_receipts
ADD CONSTRAINT eth_blocks_fk FOREIGN KEY (block_id) REFERENCES public.eth_blocks(id) ON DELETE CASCADE;
--
-- Name: full_sync_receipts full_sync_receipts_contract_address_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.full_sync_receipts
ADD CONSTRAINT full_sync_receipts_contract_address_id_fkey FOREIGN KEY (contract_address_id) REFERENCES public.addresses(id) ON DELETE CASCADE;
--
-- Name: full_sync_transactions full_sync_transactions_block_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.full_sync_transactions
ADD CONSTRAINT full_sync_transactions_block_id_fkey FOREIGN KEY (block_id) REFERENCES public.eth_blocks(id) ON DELETE CASCADE;
--
-- Name: header_sync_logs header_sync_logs_address_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
--
@ -1889,19 +1417,11 @@ ALTER TABLE ONLY public.header_sync_transactions
--
-- Name: headers headers_eth_node_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
-- Name: headers headers_node_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.headers
ADD CONSTRAINT headers_eth_node_id_fkey FOREIGN KEY (eth_node_id) REFERENCES public.eth_nodes(id) ON DELETE CASCADE;
--
-- Name: eth_blocks node_fk; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.eth_blocks
ADD CONSTRAINT node_fk FOREIGN KEY (eth_node_id) REFERENCES public.eth_nodes(id) ON DELETE CASCADE;
ADD CONSTRAINT headers_node_id_fkey FOREIGN KEY (node_id) REFERENCES public.nodes(id) ON DELETE CASCADE;
--
@ -1912,30 +1432,6 @@ ALTER TABLE ONLY public.queued_storage
ADD CONSTRAINT queued_storage_diff_id_fkey FOREIGN KEY (diff_id) REFERENCES public.storage_diff(id);
--
-- Name: full_sync_logs receipts_fk; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.full_sync_logs
ADD CONSTRAINT receipts_fk FOREIGN KEY (receipt_id) REFERENCES public.full_sync_receipts(id) ON DELETE CASCADE;
--
-- Name: uncles uncles_block_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.uncles
ADD CONSTRAINT uncles_block_id_fkey FOREIGN KEY (block_id) REFERENCES public.eth_blocks(id) ON DELETE CASCADE;
--
-- Name: uncles uncles_eth_node_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.uncles
ADD CONSTRAINT uncles_eth_node_id_fkey FOREIGN KEY (eth_node_id) REFERENCES public.eth_nodes(id) ON DELETE CASCADE;
--
-- PostgreSQL database dump complete
--

View File

@ -25,57 +25,3 @@ different from what we have already stored in the database, the header record wi
ipcPath = <path to a running Ethereum node>
```
- Alternatively, the ipc path can be passed as a flag instead `--client-ipcPath`.
## fullSync
Syncs blocks, transactions, receipts and logs from a running Ethereum node into VulcanizeDB tables named
`blocks`, `uncles`, `full_sync_transactions`, `full_sync_receipts` and `logs`.
- Queries the Ethereum node using RPC calls.
- Validates headers from the last 15 blocks to ensure that data is up to date.
- Useful when you want to maintain a broad cache of what's happening on the blockchain.
- Handles chain reorgs by [validating the most recent blocks' hashes](../pkg/history/header_validator.go). If the hash is
different from what we have already stored in the database, the header record will be updated.
#### Usage
- Run `./vulcanizedb fullSync --config <config.toml> --starting-block-number <block-number>`
- The config file must be formatted as follows, and should contain an ipc path to a running Ethereum node:
```toml
[database]
name = "vulcanize_public"
hostname = "localhost"
user = "vulcanize"
password = "vulcanize"
port = 5432
[client]
ipcPath = <path to a running Ethereum node>
```
- Alternatively, the ipc path can be passed as a flag instead `--client-ipcPath`.
*Please note, that if you are fast syncing your Ethereum node, wait for the initial sync to finish.*
## coldImport
Syncs VulcanizeDB from Geth's underlying LevelDB datastore and persists Ethereum blocks,
transactions, receipts and logs into VulcanizeDB tables named `blocks`, `uncles`,
`full_sync_transactions`, `full_sync_receipts` and `logs` respectively.
#### Usage
1. Ensure the Ethereum node you're point at is not running, and that it has synced to the desired block height.
1. Run `./vulcanizedb coldImport --config <config.toml>`
1. Optional flags:
- `--starting-block-number <block number>`/`-s <block number>`: block number to start syncing from
- `--ending-block-number <block number>`/`-e <block number>`: block number to sync to
- `--all`/`-a`: sync all missing blocks
The config file can be formatted as follows, and must contain the LevelDB path.
```toml
[database]
name = "vulcanize_public"
hostname = "localhost"
user = "vulcanize"
password = "vulcanize"
port = 5432
[client]
leveldbpath = "/Users/user/Library/Ethereum/geth/chaindata"
```

View File

@ -1,7 +0,0 @@
[database]
name = "vulcanize_public"
hostname = "localhost"
port = 5432
[client]
ipcPath = ""

View File

@ -1,25 +0,0 @@
[superNode]
chain = "ethereum"
ipfsPath = "/root/.ipfs"
[superNode.database]
name = "vulcanize_public"
hostname = "localhost"
port = 5432
user = "ec2-user"
[superNode.sync]
on = true
wsPath = "ws://127.0.0.1:8546"
workers = 1
[superNode.server]
on = true
ipcPath = "/root/.vulcanize/vulcanize.ipc"
wsPath = "127.0.0.1:8080"
[superNode.backFill]
on = true
httpPath = "http://127.0.0.1:8545"
frequency = 5
batchSize = 50

View File

@ -1,338 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package integration
import (
"fmt"
"math/rand"
"strings"
"time"
"github.com/ethereum/go-ethereum/common"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/full/transformer"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/constants"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/helpers/test_helpers"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/helpers/test_helpers/mocks"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres/repositories"
)
var _ = Describe("contractWatcher full transformer", func() {
var db *postgres.DB
var err error
var blockChain core.BlockChain
var blockRepository repositories.BlockRepository
var ensAddr = strings.ToLower(constants.EnsContractAddress)
var tusdAddr = strings.ToLower(constants.TusdContractAddress)
rand.Seed(time.Now().UnixNano())
BeforeEach(func() {
db, blockChain = test_helpers.SetupDBandBC()
blockRepository = *repositories.NewBlockRepository(db)
})
AfterEach(func() {
test_helpers.TearDown(db)
})
Describe("Init", func() {
It("Initializes transformer's contract objects", func() {
_, insertErr := blockRepository.CreateOrUpdateBlock(mocks.TransferBlock1)
Expect(insertErr).NotTo(HaveOccurred())
_, insertErrTwo := blockRepository.CreateOrUpdateBlock(mocks.TransferBlock2)
Expect(insertErrTwo).NotTo(HaveOccurred())
t := transformer.NewTransformer(test_helpers.TusdConfig, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
c, ok := t.Contracts[tusdAddr]
Expect(ok).To(Equal(true))
Expect(c.StartingBlock).To(Equal(int64(6194633)))
Expect(c.Abi).To(Equal(constants.TusdAbiString))
Expect(c.Name).To(Equal("TrueUSD"))
Expect(c.Address).To(Equal(tusdAddr))
})
It("Fails to initialize if first and most recent blocks cannot be fetched from vDB", func() {
t := transformer.NewTransformer(test_helpers.TusdConfig, blockChain, db)
err = t.Init()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("no rows in result set"))
})
It("Does nothing if watched events are unset", func() {
_, insertErr := blockRepository.CreateOrUpdateBlock(mocks.TransferBlock1)
Expect(insertErr).NotTo(HaveOccurred())
_, insertErrTwo := blockRepository.CreateOrUpdateBlock(mocks.TransferBlock2)
Expect(insertErrTwo).NotTo(HaveOccurred())
var testConf config.ContractConfig
testConf = test_helpers.TusdConfig
testConf.Events = nil
t := transformer.NewTransformer(testConf, blockChain, db)
err = t.Init()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("no filters created"))
_, ok := t.Contracts[tusdAddr]
Expect(ok).To(Equal(false))
})
})
Describe("Execute", func() {
BeforeEach(func() {
_, insertErr := blockRepository.CreateOrUpdateBlock(mocks.TransferBlock1)
Expect(insertErr).NotTo(HaveOccurred())
_, insertErrTwo := blockRepository.CreateOrUpdateBlock(mocks.TransferBlock2)
Expect(insertErrTwo).NotTo(HaveOccurred())
})
It("Transforms watched contract data into custom repositories", func() {
t := transformer.NewTransformer(test_helpers.TusdConfig, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
log := test_helpers.TransferLog{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM full_%s.transfer_event WHERE block = 6194634", tusdAddr)).StructScan(&log)
// We don't know vulcID, so compare individual fields instead of complete structures
Expect(log.Tx).To(Equal("0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad654eee"))
Expect(log.Block).To(Equal(int64(6194634)))
Expect(log.From).To(Equal("0x000000000000000000000000000000000000Af21"))
Expect(log.To).To(Equal("0x09BbBBE21a5975cAc061D82f7b843bCE061BA391"))
Expect(log.Value).To(Equal("1097077688018008265106216665536940668749033598146"))
})
It("Keeps track of contract-related addresses while transforming event data if they need to be used for later method polling", func() {
var testConf config.ContractConfig
testConf = test_helpers.TusdConfig
testConf.Methods = map[string][]string{
tusdAddr: {"balanceOf"},
}
t := transformer.NewTransformer(testConf, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
c, ok := t.Contracts[tusdAddr]
Expect(ok).To(Equal(true))
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
b, ok := c.EmittedAddrs[common.HexToAddress("0x000000000000000000000000000000000000Af21")]
Expect(ok).To(Equal(true))
Expect(b).To(Equal(true))
b, ok = c.EmittedAddrs[common.HexToAddress("0x09BbBBE21a5975cAc061D82f7b843bCE061BA391")]
Expect(ok).To(Equal(true))
Expect(b).To(Equal(true))
_, ok = c.EmittedAddrs[common.HexToAddress("0x09BbBBE21a5975cAc061D82f7b843b1234567890")]
Expect(ok).To(Equal(false))
_, ok = c.EmittedAddrs[common.HexToAddress("0x")]
Expect(ok).To(Equal(false))
_, ok = c.EmittedAddrs[""]
Expect(ok).To(Equal(false))
_, ok = c.EmittedAddrs[common.HexToAddress("0x09THISE21a5IS5cFAKE1D82fAND43bCE06MADEUP")]
Expect(ok).To(Equal(false))
})
It("Polls given methods using generated token holder address", func() {
var testConf config.ContractConfig
testConf = test_helpers.TusdConfig
testConf.Methods = map[string][]string{
tusdAddr: {"balanceOf"},
}
t := transformer.NewTransformer(testConf, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
res := test_helpers.BalanceOf{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM full_%s.balanceof_method WHERE who_ = '0x000000000000000000000000000000000000Af21' AND block = '6194634'", tusdAddr)).StructScan(&res)
Expect(err).ToNot(HaveOccurred())
Expect(res.Balance).To(Equal("0"))
Expect(res.TokenName).To(Equal("TrueUSD"))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM full_%s.balanceof_method WHERE who_ = '0x09BbBBE21a5975cAc061D82f7b843bCE061BA391' AND block = '6194634'", tusdAddr)).StructScan(&res)
Expect(err).ToNot(HaveOccurred())
Expect(res.Balance).To(Equal("0"))
Expect(res.TokenName).To(Equal("TrueUSD"))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM full_%s.balanceof_method WHERE who_ = '0xfE9e8709d3215310075d67E3ed32A380CCf451C8' AND block = '6194634'", tusdAddr)).StructScan(&res)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("no rows in result set"))
})
It("Fails if initialization has not been done", func() {
t := transformer.NewTransformer(test_helpers.TusdConfig, blockChain, db)
err = t.Execute()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("transformer has no initialized contracts to work with"))
})
})
Describe("Execute- against ENS registry contract", func() {
BeforeEach(func() {
_, insertErr := blockRepository.CreateOrUpdateBlock(mocks.NewOwnerBlock1)
Expect(insertErr).NotTo(HaveOccurred())
_, insertErrTwo := blockRepository.CreateOrUpdateBlock(mocks.NewOwnerBlock2)
Expect(insertErrTwo).NotTo(HaveOccurred())
})
It("Transforms watched contract data into custom repositories", func() {
t := transformer.NewTransformer(test_helpers.ENSConfig, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
log := test_helpers.NewOwnerLog{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM full_%s.newowner_event", ensAddr)).StructScan(&log)
// We don't know vulcID, so compare individual fields instead of complete structures
Expect(log.Tx).To(Equal("0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad654bbb"))
Expect(log.Block).To(Equal(int64(6194635)))
Expect(log.Node).To(Equal("0x0000000000000000000000000000000000000000000000000000c02aaa39b223"))
Expect(log.Label).To(Equal("0x9dd48110dcc444fdc242510c09bbbbe21a5975cac061d82f7b843bce061ba391"))
Expect(log.Owner).To(Equal("0x000000000000000000000000000000000000Af21"))
})
It("Keeps track of contract-related hashes while transforming event data if they need to be used for later method polling", func() {
var testConf config.ContractConfig
testConf = test_helpers.ENSConfig
testConf.Methods = map[string][]string{
ensAddr: {"owner"},
}
t := transformer.NewTransformer(testConf, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
c, ok := t.Contracts[ensAddr]
Expect(ok).To(Equal(true))
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
Expect(len(c.EmittedHashes)).To(Equal(3))
b, ok := c.EmittedHashes[common.HexToHash("0x0000000000000000000000000000000000000000000000000000c02aaa39b223")]
Expect(ok).To(Equal(true))
Expect(b).To(Equal(true))
b, ok = c.EmittedHashes[common.HexToHash("0x9dd48110dcc444fdc242510c09bbbbe21a5975cac061d82f7b843bce061ba391")]
Expect(ok).To(Equal(true))
Expect(b).To(Equal(true))
// Doesn't keep track of address since it wouldn't be used in calling the 'owner' method
_, ok = c.EmittedAddrs[common.HexToAddress("0x000000000000000000000000000000000000Af21")]
Expect(ok).To(Equal(false))
})
It("Polls given methods using generated token holder address", func() {
var testConf config.ContractConfig
testConf = test_helpers.ENSConfig
testConf.Methods = map[string][]string{
ensAddr: {"owner"},
}
t := transformer.NewTransformer(testConf, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
res := test_helpers.Owner{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM full_%s.owner_method WHERE node_ = '0x0000000000000000000000000000000000000000000000000000c02aaa39b223' AND block = '6194636'", ensAddr)).StructScan(&res)
Expect(err).ToNot(HaveOccurred())
Expect(res.Address).To(Equal("0x0000000000000000000000000000000000000000"))
Expect(res.TokenName).To(Equal(""))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM full_%s.owner_method WHERE node_ = '0x9dd48110dcc444fdc242510c09bbbbe21a5975cac061d82f7b843bce061ba391' AND block = '6194636'", ensAddr)).StructScan(&res)
Expect(err).ToNot(HaveOccurred())
Expect(res.Address).To(Equal("0x0000000000000000000000000000000000000000"))
Expect(res.TokenName).To(Equal(""))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM full_%s.owner_method WHERE node_ = '0x9THIS110dcc444fIS242510c09bbAbe21aFAKEcacNODE82f7b843HASH61ba391' AND block = '6194636'", ensAddr)).StructScan(&res)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("no rows in result set"))
})
It("It does not perist events if they do not pass the emitted arg filter", func() {
var testConf config.ContractConfig
testConf = test_helpers.ENSConfig
testConf.EventArgs = map[string][]string{
ensAddr: {"fake_filter_value"},
}
t := transformer.NewTransformer(testConf, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
log := test_helpers.HeaderSyncNewOwnerLog{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM full_%s.newowner_event", ensAddr)).StructScan(&log)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("does not exist"))
})
It("If a method arg filter is applied, only those arguments are used in polling", func() {
var testConf config.ContractConfig
testConf = test_helpers.ENSConfig
testConf.MethodArgs = map[string][]string{
ensAddr: {"0x0000000000000000000000000000000000000000000000000000c02aaa39b223"},
}
testConf.Methods = map[string][]string{
ensAddr: {"owner"},
}
t := transformer.NewTransformer(testConf, blockChain, db)
err = t.Init()
Expect(err).ToNot(HaveOccurred())
err = t.Execute()
Expect(err).ToNot(HaveOccurred())
res := test_helpers.Owner{}
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM full_%s.owner_method WHERE node_ = '0x0000000000000000000000000000000000000000000000000000c02aaa39b223' AND block = '6194636'", ensAddr)).StructScan(&res)
Expect(err).ToNot(HaveOccurred())
Expect(res.Address).To(Equal("0x0000000000000000000000000000000000000000"))
Expect(res.TokenName).To(Equal(""))
err = db.QueryRowx(fmt.Sprintf("SELECT * FROM full_%s.owner_method WHERE node_ = '0x9dd48110dcc444fdc242510c09bbbbe21a5975cac061d82f7b843bce061ba391' AND block = '6194636'", ensAddr)).StructScan(&res)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("no rows in result set"))
})
})
})

View File

@ -30,8 +30,8 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/helpers/test_helpers"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/helpers/test_helpers/mocks"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres/repositories"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
var _ = Describe("contractWatcher headerSync transformer", func() {

View File

@ -27,8 +27,6 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/eth/client"
rpc2 "github.com/vulcanize/vulcanizedb/pkg/eth/converters/rpc"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
"github.com/vulcanize/vulcanizedb/pkg/eth/history"
"github.com/vulcanize/vulcanizedb/pkg/eth/node"
"github.com/vulcanize/vulcanizedb/test_config"
)
@ -47,19 +45,6 @@ var _ = Describe("Reading from the Geth blockchain", func() {
blockChain = eth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter)
})
It("reads two blocks", func(done Done) {
blocks := fakes.NewMockBlockRepository()
lastBlock, err := blockChain.LastBlock()
Expect(err).NotTo(HaveOccurred())
queriedBlocks := []int64{lastBlock.Int64() - 5, lastBlock.Int64() - 6}
_, err = history.RetrieveAndUpdateBlocks(blockChain, blocks, queriedBlocks)
Expect(err).NotTo(HaveOccurred())
blocks.AssertCreateOrUpdateBlocksCallCountAndBlockNumbersEquals(2, []int64{lastBlock.Int64() - 5, lastBlock.Int64() - 6})
close(done)
}, 30)
It("retrieves the genesis block and first block", func(done Done) {
genesisBlock, err := blockChain.GetBlockByNumber(int64(0))
Expect(err).ToNot(HaveOccurred())

View File

@ -29,7 +29,7 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/poller"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/types"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
var _ = Describe("Poller", func() {

View File

@ -18,7 +18,7 @@ package event
import (
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
// Converter transforms log data into general InsertionModels the Repository can persist__

View File

@ -24,7 +24,7 @@ import (
"github.com/vulcanize/vulcanizedb/utils"
"github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
const SetLogTransformedQuery = `UPDATE public.header_sync_logs SET transformed = true WHERE id = $1`

View File

@ -24,9 +24,9 @@ import (
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/libraries/shared/factories/event"
"github.com/vulcanize/vulcanizedb/libraries/shared/test_data"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres/repositories"
"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
"github.com/vulcanize/vulcanizedb/test_config"
)

View File

@ -20,7 +20,7 @@ import (
"github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type Transformer struct {

View File

@ -19,7 +19,7 @@ package storage
import (
"github.com/ethereum/go-ethereum/common"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type KeysLoader interface {

View File

@ -19,7 +19,7 @@ package storage
import (
"github.com/ethereum/go-ethereum/common"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type KeysLookup interface {

View File

@ -18,7 +18,7 @@ package storage
import (
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type Repository interface {

View File

@ -20,7 +20,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type Transformer struct {

View File

@ -19,7 +19,7 @@ package mocks
import (
"github.com/vulcanize/vulcanizedb/libraries/shared/factories/event"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type MockConverter struct {

View File

@ -18,7 +18,7 @@ package mocks
import (
"github.com/vulcanize/vulcanizedb/libraries/shared/factories/event"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type MockEventRepository struct {

View File

@ -19,8 +19,8 @@ package mocks
import (
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type MockEventTransformer struct {

View File

@ -19,7 +19,7 @@ package mocks
import (
"github.com/ethereum/go-ethereum/common"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type MockStorageKeysLoader struct {

View File

@ -19,7 +19,7 @@ package mocks
import (
"github.com/ethereum/go-ethereum/common"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type MockStorageKeysLookup struct {

View File

@ -18,7 +18,7 @@ package mocks
import (
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type MockStorageRepository struct {

View File

@ -21,7 +21,7 @@ import (
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
// MockStorageTransformer for tests

View File

@ -33,7 +33,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/jmoiron/sqlx"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
const getOrCreateAddressQuery = `WITH addressId AS (

View File

@ -25,8 +25,8 @@ import (
"github.com/vulcanize/vulcanizedb/libraries/shared/repository"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
"github.com/vulcanize/vulcanizedb/test_config"
)

View File

@ -18,7 +18,7 @@ package storage
import (
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type IStorageQueue interface {

View File

@ -22,8 +22,8 @@ import (
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres/repositories"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
"github.com/vulcanize/vulcanizedb/test_config"
)

View File

@ -7,8 +7,8 @@ import (
"github.com/ethereum/go-ethereum/core/types"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres/repositories"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
// Create a header sync log to reference in an event, returning inserted header sync log

View File

@ -21,8 +21,8 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres/repositories"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type ITransactionsSyncer interface {

View File

@ -19,7 +19,7 @@ package transformer
import (
"github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type ContractTransformer interface {

View File

@ -19,7 +19,7 @@ package transformer
import (
"github.com/ethereum/go-ethereum/common"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type EventTransformer interface {

View File

@ -19,7 +19,7 @@ package transformer
import (
"github.com/ethereum/go-ethereum/common"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type StorageTransformer interface {

View File

@ -18,7 +18,7 @@ package transformer
import (
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

View File

@ -27,7 +27,7 @@ import (
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type ContractWatcher struct {

View File

@ -28,8 +28,8 @@ import (
"github.com/vulcanize/vulcanizedb/libraries/shared/transactions"
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres/repositories"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
const NoNewDataPause = time.Second * 7
@ -44,7 +44,6 @@ type EventWatcher struct {
func NewEventWatcher(db *postgres.DB, bc core.BlockChain) EventWatcher {
extractor := &logs.LogExtractor{
CheckedHeadersRepository: repositories.NewCheckedHeadersRepository(db),
CheckedLogsRepository: repositories.NewCheckedLogsRepository(db),
Fetcher: fetcher.NewLogFetcher(bc),
LogRepository: repositories.NewHeaderSyncLogRepository(db),
Syncer: transactions.NewTransactionsSyncer(db, bc),

View File

@ -28,8 +28,8 @@ import (
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres/repositories"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
)
type IStorageWatcher interface {

View File

@ -78,7 +78,7 @@ func (blockChain *BlockChain) GetEthLogsWithCustomQuery(query ethereum.FilterQue
func (blockChain *BlockChain) GetHeaderByNumber(blockNumber int64) (header core.Header, err error) {
logrus.Debugf("GetHeaderByNumber called with block %d", blockNumber)
if blockChain.node.NetworkID == core.KOVAN_NETWORK_ID {
if blockChain.node.NetworkID == string(core.KOVAN_NETWORK_ID) {
return blockChain.getPOAHeader(blockNumber)
}
return blockChain.getPOWHeader(blockNumber)
@ -86,7 +86,7 @@ func (blockChain *BlockChain) GetHeaderByNumber(blockNumber int64) (header core.
func (blockChain *BlockChain) GetHeadersByNumbers(blockNumbers []int64) (header []core.Header, err error) {
logrus.Debug("GetHeadersByNumbers called")
if blockChain.node.NetworkID == core.KOVAN_NETWORK_ID {
if blockChain.node.NetworkID == string(core.KOVAN_NETWORK_ID) {
return blockChain.getPOAHeaders(blockNumbers)
}
return blockChain.getPOWHeaders(blockNumbers)

View File

@ -102,7 +102,7 @@ var _ = Describe("Geth blockchain", func() {
Describe("POA/Kovan", func() {
It("fetches header from rpcClient", func() {
node.NetworkID = vulcCore.KOVAN_NETWORK_ID
node.NetworkID = string(vulcCore.KOVAN_NETWORK_ID)
blockNumber := hexutil.Big(*big.NewInt(100))
mockRpcClient.SetReturnPOAHeader(vulcCore.POAHeader{Number: &blockNumber})
blockChain = eth.NewBlockChain(mockClient, mockRpcClient, node, fakes.NewMockTransactionConverter())
@ -114,7 +114,7 @@ var _ = Describe("Geth blockchain", func() {
})
It("returns err if rpcClient returns err", func() {
node.NetworkID = vulcCore.KOVAN_NETWORK_ID
node.NetworkID = string(vulcCore.KOVAN_NETWORK_ID)
mockRpcClient.SetCallContextErr(fakes.FakeError)
blockChain = eth.NewBlockChain(mockClient, mockRpcClient, node, fakes.NewMockTransactionConverter())
@ -125,7 +125,7 @@ var _ = Describe("Geth blockchain", func() {
})
It("returns error if returned header is empty", func() {
node.NetworkID = vulcCore.KOVAN_NETWORK_ID
node.NetworkID = string(vulcCore.KOVAN_NETWORK_ID)
blockChain = eth.NewBlockChain(mockClient, mockRpcClient, node, fakes.NewMockTransactionConverter())
_, err := blockChain.GetHeaderByNumber(100)
@ -135,7 +135,7 @@ var _ = Describe("Geth blockchain", func() {
})
It("returns multiple headers with multiple blocknumbers", func() {
node.NetworkID = vulcCore.KOVAN_NETWORK_ID
node.NetworkID = string(vulcCore.KOVAN_NETWORK_ID)
blockNumber := hexutil.Big(*big.NewInt(100))
mockRpcClient.SetReturnPOAHeaders([]vulcCore.POAHeader{{Number: &blockNumber}})

View File

@ -1,29 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cold_import_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// TestColdImport registers the Gomega fail handler and runs the
// ColdImport Ginkgo suite under the standard `go test` runner.
func TestColdImport(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "ColdImport Suite")
}

View File

@ -1,75 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cold_import
import (
"github.com/vulcanize/vulcanizedb/pkg/eth/converters/common"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/ethereum"
)
// ColdImporter moves raw chain data out of a cold (offline) geth LevelDB
// store and persists it into the VulcanizeDB Postgres database.
type ColdImporter struct {
	// blockRepository is the Postgres sink for blocks and their transactions.
	blockRepository datastore.BlockRepository
	// converter translates geth block types into core block models.
	converter common.BlockConverter
	// ethDB is the cold LevelDB source being imported from.
	ethDB ethereum.Database
	// receiptRepository is the Postgres sink for receipts and logs.
	receiptRepository datastore.FullSyncReceiptRepository
}
// NewColdImporter wires a ColdImporter with its LevelDB source, its Postgres
// block and receipt sinks, and the converter used between them.
func NewColdImporter(ethDB ethereum.Database, blockRepository datastore.BlockRepository, receiptRepository datastore.FullSyncReceiptRepository, converter common.BlockConverter) *ColdImporter {
	importer := ColdImporter{
		ethDB:             ethDB,
		blockRepository:   blockRepository,
		receiptRepository: receiptRepository,
		converter:         converter,
	}
	return &importer
}
// Execute imports every block in [startingBlockNumber, endingBlockNumber]
// that is missing for the given node: for each missing block it pulls block,
// transaction, receipt, and log data out of the cold LevelDB store and
// persists them to Postgres, then finalizes block statuses up to
// endingBlockNumber. Returns the first persistence error encountered.
func (ci *ColdImporter) Execute(startingBlockNumber int64, endingBlockNumber int64, nodeID string) error {
	missingBlocks := ci.blockRepository.MissingBlockNumbers(startingBlockNumber, endingBlockNumber, nodeID)
	for _, n := range missingBlocks {
		hash := ci.ethDB.GetBlockHash(n)
		blockID, err := ci.createBlocksAndTransactions(hash, n)
		if err != nil {
			return err
		}
		err = ci.createReceiptsAndLogs(hash, n, blockID)
		if err != nil {
			return err
		}
	}
	// NOTE(review): any result of SetBlocksStatus is discarded here — confirm
	// the repository method's signature and whether a failure should surface.
	ci.blockRepository.SetBlocksStatus(endingBlockNumber)
	return nil
}
// createBlocksAndTransactions loads the block with the given hash/number from
// the cold store, converts it to a core block, and upserts it (with its
// transactions) into Postgres, returning the inserted block's database id.
func (ci *ColdImporter) createBlocksAndTransactions(hash []byte, i int64) (int64, error) {
	block := ci.ethDB.GetBlock(hash, i)
	coreBlock, err := ci.converter.ToCoreBlock(block)
	if err != nil {
		return 0, err
	}
	return ci.blockRepository.CreateOrUpdateBlock(coreBlock)
}
// createReceiptsAndLogs loads the receipts for the block with the given
// hash/number from the cold store, converts them to core receipts, and
// persists them (with their logs) against the given block database id.
func (ci *ColdImporter) createReceiptsAndLogs(hash []byte, number int64, blockID int64) error {
	receipts := ci.ethDB.GetBlockReceipts(hash, number)
	coreReceipts, err := common.ToCoreReceipts(receipts)
	if err != nil {
		return err
	}
	return ci.receiptRepository.CreateReceiptsAndLogs(blockID, coreReceipts)
}

View File

@ -1,152 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cold_import_test
import (
"github.com/ethereum/go-ethereum/core/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/eth/cold_import"
vulcCommon "github.com/vulcanize/vulcanizedb/pkg/eth/converters/common"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres/repositories"
"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
)
// Spec for the cold importer: exercises missing-block detection, LevelDB
// fetch + Postgres persistence, finalization, and the skip path when a block
// already exists, all against mocked collaborators.
var _ = Describe("Geth cold importer", func() {
	var fakeGethBlock *types.Block
	BeforeEach(func() {
		header := &types.Header{}
		transactions := []*types.Transaction{}
		uncles := []*types.Header{}
		receipts := []*types.Receipt{}
		fakeGethBlock = types.NewBlock(header, transactions, uncles, receipts)
	})
	It("only populates missing blocks", func() {
		mockEthereumDatabase := fakes.NewMockEthereumDatabase()
		mockBlockRepository := fakes.NewMockBlockRepository()
		mockReceiptRepository := fakes.NewMockReceiptRepository()
		mockTransactionConverter := fakes.NewMockTransactionConverter()
		blockConverter := vulcCommon.NewBlockConverter(mockTransactionConverter)
		nodeId := "node_id"
		startingBlockNumber := int64(120)
		missingBlockNumber := int64(123)
		endingBlockNumber := int64(125)
		fakeHash := []byte{1, 2, 3, 4, 5}
		// Only 123 is reported missing; the importer must fetch exactly that block.
		mockBlockRepository.SetMissingBlockNumbersReturnArray([]int64{missingBlockNumber})
		mockEthereumDatabase.SetReturnHash(fakeHash)
		mockEthereumDatabase.SetReturnBlock(fakeGethBlock)
		importer := cold_import.NewColdImporter(mockEthereumDatabase, mockBlockRepository, mockReceiptRepository, blockConverter)
		err := importer.Execute(startingBlockNumber, endingBlockNumber, nodeId)
		Expect(err).NotTo(HaveOccurred())
		mockBlockRepository.AssertMissingBlockNumbersCalledWith(startingBlockNumber, endingBlockNumber, nodeId)
		mockEthereumDatabase.AssertGetBlockHashCalledWith(missingBlockNumber)
		mockEthereumDatabase.AssertGetBlockCalledWith(fakeHash, missingBlockNumber)
	})
	It("fetches missing blocks from level db and persists them to pg", func() {
		mockEthereumDatabase := fakes.NewMockEthereumDatabase()
		mockBlockRepository := fakes.NewMockBlockRepository()
		mockReceiptRepository := fakes.NewMockReceiptRepository()
		mockTransactionConverter := fakes.NewMockTransactionConverter()
		blockConverter := vulcCommon.NewBlockConverter(mockTransactionConverter)
		blockNumber := int64(123)
		fakeHash := []byte{1, 2, 3, 4, 5}
		mockBlockRepository.SetMissingBlockNumbersReturnArray([]int64{blockNumber})
		mockEthereumDatabase.SetReturnHash(fakeHash)
		mockEthereumDatabase.SetReturnBlock(fakeGethBlock)
		importer := cold_import.NewColdImporter(mockEthereumDatabase, mockBlockRepository, mockReceiptRepository, blockConverter)
		err := importer.Execute(blockNumber, blockNumber, "node_id")
		Expect(err).NotTo(HaveOccurred())
		mockEthereumDatabase.AssertGetBlockHashCalledWith(blockNumber)
		mockEthereumDatabase.AssertGetBlockCalledWith(fakeHash, blockNumber)
		Expect(mockTransactionConverter.ConvertBlockTransactionsToCoreCalled).To(BeTrue())
		Expect(mockTransactionConverter.ConvertBlockTransactionsToCorePassedBlock).To(Equal(fakeGethBlock))
		// The persisted block must equal the converter's own output for the same input.
		convertedBlock, err := blockConverter.ToCoreBlock(fakeGethBlock)
		Expect(err).NotTo(HaveOccurred())
		mockBlockRepository.AssertCreateOrUpdateBlockCalledWith(convertedBlock)
	})
	It("sets is_final status on populated blocks", func() {
		mockEthereumDatabase := fakes.NewMockEthereumDatabase()
		mockBlockRepository := fakes.NewMockBlockRepository()
		mockReceiptRepository := fakes.NewMockReceiptRepository()
		mockTransactionConverter := fakes.NewMockTransactionConverter()
		blockConverter := vulcCommon.NewBlockConverter(mockTransactionConverter)
		startingBlockNumber := int64(120)
		endingBlockNumber := int64(125)
		fakeHash := []byte{1, 2, 3, 4, 5}
		mockBlockRepository.SetMissingBlockNumbersReturnArray([]int64{startingBlockNumber})
		mockEthereumDatabase.SetReturnHash(fakeHash)
		mockEthereumDatabase.SetReturnBlock(fakeGethBlock)
		importer := cold_import.NewColdImporter(mockEthereumDatabase, mockBlockRepository, mockReceiptRepository, blockConverter)
		err := importer.Execute(startingBlockNumber, endingBlockNumber, "node_id")
		Expect(err).NotTo(HaveOccurred())
		mockBlockRepository.AssertSetBlockStatusCalledWith(endingBlockNumber)
	})
	It("fetches receipts from level db and persists them to pg", func() {
		mockEthereumDatabase := fakes.NewMockEthereumDatabase()
		mockBlockRepository := fakes.NewMockBlockRepository()
		mockReceiptRepository := fakes.NewMockReceiptRepository()
		mockTransactionConverter := fakes.NewMockTransactionConverter()
		blockConverter := vulcCommon.NewBlockConverter(mockTransactionConverter)
		blockNumber := int64(123)
		blockId := int64(999)
		mockBlockRepository.SetCreateOrUpdateBlockReturnVals(blockId, nil)
		fakeReceipts := types.Receipts{{}}
		mockBlockRepository.SetMissingBlockNumbersReturnArray([]int64{blockNumber})
		mockEthereumDatabase.SetReturnBlock(fakeGethBlock)
		mockEthereumDatabase.SetReturnReceipts(fakeReceipts)
		importer := cold_import.NewColdImporter(mockEthereumDatabase, mockBlockRepository, mockReceiptRepository, blockConverter)
		err := importer.Execute(blockNumber, blockNumber, "node_id")
		Expect(err).NotTo(HaveOccurred())
		expectedReceipts, err := vulcCommon.ToCoreReceipts(fakeReceipts)
		Expect(err).ToNot(HaveOccurred())
		mockReceiptRepository.AssertCreateReceiptsAndLogsCalledWith(blockId, expectedReceipts)
	})
	It("does not fetch receipts if block already exists", func() {
		mockEthereumDatabase := fakes.NewMockEthereumDatabase()
		mockBlockRepository := fakes.NewMockBlockRepository()
		mockReceiptRepository := fakes.NewMockReceiptRepository()
		mockTransactionConverter := fakes.NewMockTransactionConverter()
		blockConverter := vulcCommon.NewBlockConverter(mockTransactionConverter)
		blockNumber := int64(123)
		mockBlockRepository.SetMissingBlockNumbersReturnArray([]int64{})
		mockEthereumDatabase.SetReturnBlock(fakeGethBlock)
		// ErrBlockExists must short-circuit receipt persistence, not fail the import.
		mockBlockRepository.SetCreateOrUpdateBlockReturnVals(0, repositories.ErrBlockExists)
		importer := cold_import.NewColdImporter(mockEthereumDatabase, mockBlockRepository, mockReceiptRepository, blockConverter)
		err := importer.Execute(blockNumber, blockNumber, "node_id")
		Expect(err).NotTo(HaveOccurred())
		mockReceiptRepository.AssertCreateReceiptsAndLogsNotCalled()
	})
})

View File

@ -1,85 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cold_import
import (
"errors"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/crypto"
"github.com/vulcanize/vulcanizedb/pkg/fs"
)
const (
	// ColdImportClientName identifies blocks ingested via LevelDB cold import.
	ColdImportClientName = "LevelDbColdImport"
	// ColdImportNetworkID is the network ID recorded for cold-imported nodes (mainnet).
	ColdImportNetworkID float64 = 1
)

// Sentinel errors returned by getNodeKeyPath. Idiomatic naming would be
// ErrNoChainData / ErrNoGethRoot, but the exported names are kept for API
// compatibility; the messages are normalized to Go convention (lowercase,
// no trailing punctuation — staticcheck ST1005).
var (
	NoChainDataErr = errors.New("level db path does not include chaindata extension")
	NoGethRootErr  = errors.New("level db path does not include root path to geth")
)
// ColdImportNodeBuilder constructs a core.Node for a cold import by reading
// geth's nodekey file and parsing the node's public key from it.
type ColdImportNodeBuilder struct {
	// reader loads the nodekey file from disk.
	reader fs.Reader
	// parser derives the node's public-key ID from the nodekey contents.
	parser crypto.PublicKeyParser
}

// NewColdImportNodeBuilder returns a ColdImportNodeBuilder using the given
// filesystem reader and public key parser.
func NewColdImportNodeBuilder(reader fs.Reader, parser crypto.PublicKeyParser) ColdImportNodeBuilder {
	return ColdImportNodeBuilder{reader: reader, parser: parser}
}
// GetNode builds a core.Node describing the cold-import source: it locates
// the nodekey file relative to the level DB path, reads and parses it into a
// node ID, and combines that with the genesis block hash and the cold-import
// client/network constants. Returns a zero Node alongside any error.
func (cinb ColdImportNodeBuilder) GetNode(genesisBlock []byte, levelPath string) (core.Node, error) {
	keyPath, pathErr := getNodeKeyPath(levelPath)
	if pathErr != nil {
		return core.Node{}, pathErr
	}
	rawKey, readErr := cinb.reader.Read(keyPath)
	if readErr != nil {
		return core.Node{}, readErr
	}
	parsedID, parseErr := cinb.parser.ParsePublicKey(string(rawKey))
	if parseErr != nil {
		return core.Node{}, parseErr
	}
	return core.Node{
		GenesisBlock: common.BytesToHash(genesisBlock).String(),
		NetworkID:    ColdImportNetworkID,
		ID:           parsedID,
		ClientName:   ColdImportClientName,
	}, nil
}
// getNodeKeyPath derives the path to geth's nodekey file from the level DB
// (chaindata) path: "root/path/to/geth/chaindata" -> "root/path/to/geth/nodekey".
// Returns NoChainDataErr when the path does not end in "chaindata" and
// NoGethRootErr when there is no meaningful geth root prefix.
func getNodeKeyPath(levelPath string) (string, error) {
	const chaindataExtension = "chaindata"
	// Require "chaindata" as the path's suffix. The previous Contains check
	// accepted a mid-path match and then sliced as if it were a suffix, which
	// produced a garbage nodekey path for inputs like "geth/chaindata/ancient".
	if !strings.HasSuffix(levelPath, chaindataExtension) {
		return "", NoChainDataErr
	}
	gethRootPath := strings.TrimSuffix(levelPath, chaindataExtension)
	// Preserve the original heuristic: the root prefix must be longer than
	// "chaindata" itself to count as a real geth root.
	if len(gethRootPath) <= len(chaindataExtension) {
		return "", NoGethRootErr
	}
	return gethRootPath + "nodekey", nil
}

View File

@ -1,114 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cold_import_test
import (
"errors"
"github.com/ethereum/go-ethereum/common"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/eth/cold_import"
"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
)
// Spec for ColdImportNodeBuilder.GetNode: covers invalid level DB paths,
// reader and parser failures, and the fully successful node construction.
var _ = Describe("Cold importer node builder", func() {
	Describe("when level path is not valid", func() {
		It("returns error if no chaindata extension", func() {
			gethPath := "path/to/geth"
			mockReader := fakes.NewMockFsReader()
			mockParser := fakes.NewMockCryptoParser()
			nodeBuilder := cold_import.NewColdImportNodeBuilder(mockReader, mockParser)
			_, err := nodeBuilder.GetNode([]byte{1, 2, 3, 4, 5}, gethPath)
			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError(cold_import.NoChainDataErr))
		})
		It("returns error if no root geth path", func() {
			// "chaindata" alone has no root prefix before it.
			chaindataPath := "chaindata"
			mockReader := fakes.NewMockFsReader()
			mockParser := fakes.NewMockCryptoParser()
			nodeBuilder := cold_import.NewColdImportNodeBuilder(mockReader, mockParser)
			_, err := nodeBuilder.GetNode([]byte{1, 2, 3, 4, 5}, chaindataPath)
			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError(cold_import.NoGethRootErr))
		})
	})
	Describe("when reader fails", func() {
		It("returns err", func() {
			mockReader := fakes.NewMockFsReader()
			fakeError := errors.New("Failed")
			mockReader.SetReturnErr(fakeError)
			mockParser := fakes.NewMockCryptoParser()
			nodeBuilder := cold_import.NewColdImportNodeBuilder(mockReader, mockParser)
			_, err := nodeBuilder.GetNode([]byte{1, 2, 3, 4, 5}, "path/to/geth/chaindata")
			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError(fakeError))
		})
	})
	Describe("when parser fails", func() {
		It("returns err", func() {
			mockReader := fakes.NewMockFsReader()
			mockParser := fakes.NewMockCryptoParser()
			fakeErr := errors.New("Failed")
			mockParser.SetReturnErr(fakeErr)
			nodeBuilder := cold_import.NewColdImportNodeBuilder(mockReader, mockParser)
			_, err := nodeBuilder.GetNode([]byte{1, 2, 3, 4, 5}, "path/to/geth/chaindata")
			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError(fakeErr))
		})
	})
	Describe("when path is valid and reader and parser succeed", func() {
		It("builds a node", func() {
			fakeGenesisBlock := []byte{1, 2, 3, 4, 5}
			fakeRootGethPath := "root/path/to/geth/"
			fakeLevelPath := fakeRootGethPath + "chaindata"
			fakeNodeKeyPath := fakeRootGethPath + "nodekey"
			fakePublicKeyBytes := []byte{5, 4, 3, 2, 1}
			fakePublicKeyString := "public_key"
			mockReader := fakes.NewMockFsReader()
			mockReader.SetReturnBytes(fakePublicKeyBytes)
			mockParser := fakes.NewMockCryptoParser()
			mockParser.SetReturnVal(fakePublicKeyString)
			nodeBuilder := cold_import.NewColdImportNodeBuilder(mockReader, mockParser)
			result, err := nodeBuilder.GetNode(fakeGenesisBlock, fakeLevelPath)
			Expect(err).NotTo(HaveOccurred())
			// The nodekey path must be derived as sibling of chaindata.
			mockReader.AssertReadCalledWith(fakeNodeKeyPath)
			mockParser.AssertParsePublicKeyCalledWith(string(fakePublicKeyBytes))
			Expect(result).NotTo(BeNil())
			Expect(result.ClientName).To(Equal(cold_import.ColdImportClientName))
			expectedGenesisBlock := common.BytesToHash(fakeGenesisBlock).String()
			Expect(result.GenesisBlock).To(Equal(expectedGenesisBlock))
			Expect(result.ID).To(Equal(fakePublicKeyString))
			Expect(result.NetworkID).To(Equal(cold_import.ColdImportNetworkID))
		})
	})
})

View File

@ -1,117 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package converter
import (
"fmt"
"math/big"
"strconv"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/contract"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/helpers"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/types"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
)
// ConverterInterface is used to convert watched event logs to
// custom logs containing event input name => value maps
type ConverterInterface interface {
	// Convert unpacks a watched event log into a types.Log for the given event.
	Convert(watchedEvent core.WatchedEvent, event types.Event) (*types.Log, error)
	// Update points the converter at a new contract.
	Update(info *contract.Contract)
}

// Converter is the underlying struct for the ConverterInterface
type Converter struct {
	// ContractInfo holds the address, parsed ABI, filters, and emitted-value
	// caches for the contract currently being watched.
	ContractInfo *contract.Contract
}

// Update configures the converter for a specific contract
func (c *Converter) Update(info *contract.Contract) {
	c.ContractInfo = info
}
// Convert converts the given watched event log into a types.Log for the given
// event, unpacking the raw log into a map of event input name => string value
// (Postgres cannot handle the custom Go types directly). Addresses and hashes
// seen along the way are cached on the contract when caching is enabled.
// Returns (nil, nil) when the log does not pass the contract's event filters.
func (c *Converter) Convert(watchedEvent core.WatchedEvent, event types.Event) (*types.Log, error) {
	boundContract := bind.NewBoundContract(common.HexToAddress(c.ContractInfo.Address), c.ContractInfo.ParsedAbi, nil, nil, nil)
	values := make(map[string]interface{})
	log := helpers.ConvertToLog(watchedEvent)
	if err := boundContract.UnpackLogIntoMap(values, event.Name, log); err != nil {
		return nil, err
	}
	strValues := make(map[string]string, len(values))
	seenAddrs := make([]interface{}, 0, len(values))
	seenHashes := make([]interface{}, 0, len(values))
	for fieldName, input := range values {
		// Postgres cannot handle custom types, resolve to strings.
		// Bind the typed value in the switch instead of re-asserting per case.
		switch typed := input.(type) {
		case *big.Int:
			strValues[fieldName] = typed.String()
		case common.Address:
			strValues[fieldName] = typed.String()
			seenAddrs = append(seenAddrs, typed)
		case common.Hash:
			strValues[fieldName] = typed.String()
			seenHashes = append(seenHashes, typed)
		case string:
			strValues[fieldName] = typed
		case bool:
			strValues[fieldName] = strconv.FormatBool(typed)
		case []byte:
			strValues[fieldName] = hexutil.Encode(typed)
			if len(typed) == 32 { // collect byte arrays of size 32 as hashes
				seenHashes = append(seenHashes, common.HexToHash(strValues[fieldName]))
			}
		case byte:
			strValues[fieldName] = string(typed)
		default:
			return nil, fmt.Errorf("error: unhandled abi type %T", input)
		}
	}
	// Only hold onto logs that pass our address filter, if any
	if !c.ContractInfo.PassesEventFilter(strValues) {
		return nil, nil
	}
	eventLog := &types.Log{
		ID:     watchedEvent.LogID,
		Values: strValues,
		Block:  watchedEvent.BlockNumber,
		Tx:     watchedEvent.TxHash,
	}
	// Cache emitted values if their caching is turned on
	if c.ContractInfo.EmittedAddrs != nil {
		c.ContractInfo.AddEmittedAddr(seenAddrs...)
	}
	if c.ContractInfo.EmittedHashes != nil {
		c.ContractInfo.AddEmittedHash(seenHashes...)
	}
	return eventLog, nil
}

View File

@ -1,35 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package converter_test
import (
"io/ioutil"
"log"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// TestConverter registers the Gomega fail handler and runs the full
// converter Ginkgo suite under the standard `go test` runner.
func TestConverter(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Full Converter Suite Test")
}

// Silence standard-library log output for the duration of the suite.
var _ = BeforeSuite(func() {
	log.SetOutput(ioutil.Discard)
})

View File

@ -1,113 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package converter_test
import (
"github.com/ethereum/go-ethereum/common"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/full/converter"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/contract"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/helpers"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/helpers/test_helpers"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/helpers/test_helpers/mocks"
)
// Spec for Converter: covers swapping contracts via Update, unpacking a
// Transfer event into string values, emitted-address caching, and failure
// on an unconfigured contract.
var _ = Describe("Converter", func() {
	var con *contract.Contract
	var wantedEvents = []string{"Transfer"}
	var err error
	BeforeEach(func() {
		con = test_helpers.SetupTusdContract(wantedEvents, []string{"balanceOf"})
	})
	Describe("Update", func() {
		It("Updates contract con held by the converter", func() {
			c := converter.Converter{}
			c.Update(con)
			Expect(c.ContractInfo).To(Equal(con))
			con := test_helpers.SetupTusdContract([]string{}, []string{})
			c.Update(con)
			Expect(c.ContractInfo).To(Equal(con))
		})
	})
	Describe("Convert", func() {
		It("Converts a watched event log to mapping of event input names to values", func() {
			// Only the wanted "Transfer" event should be configured.
			_, ok := con.Events["Approval"]
			Expect(ok).To(Equal(false))
			event, ok := con.Events["Transfer"]
			Expect(ok).To(Equal(true))
			err = con.GenerateFilters()
			Expect(err).ToNot(HaveOccurred())
			c := converter.Converter{}
			c.Update(con)
			log, err := c.Convert(mocks.MockTranferEvent, event)
			Expect(err).ToNot(HaveOccurred())
			from := common.HexToAddress("0x000000000000000000000000000000000000000000000000000000000000af21")
			to := common.HexToAddress("0x9dd48110dcc444fdc242510c09bbbbe21a5975cac061d82f7b843bce061ba391")
			value := helpers.BigFromString("1097077688018008265106216665536940668749033598146")
			v := log.Values["value"]
			Expect(log.Values["to"]).To(Equal(to.String()))
			Expect(log.Values["from"]).To(Equal(from.String()))
			Expect(v).To(Equal(value.String()))
		})
		It("Keeps track of addresses it sees to grow a token holder address list for the contract", func() {
			event, ok := con.Events["Transfer"]
			Expect(ok).To(Equal(true))
			c := converter.Converter{}
			c.Update(con)
			_, err := c.Convert(mocks.MockTranferEvent, event)
			Expect(err).ToNot(HaveOccurred())
			b, ok := con.EmittedAddrs[common.HexToAddress("0x000000000000000000000000000000000000Af21")]
			Expect(ok).To(Equal(true))
			Expect(b).To(Equal(true))
			b, ok = con.EmittedAddrs[common.HexToAddress("0x09BbBBE21a5975cAc061D82f7b843bCE061BA391")]
			Expect(ok).To(Equal(true))
			Expect(b).To(Equal(true))
			// Addresses never emitted must not appear in the cache.
			_, ok = con.EmittedAddrs[common.HexToAddress("0x")]
			Expect(ok).To(Equal(false))
			_, ok = con.EmittedAddrs[""]
			Expect(ok).To(Equal(false))
			_, ok = con.EmittedAddrs[common.HexToAddress("0x09THISE21a5IS5cFAKE1D82fAND43bCE06MADEUP")]
			Expect(ok).To(Equal(false))
		})
		It("Fails with an empty contract", func() {
			event := con.Events["Transfer"]
			c := converter.Converter{}
			c.Update(&contract.Contract{})
			_, err = c.Convert(mocks.MockTranferEvent, event)
			Expect(err).To(HaveOccurred())
		})
	})
})

View File

@ -1,99 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package retriever
import (
"database/sql"
"github.com/vulcanize/vulcanizedb/libraries/shared/repository"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
)
// BlockRetriever is used to retrieve the first block for a given contract and the most recent block
// It requires a vDB synced database with blocks, transactions, receipts, and logs
type BlockRetriever interface {
	// RetrieveFirstBlock returns the earliest block number associated with the contract address.
	RetrieveFirstBlock(contractAddr string) (int64, error)
	// RetrieveMostRecentBlock returns the highest block number currently in vDB.
	RetrieveMostRecentBlock() (int64, error)
}

// blockRetriever is the Postgres-backed implementation of BlockRetriever.
type blockRetriever struct {
	// db is the vDB connection used for all queries.
	db *postgres.DB
}

// NewBlockRetriever returns a new BlockRetriever
func NewBlockRetriever(db *postgres.DB) BlockRetriever {
	return &blockRetriever{
		db: db,
	}
}
// RetrieveFirstBlock fetches the block number for the earliest block in the db.
// The receipt-based lookup takes precedence; the log-based heuristic is used
// only when no receipt rows exist for the contract (sql.ErrNoRows).
func (r *blockRetriever) RetrieveFirstBlock(contractAddr string) (int64, error) {
	firstBlock, err := r.retrieveFirstBlockFromReceipts(contractAddr)
	if err == sql.ErrNoRows {
		// No receipts reference the contract (e.g. Sai); fall back to event logs.
		return r.retrieveFirstBlockFromLogs(contractAddr)
	}
	// Flattened from the original nested if/return, which duplicated
	// `return i, err` in two branches with identical effect.
	return firstBlock, err
}
// For some contracts the contract creation transaction receipt doesn't have the contract address so this doesn't work (e.g. Sai)
// retrieveFirstBlockFromReceipts returns the number of the earliest block
// holding a receipt that references the contract address. Returns
// sql.ErrNoRows when no such receipt exists.
func (r *blockRetriever) retrieveFirstBlockFromReceipts(contractAddr string) (int64, error) {
	var firstBlock int64
	// Resolve (or create) the address row so receipts can be joined on its id.
	addressID, getAddressErr := repository.GetOrCreateAddress(r.db, contractAddr)
	if getAddressErr != nil {
		return firstBlock, getAddressErr
	}
	err := r.db.Get(
		&firstBlock,
		`SELECT number FROM eth_blocks
			WHERE id = (SELECT block_id FROM full_sync_receipts
				WHERE contract_address_id = $1
				ORDER BY block_id ASC
				LIMIT 1)`,
		addressID,
	)
	return firstBlock, err
}
// In which case this serves as a heuristic to find the first block by finding the first contract event log.
// Returns sql.ErrNoRows when the contract has emitted no logs.
func (r *blockRetriever) retrieveFirstBlockFromLogs(contractAddr string) (int64, error) {
	var firstBlock int
	// Lowercase both sides: the original only lowercased the column, so a
	// mixed-case (checksummed) contractAddr argument could never match.
	// lower() is idempotent, so already-lowercase callers are unaffected.
	err := r.db.Get(
		&firstBlock,
		"SELECT block_number FROM full_sync_logs WHERE lower(address) = lower($1) ORDER BY block_number ASC LIMIT 1",
		contractAddr,
	)
	return int64(firstBlock), err
}
// RetrieveMostRecentBlock retrieves the most recent block number in vDB.
func (r *blockRetriever) RetrieveMostRecentBlock() (int64, error) {
	var mostRecentBlock int64
	getErr := r.db.Get(&mostRecentBlock, "SELECT number FROM eth_blocks ORDER BY number DESC LIMIT 1")
	return mostRecentBlock, getErr
}

View File

@ -1,259 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package retriever_test
import (
"strings"
"github.com/ethereum/go-ethereum/core/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/full/retriever"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/constants"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/helpers/test_helpers"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres/repositories"
)
// Integration spec for the full-sync BlockRetriever. Requires a test database
// and blockchain fixture (set up per-example via test_helpers.SetupDBandBC).
var _ = Describe("Block Retriever", func() {
	var db *postgres.DB
	var r retriever.BlockRetriever
	var rawTransaction []byte
	var blockRepository repositories.BlockRepository
	// Contains no contract address
	var block1 = core.Block{
		Hash:         "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad123ert",
		Number:       1,
		Transactions: []core.TransactionModel{},
	}
	BeforeEach(func() {
		db, _ = test_helpers.SetupDBandBC()
		blockRepository = *repositories.NewBlockRepository(db)
		r = retriever.NewBlockRetriever(db)
		// Marshal an empty geth transaction once; reused as the raw tx payload below
		gethTransaction := types.Transaction{}
		var err error
		rawTransaction, err = gethTransaction.MarshalJSON()
		Expect(err).NotTo(HaveOccurred())
	})
	AfterEach(func() {
		test_helpers.TearDown(db)
	})
	Describe("RetrieveFirstBlock", func() {
		It("Retrieves block number where contract first appears in receipt, if available", func() {
			// Contains the address in the receipt
			block2 := core.Block{
				Hash:   "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad123ert",
				Number: 2,
				Transactions: []core.TransactionModel{{
					Hash:     "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad6546ae",
					GasPrice: 0,
					GasLimit: 0,
					Nonce:    0,
					Raw:      rawTransaction,
					Receipt: core.Receipt{
						TxHash:          "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad6546ae",
						ContractAddress: constants.TusdContractAddress,
						Logs:            []core.FullSyncLog{},
					},
					TxIndex: 0,
					Value:   "0",
				}},
			}
			// Contains address in logs
			block3 := core.Block{
				Hash:   "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad456yui",
				Number: 3,
				Transactions: []core.TransactionModel{{
					Hash:     "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad234hfs",
					GasPrice: 0,
					GasLimit: 0,
					Nonce:    0,
					Raw:      rawTransaction,
					Receipt: core.Receipt{
						TxHash:          "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad234hfs",
						ContractAddress: constants.TusdContractAddress,
						Logs: []core.FullSyncLog{{
							BlockNumber: 3,
							TxHash:      "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad234hfs",
							Address:     constants.TusdContractAddress,
							Topics: core.Topics{
								constants.TransferEvent.Signature(),
								"0x000000000000000000000000000000000000000000000000000000000000af21",
								"0x9dd48110dcc444fdc242510c09bbbbe21a5975cac061d82f7b843bce061ba391",
								"",
							},
							Index: 1,
							Data:  "0x000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc200000000000000000000000089d24a6b4ccb1b6faa2625fe562bdd9a23260359000000000000000000000000000000000000000000000000392d2e2bda9c00000000000000000000000000000000000000000000000000927f41fa0a4a418000000000000000000000000000000000000000000000000000000000005adcfebe",
						}},
					},
					TxIndex: 0,
					Value:   "0",
				}},
			}
			_, insertErrOne := blockRepository.CreateOrUpdateBlock(block1)
			Expect(insertErrOne).NotTo(HaveOccurred())
			_, insertErrTwo := blockRepository.CreateOrUpdateBlock(block2)
			Expect(insertErrTwo).NotTo(HaveOccurred())
			_, insertErrThree := blockRepository.CreateOrUpdateBlock(block3)
			Expect(insertErrThree).NotTo(HaveOccurred())
			// Receipt match (block 2) takes precedence over the log match (block 3)
			i, err := r.RetrieveFirstBlock(strings.ToLower(constants.TusdContractAddress))
			Expect(err).NotTo(HaveOccurred())
			Expect(i).To(Equal(int64(2)))
		})
		It("Retrieves block number where contract first appears in event logs if it cannot find the address in a receipt", func() {
			// Both blocks carry the address only in logs (empty receipt ContractAddress)
			block2 := core.Block{
				Hash:   "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad123ert",
				Number: 2,
				Transactions: []core.TransactionModel{{
					Hash:     "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad6546ae",
					GasPrice: 0,
					GasLimit: 0,
					Nonce:    0,
					Raw:      rawTransaction,
					Receipt: core.Receipt{
						TxHash:          "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad6546ae",
						ContractAddress: "",
						Logs: []core.FullSyncLog{{
							BlockNumber: 2,
							TxHash:      "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad6546ae",
							Address:     constants.DaiContractAddress,
							Topics: core.Topics{
								constants.TransferEvent.Signature(),
								"0x000000000000000000000000000000000000000000000000000000000000af21",
								"0x9dd48110dcc444fdc242510c09bbbbe21a5975cac061d82f7b843bce061ba391",
								"",
							},
							Index: 1,
							Data:  "0x000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc200000000000000000000000089d24a6b4ccb1b6faa2625fe562bdd9a23260359000000000000000000000000000000000000000000000000392d2e2bda9c00000000000000000000000000000000000000000000000000927f41fa0a4a418000000000000000000000000000000000000000000000000000000000005adcfebe",
						}},
					},
					TxIndex: 0,
					Value:   "0",
				}},
			}
			block3 := core.Block{
				Hash:   "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad456yui",
				Number: 3,
				Transactions: []core.TransactionModel{{
					Hash:     "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad234hfs",
					GasPrice: 0,
					GasLimit: 0,
					Nonce:    0,
					Raw:      rawTransaction,
					Receipt: core.Receipt{
						TxHash:          "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad234hfs",
						ContractAddress: "",
						Logs: []core.FullSyncLog{{
							BlockNumber: 3,
							TxHash:      "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad234hfs",
							Address:     constants.DaiContractAddress,
							Topics: core.Topics{
								constants.TransferEvent.Signature(),
								"0x000000000000000000000000000000000000000000000000000000000000af21",
								"0x9dd48110dcc444fdc242510c09bbbbe21a5975cac061d82f7b843bce061ba391",
								"",
							},
							Index: 1,
							Data:  "0x000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc200000000000000000000000089d24a6b4ccb1b6faa2625fe562bdd9a23260359000000000000000000000000000000000000000000000000392d2e2bda9c00000000000000000000000000000000000000000000000000927f41fa0a4a418000000000000000000000000000000000000000000000000000000000005adcfebe",
						}},
					},
					TxIndex: 0,
					Value:   "0",
				}},
			}
			_, insertErrOne := blockRepository.CreateOrUpdateBlock(block1)
			Expect(insertErrOne).NotTo(HaveOccurred())
			_, insertErrTwo := blockRepository.CreateOrUpdateBlock(block2)
			Expect(insertErrTwo).NotTo(HaveOccurred())
			_, insertErrThree := blockRepository.CreateOrUpdateBlock(block3)
			Expect(insertErrThree).NotTo(HaveOccurred())
			i, err := r.RetrieveFirstBlock(constants.DaiContractAddress)
			Expect(err).NotTo(HaveOccurred())
			Expect(i).To(Equal(int64(2)))
		})
		It("Fails if the contract address cannot be found in any blocks", func() {
			block2 := core.Block{
				Hash:         "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad123ert",
				Number:       2,
				Transactions: []core.TransactionModel{},
			}
			block3 := core.Block{
				Hash:         "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad456yui",
				Number:       3,
				Transactions: []core.TransactionModel{},
			}
			_, insertErrOne := blockRepository.CreateOrUpdateBlock(block1)
			Expect(insertErrOne).NotTo(HaveOccurred())
			_, insertErrTwo := blockRepository.CreateOrUpdateBlock(block2)
			Expect(insertErrTwo).NotTo(HaveOccurred())
			_, insertErrThree := blockRepository.CreateOrUpdateBlock(block3)
			Expect(insertErrThree).NotTo(HaveOccurred())
			_, err := r.RetrieveFirstBlock(constants.DaiContractAddress)
			Expect(err).To(HaveOccurred())
		})
	})
	Describe("RetrieveMostRecentBlock", func() {
		It("Retrieves the latest block", func() {
			block2 := core.Block{
				Hash:         "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad123ert",
				Number:       2,
				Transactions: []core.TransactionModel{},
			}
			block3 := core.Block{
				Hash:         "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad456yui",
				Number:       3,
				Transactions: []core.TransactionModel{},
			}
			_, insertErrOne := blockRepository.CreateOrUpdateBlock(block1)
			Expect(insertErrOne).NotTo(HaveOccurred())
			_, insertErrTwo := blockRepository.CreateOrUpdateBlock(block2)
			Expect(insertErrTwo).NotTo(HaveOccurred())
			_, insertErrThree := blockRepository.CreateOrUpdateBlock(block3)
			Expect(insertErrThree).NotTo(HaveOccurred())
			i, err := r.RetrieveMostRecentBlock()
			Expect(err).ToNot(HaveOccurred())
			Expect(i).To(Equal(int64(3)))
		})
		It("Fails if it cannot retrieve the latest block", func() {
			// Empty database: the query returns no rows and the zero value is surfaced
			i, err := r.RetrieveMostRecentBlock()
			Expect(err).To(HaveOccurred())
			Expect(i).To(Equal(int64(0)))
		})
	})
})

View File

@ -1,35 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package retriever_test
import (
"io/ioutil"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
)
// TestRetriever is the ginkgo entry point for the full-sync block number retriever suite.
func TestRetriever(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Full Block Number Retriever Suite Test")
}
// Silence application logging for the duration of the suite.
var _ = BeforeSuite(func() {
	logrus.SetOutput(ioutil.Discard)
})

View File

@ -1,236 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package transformer
import (
"errors"
"github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/full/converter"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/full/retriever"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/contract"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/parser"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/poller"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/repository"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/types"
"github.com/vulcanize/vulcanizedb/pkg/eth/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/eth/datastore/postgres/repositories"
)
// Transformer is the top level struct for transforming watched contract data
// Requires a fully synced vDB and a running eth node (or infura)
type Transformer struct {
	// Database interfaces
	FilterRepository           datastore.FilterRepository       // Log filters repo; accepts filters generated by Contract.GenerateFilters()
	WatchedEventRepository     datastore.WatchedEventRepository // Watched event log views, created by the log filters
	TransformedEventRepository repository.EventRepository       // Holds transformed watched event log data
	// Pre-processing interfaces
	Parser    parser.Parser            // Parses events and methods out of contract abi fetched using contract address
	Retriever retriever.BlockRetriever // Retrieves first block for contract and current block height
	// Processing interfaces
	Converter converter.ConverterInterface // Converts watched event logs into custom log
	Poller    poller.Poller                // Polls methods using contract's token holder addresses and persists them using method datastore
	// Store contract configuration information
	Config config.ContractConfig
	// Store contract info as mapping to contract address
	Contracts map[string]*contract.Contract
	// Latest block in the block repository; refreshed at the end of Init and Execute
	LastBlock int64
}
// NewTransformer takes in contract config, blockchain, and database, and returns a new Transformer
// wired with the default full-sync parser, retriever, converter, poller, and repositories.
func NewTransformer(con config.ContractConfig, bc core.BlockChain, db *postgres.DB) *Transformer {
	// Parameters renamed from BC/DB: lowercase names are the Go convention for
	// locals/params and avoid the appearance of exported identifiers.
	return &Transformer{
		Poller:                     poller.NewPoller(bc, db, types.FullSync),
		Parser:                     parser.NewParser(con.Network),
		Retriever:                  retriever.NewBlockRetriever(db),
		Converter:                  &converter.Converter{},
		Contracts:                  map[string]*contract.Contract{},
		WatchedEventRepository:     repositories.WatchedEventRepository{DB: db},
		FilterRepository:           repositories.FilterRepository{DB: db},
		TransformedEventRepository: repository.NewEventRepository(db, types.FullSync),
		Config:                     con,
	}
}
// Init initializes the transformer
// Use after creating and setting transformer
// Loops over all of the addr => filter sets
// Uses parser to pull event info from abi
// Use this info to generate event filters
// Returns the first error encountered; on error the transformer is left
// partially initialized and should not be Executed.
func (tr *Transformer) Init() error {
	for contractAddr := range tr.Config.Addresses {
		// Configure Abi
		if tr.Config.Abis[contractAddr] == "" {
			// If no abi is given in the config, this method will try fetching from internal look-up table and etherscan
			err := tr.Parser.Parse(contractAddr)
			if err != nil {
				return err
			}
		} else {
			// If we have an abi from the config, load that into the parser
			err := tr.Parser.ParseAbiStr(tr.Config.Abis[contractAddr])
			if err != nil {
				return err
			}
		}
		// Get first block and most recent block number in the header repo
		firstBlock, err := tr.Retriever.RetrieveFirstBlock(contractAddr)
		if err != nil {
			return err
		}
		// Set to specified range if it falls within the bounds
		if firstBlock < tr.Config.StartingBlocks[contractAddr] {
			firstBlock = tr.Config.StartingBlocks[contractAddr]
		}
		// Get contract name if it has one
		// NOTE(review): tr.LastBlock is still zero on the first Init call (it is
		// only assigned at the bottom of this method) — confirm the poller
		// treats 0 as "latest" before relying on this.
		var name = new(string)
		pollingErr := tr.Poller.FetchContractData(tr.Parser.Abi(), contractAddr, "name", nil, name, tr.LastBlock)
		if pollingErr != nil {
			// can't return this error because "name" might not exist on the contract
			logrus.Warnf("error fetching contract data: %s", pollingErr.Error())
		}
		// Remove any potential accidental duplicate inputs in arg filter values
		eventArgs := map[string]bool{}
		for _, arg := range tr.Config.EventArgs[contractAddr] {
			eventArgs[arg] = true
		}
		methodArgs := map[string]bool{}
		for _, arg := range tr.Config.MethodArgs[contractAddr] {
			methodArgs[arg] = true
		}
		// Aggregate info into contract object
		info := contract.Contract{
			Name:          *name,
			Network:       tr.Config.Network,
			Address:       contractAddr,
			Abi:           tr.Parser.Abi(),
			ParsedAbi:     tr.Parser.ParsedAbi(),
			StartingBlock: firstBlock,
			Events:        tr.Parser.GetEvents(tr.Config.Events[contractAddr]),
			Methods:       tr.Parser.GetSelectMethods(tr.Config.Methods[contractAddr]),
			FilterArgs:    eventArgs,
			MethodArgs:    methodArgs,
			Piping:        tr.Config.Piping[contractAddr],
		}.Init()
		// Use info to create filters
		err = info.GenerateFilters()
		if err != nil {
			return err
		}
		// Iterate over filters and push them to the repo using filter repository interface
		for _, filter := range info.Filters {
			err = tr.FilterRepository.CreateFilter(filter)
			if err != nil {
				return err
			}
		}
		// Store contract info for further processing
		tr.Contracts[contractAddr] = info
	}
	// Get the most recent block number in the block repo
	var err error
	tr.LastBlock, err = tr.Retriever.RetrieveMostRecentBlock()
	if err != nil {
		return err
	}
	return nil
}
// Execute runs the transformation processes
// Iterates through stored, initialized contract objects
// Iterates through contract's event filters, grabbing watched event logs
// Uses converter to convert logs into custom log type
// Persists converted logs into custom postgres tables
// Calls selected methods, using token holder address generated during event log conversion
// Requires Init to have been called first; errors if no contracts are loaded.
func (tr *Transformer) Execute() error {
	if len(tr.Contracts) == 0 {
		return errors.New("error: transformer has no initialized contracts to work with")
	}
	// Iterate through all internal contracts
	for _, con := range tr.Contracts {
		// Update converter with current contract
		tr.Converter.Update(con)
		// Iterate through contract filters and get watched event logs
		for eventSig, filter := range con.Filters {
			watchedEvents, err := tr.WatchedEventRepository.GetWatchedEvents(filter.Name)
			if err != nil {
				return err
			}
			// Iterate over watched event logs
			for _, we := range watchedEvents {
				// Convert them to our custom log type
				cstm, err := tr.Converter.Convert(*we, con.Events[eventSig])
				if err != nil {
					return err
				}
				// A nil custom log means the event did not pass the filter args; skip it
				if cstm == nil {
					continue
				}
				// If log is not empty, immediately persist in repo
				// Run this in separate goroutine?
				err = tr.TransformedEventRepository.PersistLogs([]types.Log{*cstm}, con.Events[eventSig], con.Address, con.Name)
				if err != nil {
					return err
				}
			}
		}
		// After persisting all watched event logs
		// poller polls select contract methods
		// and persists the results into custom pg tables
		if err := tr.Poller.PollContract(*con, tr.LastBlock); err != nil {
			return err
		}
	}
	// At the end of a transformation cycle, and before the next
	// update the latest block from the block repo
	var err error
	tr.LastBlock, err = tr.Retriever.RetrieveMostRecentBlock()
	if err != nil {
		return err
	}
	return nil
}
// GetConfig returns the transformer's config; satisfies the transformer interface.
func (tr *Transformer) GetConfig() config.ContractConfig {
	return tr.Config
}

View File

@ -1,35 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package transformer_test
import (
"io/ioutil"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
)
// TestTransformer is the ginkgo entry point for the full-sync transformer suite.
func TestTransformer(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Full Transformer Suite Test")
}
// Silence application logging for the duration of the suite.
var _ = BeforeSuite(func() {
	logrus.SetOutput(ioutil.Discard)
})

View File

@ -1,98 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package transformer_test
import (
"math/rand"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/full/retriever"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/full/transformer"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/contract"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/helpers/test_helpers/mocks"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/parser"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/poller"
"github.com/vulcanize/vulcanizedb/pkg/eth/contract_watcher/shared/types"
"github.com/vulcanize/vulcanizedb/pkg/eth/fakes"
)
// Unit spec for Transformer.Init using mocked retriever, parser, and poller
// (no database or eth node required).
var _ = Describe("Transformer", func() {
	var fakeAddress = "0x1234567890abcdef"
	rand.Seed(time.Now().UnixNano())
	Describe("Init", func() {
		It("Initializes transformer's contract objects", func() {
			// Mock retriever supplies the first/most-recent block numbers
			blockRetriever := &fakes.MockFullSyncBlockRetriever{}
			firstBlock := int64(1)
			mostRecentBlock := int64(2)
			blockRetriever.FirstBlock = firstBlock
			blockRetriever.MostRecentBlock = mostRecentBlock
			// Mock parser supplies the abi and a single Transfer event
			parsr := &fakes.MockParser{}
			fakeAbi := "fake_abi"
			eventName := "Transfer"
			event := types.Event{}
			parsr.AbiToReturn = fakeAbi
			parsr.EventName = eventName
			parsr.Event = event
			// Mock poller supplies the contract's name() result
			pollr := &fakes.MockPoller{}
			fakeContractName := "fake_contract_name"
			pollr.ContractName = fakeContractName
			t := getTransformer(blockRetriever, parsr, pollr)
			err := t.Init()
			Expect(err).ToNot(HaveOccurred())
			c, ok := t.Contracts[fakeAddress]
			Expect(ok).To(Equal(true))
			Expect(c.StartingBlock).To(Equal(firstBlock))
			Expect(t.LastBlock).To(Equal(mostRecentBlock))
			Expect(c.Abi).To(Equal(fakeAbi))
			Expect(c.Name).To(Equal(fakeContractName))
			Expect(c.Address).To(Equal(fakeAddress))
		})
		It("Fails to initialize if first and most recent blocks cannot be fetched from vDB", func() {
			blockRetriever := &fakes.MockFullSyncBlockRetriever{}
			blockRetriever.FirstBlockErr = fakes.FakeError
			t := getTransformer(blockRetriever, &fakes.MockParser{}, &fakes.MockPoller{})
			err := t.Init()
			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError(fakes.FakeError))
		})
	})
})
// getTransformer builds a Transformer wired with the supplied mocks plus the
// shared mock config and an empty contract map, ready for Init to be called.
func getTransformer(br retriever.BlockRetriever, p parser.Parser, pl poller.Poller) transformer.Transformer {
	t := transformer.Transformer{
		Config:           mocks.MockConfig,
		Contracts:        map[string]*contract.Contract{},
		FilterRepository: &fakes.MockFilterRepository{},
		Parser:           p,
		Poller:           pl,
		Retriever:        br,
	}
	return t
}

Some files were not shown because too many files have changed in this diff Show More