Decouple log extraction from transformer delegation
- limit missing headers results set to 100 so that extraction doesn't excessively block delegation
- wrap checked headers functions in repository struct
- move storage repository to factory, to correspond with event repository path
- remove unused files
- reformat sql
- remove line breaks in imports
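The 100-row cap itself lands in the checked-headers repository, outside the hunks shown below. A minimal sketch of what that capped query might look like, assuming a sqlx-backed repository; the struct, column list, ordering, and placement of the LIMIT are illustrative, with table names taken from the migrations in this commit:

package repositories

import (
    "github.com/jmoiron/sqlx"

    "github.com/vulcanize/vulcanizedb/pkg/core"
)

// Hypothetical shape of the wrapped repository; this commit's visible hunks
// only show its call site (CheckedHeadersRepository.MissingHeaders in the
// extractor below).
type CheckedHeadersRepository struct {
    db *sqlx.DB
}

// MissingHeaders returns headers not yet checked (or checked fewer than
// checkCount times), capped at 100 rows so that one extraction pass stays
// bounded and cannot excessively block delegation.
func (repo CheckedHeadersRepository) MissingHeaders(startingBlockNumber, endingBlockNumber, checkCount int64) ([]core.Header, error) {
    var result []core.Header
    err := repo.db.Select(&result,
        `SELECT headers.id, headers.block_number, headers.hash
         FROM headers
         LEFT JOIN checked_headers ON headers.id = checked_headers.header_id
         WHERE (checked_headers.header_id IS NULL OR checked_headers.check_count < $3)
           AND headers.block_number >= $1
           AND (headers.block_number <= $2 OR $2 = -1)
         ORDER BY headers.block_number
         LIMIT 100`,
        startingBlockNumber, endingBlockNumber, checkCount)
    return result, err
}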
parent cb819fa9a6
commit d496dad33c
@@ -1,21 +1,22 @@
 -- +goose Up
-CREATE TABLE public.blocks (
-  id SERIAL PRIMARY KEY,
-  difficulty BIGINT,
-  extra_data VARCHAR,
-  gas_limit BIGINT,
-  gas_used BIGINT,
-  hash VARCHAR(66),
-  miner VARCHAR(42),
-  nonce VARCHAR(20),
-  "number" BIGINT,
-  parent_hash VARCHAR(66),
-  reward NUMERIC,
-  uncles_reward NUMERIC,
-  "size" VARCHAR,
-  "time" BIGINT,
-  is_final BOOLEAN,
-  uncle_hash VARCHAR(66)
+CREATE TABLE public.blocks
+(
+    id SERIAL PRIMARY KEY,
+    difficulty BIGINT,
+    extra_data VARCHAR,
+    gas_limit BIGINT,
+    gas_used BIGINT,
+    hash VARCHAR(66),
+    miner VARCHAR(42),
+    nonce VARCHAR(20),
+    "number" BIGINT,
+    parent_hash VARCHAR(66),
+    reward NUMERIC,
+    uncles_reward NUMERIC,
+    "size" VARCHAR,
+    "time" BIGINT,
+    is_final BOOLEAN,
+    uncle_hash VARCHAR(66)
 );
@@ -1,17 +1,18 @@
 -- +goose Up
-CREATE TABLE full_sync_transactions (
-  id SERIAL PRIMARY KEY,
-  block_id INTEGER NOT NULL REFERENCES blocks(id) ON DELETE CASCADE,
-  gas_limit NUMERIC,
-  gas_price NUMERIC,
-  hash VARCHAR(66),
-  input_data BYTEA,
-  nonce NUMERIC,
-  raw BYTEA,
-  tx_from VARCHAR(66),
-  tx_index INTEGER,
-  tx_to VARCHAR(66),
-  "value" NUMERIC
+CREATE TABLE full_sync_transactions
+(
+    id SERIAL PRIMARY KEY,
+    block_id INTEGER NOT NULL REFERENCES blocks (id) ON DELETE CASCADE,
+    gas_limit NUMERIC,
+    gas_price NUMERIC,
+    hash VARCHAR(66),
+    input_data BYTEA,
+    nonce NUMERIC,
+    raw BYTEA,
+    tx_from VARCHAR(66),
+    tx_index INTEGER,
+    tx_to VARCHAR(66),
+    "value" NUMERIC
 );

 -- +goose Down
@@ -1,9 +1,9 @@
 -- +goose Up
 CREATE TABLE watched_contracts
 (
-  contract_id SERIAL PRIMARY KEY,
-  contract_abi json,
-  contract_hash VARCHAR(66) UNIQUE
+    contract_id SERIAL PRIMARY KEY,
+    contract_abi json,
+    contract_hash VARCHAR(66) UNIQUE
 );

 -- +goose Down
@@ -1,11 +1,12 @@
 -- +goose Up
-CREATE TABLE nodes (
-  id SERIAL PRIMARY KEY,
-  client_name VARCHAR,
-  genesis_block VARCHAR(66),
-  network_id NUMERIC,
-  node_id VARCHAR(128),
-  CONSTRAINT node_uc UNIQUE (genesis_block, network_id, node_id)
+CREATE TABLE nodes
+(
+    id SERIAL PRIMARY KEY,
+    client_name VARCHAR,
+    genesis_block VARCHAR(66),
+    network_id NUMERIC,
+    node_id VARCHAR(128),
+    CONSTRAINT node_uc UNIQUE (genesis_block, network_id, node_id)
 );

 -- +goose Down
@@ -1,11 +1,11 @@
 -- +goose Up
 ALTER TABLE blocks
-  ADD COLUMN node_id INTEGER NOT NULL,
-  ADD CONSTRAINT node_fk
-    FOREIGN KEY (node_id)
-    REFERENCES nodes (id)
-    ON DELETE CASCADE;
+    ADD COLUMN node_id INTEGER NOT NULL,
+    ADD CONSTRAINT node_fk
+        FOREIGN KEY (node_id)
+            REFERENCES nodes (id)
+            ON DELETE CASCADE;

 -- +goose Down
 ALTER TABLE blocks
-  DROP COLUMN node_id;
+    DROP COLUMN node_id;
@@ -1,19 +1,19 @@
 -- +goose Up
-CREATE TABLE full_sync_logs (
-  id SERIAL PRIMARY KEY,
-  block_number BIGINT,
-  address VARCHAR(66),
-  tx_hash VARCHAR(66),
-  index BIGINT,
-  topic0 VARCHAR(66),
-  topic1 VARCHAR(66),
-  topic2 VARCHAR(66),
-  topic3 VARCHAR(66),
-  data TEXT,
-  CONSTRAINT full_sync_log_uc UNIQUE (block_number, index)
+CREATE TABLE full_sync_logs
+(
+    id SERIAL PRIMARY KEY,
+    block_number BIGINT,
+    address VARCHAR(66),
+    tx_hash VARCHAR(66),
+    index BIGINT,
+    topic0 VARCHAR(66),
+    topic1 VARCHAR(66),
+    topic2 VARCHAR(66),
+    topic3 VARCHAR(66),
+    data TEXT,
+    CONSTRAINT full_sync_log_uc UNIQUE (block_number, index)
 );


 -- +goose Down
 DROP TABLE full_sync_logs;
@@ -1,7 +1,7 @@
 -- +goose Up
 ALTER TABLE blocks
-  ADD CONSTRAINT node_id_block_number_uc UNIQUE (number, node_id);
+    ADD CONSTRAINT node_id_block_number_uc UNIQUE (number, node_id);

 -- +goose Down
 ALTER TABLE blocks
-  DROP CONSTRAINT node_id_block_number_uc;
+    DROP CONSTRAINT node_id_block_number_uc;
@@ -1,5 +1,5 @@
 -- +goose Up
-CREATE INDEX tx_to_index ON full_sync_transactions(tx_to);
+CREATE INDEX tx_to_index ON full_sync_transactions (tx_to);

 -- +goose Down
 DROP INDEX tx_to_index;
@@ -1,5 +1,5 @@
 -- +goose Up
-CREATE INDEX tx_from_index ON full_sync_transactions(tx_from);
+CREATE INDEX tx_from_index ON full_sync_transactions (tx_from);

 -- +goose Down
 DROP INDEX tx_from_index;
@@ -3,21 +3,21 @@ ALTER TABLE full_sync_logs
     DROP CONSTRAINT full_sync_log_uc;

 ALTER TABLE full_sync_logs
-  ADD COLUMN receipt_id INT;
+    ADD COLUMN receipt_id INT;

 ALTER TABLE full_sync_logs
-  ADD CONSTRAINT receipts_fk
-    FOREIGN KEY (receipt_id)
-    REFERENCES full_sync_receipts (id)
-    ON DELETE CASCADE;
+    ADD CONSTRAINT receipts_fk
+        FOREIGN KEY (receipt_id)
+            REFERENCES full_sync_receipts (id)
+            ON DELETE CASCADE;


 -- +goose Down
 ALTER TABLE full_sync_logs
-  DROP CONSTRAINT receipts_fk;
+    DROP CONSTRAINT receipts_fk;

 ALTER TABLE full_sync_logs
-  DROP COLUMN receipt_id;
+    DROP COLUMN receipt_id;

 ALTER TABLE full_sync_logs
-  ADD CONSTRAINT full_sync_log_uc UNIQUE (block_number, index);
+    ADD CONSTRAINT full_sync_log_uc UNIQUE (block_number, index);
@@ -1,15 +1,16 @@
 -- +goose Up
-CREATE TABLE log_filters (
-  id SERIAL,
-  name VARCHAR NOT NULL CHECK (name <> ''),
-  from_block BIGINT CHECK (from_block >= 0),
-  to_block BIGINT CHECK (from_block >= 0),
-  address VARCHAR(66),
-  topic0 VARCHAR(66),
-  topic1 VARCHAR(66),
-  topic2 VARCHAR(66),
-  topic3 VARCHAR(66),
-  CONSTRAINT name_uc UNIQUE (name)
+CREATE TABLE log_filters
+(
+    id SERIAL,
+    name VARCHAR NOT NULL CHECK (name <> ''),
+    from_block BIGINT CHECK (from_block >= 0),
+    to_block BIGINT CHECK (from_block >= 0),
+    address VARCHAR(66),
+    topic0 VARCHAR(66),
+    topic1 VARCHAR(66),
+    topic2 VARCHAR(66),
+    topic3 VARCHAR(66),
+    CONSTRAINT name_uc UNIQUE (name)
 );

 -- +goose Down
@@ -1,33 +1,31 @@
 -- +goose Up
 CREATE VIEW block_stats AS
-  SELECT
-    max(block_number) AS max_block,
-    min(block_number) AS min_block
-  FROM full_sync_logs;
+SELECT max(block_number) AS max_block,
+       min(block_number) AS min_block
+FROM full_sync_logs;

 CREATE VIEW watched_event_logs AS
-  SELECT
-    log_filters.name,
-    full_sync_logs.id,
-    block_number,
-    full_sync_logs.address,
-    tx_hash,
-    index,
-    full_sync_logs.topic0,
-    full_sync_logs.topic1,
-    full_sync_logs.topic2,
-    full_sync_logs.topic3,
-    data,
-    receipt_id
-  FROM log_filters
-    CROSS JOIN block_stats
-    JOIN full_sync_logs ON full_sync_logs.address = log_filters.address
-      AND full_sync_logs.block_number >= coalesce(log_filters.from_block, block_stats.min_block)
-      AND full_sync_logs.block_number <= coalesce(log_filters.to_block, block_stats.max_block)
-  WHERE (log_filters.topic0 = full_sync_logs.topic0 OR log_filters.topic0 ISNULL)
-    AND (log_filters.topic1 = full_sync_logs.topic1 OR log_filters.topic1 ISNULL)
-    AND (log_filters.topic2 = full_sync_logs.topic2 OR log_filters.topic2 ISNULL)
-    AND (log_filters.topic3 = full_sync_logs.topic3 OR log_filters.topic3 ISNULL);
+SELECT log_filters.name,
+       full_sync_logs.id,
+       block_number,
+       full_sync_logs.address,
+       tx_hash,
+       index,
+       full_sync_logs.topic0,
+       full_sync_logs.topic1,
+       full_sync_logs.topic2,
+       full_sync_logs.topic3,
+       data,
+       receipt_id
+FROM log_filters
+         CROSS JOIN block_stats
+         JOIN full_sync_logs ON full_sync_logs.address = log_filters.address
+    AND full_sync_logs.block_number >= coalesce(log_filters.from_block, block_stats.min_block)
+    AND full_sync_logs.block_number <= coalesce(log_filters.to_block, block_stats.max_block)
+WHERE (log_filters.topic0 = full_sync_logs.topic0 OR log_filters.topic0 ISNULL)
+  AND (log_filters.topic1 = full_sync_logs.topic1 OR log_filters.topic1 ISNULL)
+  AND (log_filters.topic2 = full_sync_logs.topic2 OR log_filters.topic2 ISNULL)
+  AND (log_filters.topic3 = full_sync_logs.topic3 OR log_filters.topic3 ISNULL);

 -- +goose Down
 DROP VIEW watched_event_logs;
@@ -1,14 +1,14 @@
 -- +goose Up
 ALTER TABLE log_filters
-  DROP CONSTRAINT log_filters_from_block_check1;
+    DROP CONSTRAINT log_filters_from_block_check1;

 ALTER TABLE log_filters
-  ADD CONSTRAINT log_filters_to_block_check CHECK (to_block >= 0);
+    ADD CONSTRAINT log_filters_to_block_check CHECK (to_block >= 0);


 -- +goose Down
 ALTER TABLE log_filters
-  DROP CONSTRAINT log_filters_to_block_check;
+    DROP CONSTRAINT log_filters_to_block_check;

 ALTER TABLE log_filters
-  ADD CONSTRAINT log_filters_from_block_check1 CHECK (to_block >= 0);
+    ADD CONSTRAINT log_filters_from_block_check1 CHECK (to_block >= 0);
@@ -1,43 +1,52 @@
 -- +goose Up
-ALTER TABLE public.nodes RENAME TO eth_nodes;
+ALTER TABLE public.nodes
+    RENAME TO eth_nodes;

-ALTER TABLE public.eth_nodes RENAME COLUMN node_id TO eth_node_id;
-
-ALTER TABLE public.eth_nodes DROP CONSTRAINT node_uc;
 ALTER TABLE public.eth_nodes
-  ADD CONSTRAINT eth_node_uc UNIQUE (genesis_block, network_id, eth_node_id);
+    RENAME COLUMN node_id TO eth_node_id;

-ALTER TABLE public.blocks RENAME COLUMN node_id TO eth_node_id;
+ALTER TABLE public.eth_nodes
+    DROP CONSTRAINT node_uc;
+ALTER TABLE public.eth_nodes
+    ADD CONSTRAINT eth_node_uc UNIQUE (genesis_block, network_id, eth_node_id);

-ALTER TABLE public.blocks DROP CONSTRAINT node_id_block_number_uc;
 ALTER TABLE public.blocks
-  ADD CONSTRAINT eth_node_id_block_number_uc UNIQUE (number, eth_node_id);
+    RENAME COLUMN node_id TO eth_node_id;

-ALTER TABLE public.blocks DROP CONSTRAINT node_fk;
 ALTER TABLE public.blocks
-  ADD CONSTRAINT node_fk
-    FOREIGN KEY (eth_node_id) REFERENCES eth_nodes (id) ON DELETE CASCADE;
+    DROP CONSTRAINT node_id_block_number_uc;
+ALTER TABLE public.blocks
+    ADD CONSTRAINT eth_node_id_block_number_uc UNIQUE (number, eth_node_id);
+
+ALTER TABLE public.blocks
+    DROP CONSTRAINT node_fk;
+ALTER TABLE public.blocks
+    ADD CONSTRAINT node_fk
+        FOREIGN KEY (eth_node_id) REFERENCES eth_nodes (id) ON DELETE CASCADE;


 -- +goose Down
 ALTER TABLE public.eth_nodes
-  RENAME TO nodes;
+    RENAME TO nodes;

 ALTER TABLE public.nodes
-  RENAME COLUMN eth_node_id TO node_id;
+    RENAME COLUMN eth_node_id TO node_id;

 ALTER TABLE public.nodes
-  DROP CONSTRAINT eth_node_uc;
+    DROP CONSTRAINT eth_node_uc;
 ALTER TABLE public.nodes
-  ADD CONSTRAINT node_uc UNIQUE (genesis_block, network_id, node_id);
+    ADD CONSTRAINT node_uc UNIQUE (genesis_block, network_id, node_id);

-ALTER TABLE public.blocks RENAME COLUMN eth_node_id TO node_id;
-
-ALTER TABLE public.blocks DROP CONSTRAINT eth_node_id_block_number_uc;
 ALTER TABLE public.blocks
-  ADD CONSTRAINT node_id_block_number_uc UNIQUE (number, node_id);
+    RENAME COLUMN eth_node_id TO node_id;

-ALTER TABLE public.blocks DROP CONSTRAINT node_fk;
+ALTER TABLE public.blocks
+    DROP CONSTRAINT eth_node_id_block_number_uc;
+ALTER TABLE public.blocks
+    ADD CONSTRAINT node_id_block_number_uc UNIQUE (number, node_id);
+
 ALTER TABLE public.blocks
-  ADD CONSTRAINT node_fk
-    FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE;
+    DROP CONSTRAINT node_fk;
+ALTER TABLE public.blocks
+    ADD CONSTRAINT node_fk
+        FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE;
@@ -1,44 +1,44 @@
 -- +goose Up
 ALTER TABLE full_sync_receipts
-  ADD COLUMN block_id INT;
+    ADD COLUMN block_id INT;

 UPDATE full_sync_receipts
-  SET block_id = (
+SET block_id = (
     SELECT block_id FROM full_sync_transactions WHERE full_sync_transactions.id = full_sync_receipts.transaction_id
-  );
+);

 ALTER TABLE full_sync_receipts
-  ALTER COLUMN block_id SET NOT NULL;
+    ALTER COLUMN block_id SET NOT NULL;

 ALTER TABLE full_sync_receipts
-  ADD CONSTRAINT blocks_fk
-    FOREIGN KEY (block_id)
-    REFERENCES blocks (id)
-    ON DELETE CASCADE;
+    ADD CONSTRAINT blocks_fk
+        FOREIGN KEY (block_id)
+            REFERENCES blocks (id)
+            ON DELETE CASCADE;

 ALTER TABLE full_sync_receipts
-  DROP COLUMN transaction_id;
+    DROP COLUMN transaction_id;


 -- +goose Down
 ALTER TABLE full_sync_receipts
-  ADD COLUMN transaction_id INT;
+    ADD COLUMN transaction_id INT;

 CREATE INDEX transaction_id_index ON full_sync_receipts (transaction_id);

 UPDATE full_sync_receipts
-  SET transaction_id = (
+SET transaction_id = (
     SELECT id FROM full_sync_transactions WHERE full_sync_transactions.hash = full_sync_receipts.tx_hash
-  );
+);

 ALTER TABLE full_sync_receipts
-  ALTER COLUMN transaction_id SET NOT NULL;
+    ALTER COLUMN transaction_id SET NOT NULL;

 ALTER TABLE full_sync_receipts
-  ADD CONSTRAINT transaction_fk
-    FOREIGN KEY (transaction_id)
-    REFERENCES full_sync_transactions (id)
-    ON DELETE CASCADE;
+    ADD CONSTRAINT transaction_fk
+        FOREIGN KEY (transaction_id)
+            REFERENCES full_sync_transactions (id)
+            ON DELETE CASCADE;

 ALTER TABLE full_sync_receipts
-  DROP COLUMN block_id;
+    DROP COLUMN block_id;
@@ -1,16 +1,16 @@
 -- +goose Up
 ALTER TABLE blocks
-  ADD COLUMN eth_node_fingerprint VARCHAR(128);
+    ADD COLUMN eth_node_fingerprint VARCHAR(128);

 UPDATE blocks
-  SET eth_node_fingerprint = (
+SET eth_node_fingerprint = (
     SELECT eth_node_id FROM eth_nodes WHERE eth_nodes.id = blocks.eth_node_id
-  );
+);

 ALTER TABLE blocks
-  ALTER COLUMN eth_node_fingerprint SET NOT NULL;
+    ALTER COLUMN eth_node_fingerprint SET NOT NULL;


 -- +goose Down
 ALTER TABLE blocks
-  DROP COLUMN eth_node_fingerprint;
+    DROP COLUMN eth_node_fingerprint;
@@ -1,12 +1,13 @@
 -- +goose Up
-CREATE TABLE public.headers (
-  id SERIAL PRIMARY KEY,
-  hash VARCHAR(66),
-  block_number BIGINT,
-  raw JSONB,
-  block_timestamp NUMERIC,
-  eth_node_id INTEGER NOT NULL REFERENCES eth_nodes (id) ON DELETE CASCADE,
-  eth_node_fingerprint VARCHAR(128)
+CREATE TABLE public.headers
+(
+    id SERIAL PRIMARY KEY,
+    hash VARCHAR(66),
+    block_number BIGINT,
+    raw JSONB,
+    block_timestamp NUMERIC,
+    eth_node_id INTEGER NOT NULL REFERENCES eth_nodes (id) ON DELETE CASCADE,
+    eth_node_fingerprint VARCHAR(128)
 );

 -- Index is removed when table is
@@ -1,7 +1,9 @@
 -- +goose Up
-CREATE TABLE public.checked_headers (
-  id SERIAL PRIMARY KEY,
-  header_id INTEGER UNIQUE NOT NULL REFERENCES headers (id) ON DELETE CASCADE
+CREATE TABLE public.checked_headers
+(
+    id SERIAL PRIMARY KEY,
+    header_id INTEGER UNIQUE NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
+    check_count INTEGER NOT NULL DEFAULT 1
 );

 -- +goose Down
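The new check_count column is what the extractor's recheck cap compares against. A companion sketch to the hypothetical CheckedHeadersRepository after the commit message, showing how MarkHeaderChecked might maintain it; the upsert shape is an assumption, not code from this commit:

// MarkHeaderChecked upserts the row and increments check_count, the value the
// recheck cap (and the capped MissingHeaders query above) compares against.
func (repo CheckedHeadersRepository) MarkHeaderChecked(headerID int64) error {
    _, err := repo.db.Exec(
        `INSERT INTO public.checked_headers (header_id)
         VALUES ($1)
         ON CONFLICT (header_id) DO UPDATE
             SET check_count = checked_headers.check_count + 1`,
        headerID)
    return err
}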
@@ -1,12 +1,13 @@
 -- +goose Up
-CREATE TABLE public.queued_storage (
-  id SERIAL PRIMARY KEY,
-  block_height BIGINT,
-  block_hash BYTEA,
-  contract BYTEA,
-  storage_key BYTEA,
-  storage_value BYTEA,
-  UNIQUE (block_height, block_hash, contract, storage_key, storage_value)
+CREATE TABLE public.queued_storage
+(
+    id SERIAL PRIMARY KEY,
+    block_height BIGINT,
+    block_hash BYTEA,
+    contract BYTEA,
+    storage_key BYTEA,
+    storage_value BYTEA,
+    UNIQUE (block_height, block_hash, contract, storage_key, storage_value)
 );

 -- +goose Down
@@ -1,18 +1,19 @@
 -- +goose Up
-CREATE TABLE header_sync_transactions (
-  id SERIAL PRIMARY KEY,
-  header_id INTEGER NOT NULL REFERENCES headers(id) ON DELETE CASCADE,
-  hash VARCHAR(66),
-  gas_limit NUMERIC,
-  gas_price NUMERIC,
-  input_data BYTEA,
-  nonce NUMERIC,
-  raw BYTEA,
-  tx_from VARCHAR(44),
-  tx_index INTEGER,
-  tx_to VARCHAR(44),
-  "value" NUMERIC,
-  UNIQUE (header_id, hash)
+CREATE TABLE header_sync_transactions
+(
+    id SERIAL PRIMARY KEY,
+    header_id INTEGER NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
+    hash VARCHAR(66),
+    gas_limit NUMERIC,
+    gas_price NUMERIC,
+    input_data BYTEA,
+    nonce NUMERIC,
+    raw BYTEA,
+    tx_from VARCHAR(44),
+    tx_index INTEGER,
+    tx_to VARCHAR(44),
+    "value" NUMERIC,
+    UNIQUE (header_id, hash)
 );

 -- +goose Down
@@ -1,15 +1,16 @@
 -- +goose Up
-CREATE TABLE public.uncles (
-  id SERIAL PRIMARY KEY,
-  hash VARCHAR(66) NOT NULL,
-  block_id INTEGER NOT NULL REFERENCES blocks (id) ON DELETE CASCADE,
-  reward NUMERIC NOT NULL,
-  miner VARCHAR(42) NOT NULL,
-  raw JSONB,
-  block_timestamp NUMERIC,
-  eth_node_id INTEGER NOT NULL REFERENCES eth_nodes (id) ON DELETE CASCADE,
-  eth_node_fingerprint VARCHAR(128),
-  UNIQUE (block_id, hash)
+CREATE TABLE public.uncles
+(
+    id SERIAL PRIMARY KEY,
+    hash VARCHAR(66) NOT NULL,
+    block_id INTEGER NOT NULL REFERENCES blocks (id) ON DELETE CASCADE,
+    reward NUMERIC NOT NULL,
+    miner VARCHAR(42) NOT NULL,
+    raw JSONB,
+    block_timestamp NUMERIC,
+    eth_node_id INTEGER NOT NULL REFERENCES eth_nodes (id) ON DELETE CASCADE,
+    eth_node_fingerprint VARCHAR(128),
+    UNIQUE (block_id, hash)
 );

 -- +goose Down
@@ -13,6 +13,7 @@ CREATE TABLE header_sync_logs
     tx_index INTEGER,
     log_index INTEGER,
     raw JSONB,
+    transformed BOOL NOT NULL DEFAULT FALSE,
     UNIQUE (header_id, tx_index, log_index)
 );
@@ -131,7 +131,8 @@ ALTER SEQUENCE public.blocks_id_seq OWNED BY public.blocks.id;

 CREATE TABLE public.checked_headers (
     id integer NOT NULL,
-    header_id integer NOT NULL
+    header_id integer NOT NULL,
+    check_count integer DEFAULT 1 NOT NULL
 );
||||
@ -311,7 +312,8 @@ CREATE TABLE public.header_sync_logs (
|
||||
tx_hash character varying(66),
|
||||
tx_index integer,
|
||||
log_index integer,
|
||||
raw jsonb
|
||||
raw jsonb,
|
||||
transformed boolean DEFAULT false NOT NULL
|
||||
);
|
||||
|
||||
|
||||
|
@@ -38,7 +38,7 @@ var _ = Describe("Reading contracts", func() {

     Describe("Getting a contract attribute", func() {
         It("retrieves the event log for a specific block and contract", func() {
-            expectedLogZero := core.Log{
+            expectedLogZero := core.FullSyncLog{
                 BlockNumber: 4703824,
                 TxHash:      "0xf896bfd1eb539d881a1a31102b78de9f25cd591bf1fe1924b86148c0b205fd5d",
                 Address:     "0xd26114cd6ee289accf82350c8d8487fedb8a0c07",
@@ -59,7 +59,7 @@ var _ = Describe("Reading contracts", func() {
             blockChain := geth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter)
             contract := testing.SampleContract()

-            logs, err := blockChain.GetLogs(contract, big.NewInt(4703824), nil)
+            logs, err := blockChain.GetFullSyncLogs(contract, big.NewInt(4703824), nil)

             Expect(err).To(BeNil())
             Expect(len(logs)).To(Equal(3))
@@ -76,7 +76,7 @@ var _ = Describe("Reading contracts", func() {
             transactionConverter := rpc2.NewRpcTransactionConverter(ethClient)
             blockChain := geth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter)

-            logs, err := blockChain.GetLogs(core.Contract{Hash: "0x123"}, big.NewInt(4703824), nil)
+            logs, err := blockChain.GetFullSyncLogs(core.Contract{Hash: "0x123"}, big.NewInt(4703824), nil)

             Expect(err).To(BeNil())
             Expect(len(logs)).To(Equal(0))
@@ -17,15 +17,14 @@
 package chunker

 import (
-    "strings"
-
     "github.com/ethereum/go-ethereum/common"
     "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
     "github.com/vulcanize/vulcanizedb/pkg/core"
+    "strings"
 )

 type Chunker interface {
-    AddConfigs(transformerConfigs []transformer.EventTransformerConfig)
+    AddConfig(transformerConfig transformer.EventTransformerConfig)
     ChunkLogs(logs []core.HeaderSyncLog) map[string][]core.HeaderSyncLog
 }
||||
@ -43,14 +42,12 @@ func NewLogChunker() *LogChunker {
|
||||
}
|
||||
}
|
||||
|
||||
// Configures the chunker by adding more addreses and topics to consider.
|
||||
func (chunker *LogChunker) AddConfigs(transformerConfigs []transformer.EventTransformerConfig) {
|
||||
for _, config := range transformerConfigs {
|
||||
for _, address := range config.ContractAddresses {
|
||||
var lowerCaseAddress = strings.ToLower(address)
|
||||
chunker.AddressToNames[lowerCaseAddress] = append(chunker.AddressToNames[lowerCaseAddress], config.TransformerName)
|
||||
chunker.NameToTopic0[config.TransformerName] = common.HexToHash(config.Topic)
|
||||
}
|
||||
// Configures the chunker by adding one config with more addresses and topics to consider.
|
||||
func (chunker *LogChunker) AddConfig(transformerConfig transformer.EventTransformerConfig) {
|
||||
for _, address := range transformerConfig.ContractAddresses {
|
||||
var lowerCaseAddress = strings.ToLower(address)
|
||||
chunker.AddressToNames[lowerCaseAddress] = append(chunker.AddressToNames[lowerCaseAddress], transformerConfig.TransformerName)
|
||||
chunker.NameToTopic0[transformerConfig.TransformerName] = common.HexToHash(transformerConfig.Topic)
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -21,7 +21,6 @@ import (
     "github.com/ethereum/go-ethereum/core/types"
     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
-
     chunk "github.com/vulcanize/vulcanizedb/libraries/shared/chunker"
     "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
     "github.com/vulcanize/vulcanizedb/pkg/core"
@@ -29,31 +28,32 @@ import (

 var _ = Describe("Log chunker", func() {
     var (
-        configs []transformer.EventTransformerConfig
         chunker *chunk.LogChunker
     )

     BeforeEach(func() {
+        chunker = chunk.NewLogChunker()
+
         configA := transformer.EventTransformerConfig{
             TransformerName:   "TransformerA",
             ContractAddresses: []string{"0x00000000000000000000000000000000000000A1", "0x00000000000000000000000000000000000000A2"},
             Topic:             "0xA",
         }
+        chunker.AddConfig(configA)
+
         configB := transformer.EventTransformerConfig{
             TransformerName:   "TransformerB",
             ContractAddresses: []string{"0x00000000000000000000000000000000000000B1"},
             Topic:             "0xB",
         }
+        chunker.AddConfig(configB)
+
         configC := transformer.EventTransformerConfig{
             TransformerName:   "TransformerC",
             ContractAddresses: []string{"0x00000000000000000000000000000000000000A2"},
             Topic:             "0xC",
         }
-
-        configs = []transformer.EventTransformerConfig{configA, configB, configC}
-        chunker = chunk.NewLogChunker()
-        chunker.AddConfigs(configs)
+        chunker.AddConfig(configC)
     })

     Describe("initialisation", func() {
@@ -72,14 +72,14 @@ var _ = Describe("Log chunker", func() {
         })
     })

-    Describe("AddConfigs", func() {
+    Describe("AddConfig", func() {
         It("can add more configs later", func() {
             configD := transformer.EventTransformerConfig{
                 TransformerName:   "TransformerD",
                 ContractAddresses: []string{"0x000000000000000000000000000000000000000D"},
                 Topic:             "0xD",
             }
-            chunker.AddConfigs([]transformer.EventTransformerConfig{configD})
+            chunker.AddConfig(configD)

             Expect(chunker.AddressToNames).To(ContainElement([]string{"TransformerD"}))
             Expect(chunker.NameToTopic0).To(ContainElement(common.HexToHash("0xD")))
@@ -91,7 +91,7 @@ var _ = Describe("Log chunker", func() {
                 ContractAddresses: []string{"0x000000000000000000000000000000000000000D"},
                 Topic:             "0xD",
             }
-            chunker.AddConfigs([]transformer.EventTransformerConfig{configD})
+            chunker.AddConfig(configD)

             Expect(chunker.AddressToNames["0x000000000000000000000000000000000000000d"]).To(Equal([]string{"TransformerD"}))
         })
@@ -21,5 +21,5 @@ type TransformerExecution bool
 const (
     HeaderRecheck TransformerExecution = true
     HeaderMissing TransformerExecution = false
-    RecheckHeaderCap                   = "4"
+    RecheckHeaderCap                   = int64(5)
 )
@@ -16,12 +16,9 @@

 package event

-import (
-    "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
-)
+import "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"

 type Repository interface {
-    Create(headerID int64, models []interface{}) error
-    MarkHeaderChecked(headerID int64) error
+    Create(models []interface{}) error
     SetDB(db *postgres.DB)
 }
@@ -18,7 +18,6 @@ package event

 import (
     "github.com/sirupsen/logrus"
-
     "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
     "github.com/vulcanize/vulcanizedb/pkg/core"
     "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
@@ -35,34 +34,29 @@ func (transformer Transformer) NewTransformer(db *postgres.DB) transformer.EventTransformer
     return transformer
 }

-func (transformer Transformer) Execute(logs []core.HeaderSyncLog, headerID int64) error {
+func (transformer Transformer) Execute(logs []core.HeaderSyncLog) error {
     transformerName := transformer.Config.TransformerName
     config := transformer.Config

     if len(logs) < 1 {
-        err := transformer.Repository.MarkHeaderChecked(headerID)
-        if err != nil {
-            logrus.Printf("Error marking header as checked in %v: %v", transformerName, err)
-            return err
-        }
         return nil
     }

     entities, err := transformer.Converter.ToEntities(config.ContractAbi, logs)
     if err != nil {
-        logrus.Printf("Error converting logs to entities in %v: %v", transformerName, err)
+        logrus.Errorf("error converting logs to entities in %v: %v", transformerName, err)
         return err
     }

     models, err := transformer.Converter.ToModels(entities)
     if err != nil {
-        logrus.Printf("Error converting entities to models in %v: %v", transformerName, err)
+        logrus.Errorf("error converting entities to models in %v: %v", transformerName, err)
         return err
     }

-    err = transformer.Repository.Create(headerID, models)
+    err = transformer.Repository.Create(models)
     if err != nil {
-        logrus.Printf("Error persisting %v record: %v", transformerName, err)
+        logrus.Errorf("error persisting %v record: %v", transformerName, err)
         return err
     }

@@ -17,22 +17,20 @@
 package event_test

 import (
-    "math/rand"
-
     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
-
     "github.com/vulcanize/vulcanizedb/libraries/shared/factories/event"
     "github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
     "github.com/vulcanize/vulcanizedb/libraries/shared/test_data"
    "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
     "github.com/vulcanize/vulcanizedb/pkg/core"
     "github.com/vulcanize/vulcanizedb/pkg/fakes"
+    "math/rand"
 )

 var _ = Describe("Transformer", func() {
     var (
-        repository mocks.MockRepository
+        repository mocks.MockEventRepository
         converter  mocks.MockConverter
         t          transformer.EventTransformer
         headerOne  core.Header
@@ -41,7 +39,7 @@ var _ = Describe("Transformer", func() {
     )

     BeforeEach(func() {
-        repository = mocks.MockRepository{}
+        repository = mocks.MockEventRepository{}
         converter = mocks.MockConverter{}

         t = event.Transformer{
@@ -64,15 +62,8 @@ var _ = Describe("Transformer", func() {
         Expect(repository.SetDbCalled).To(BeTrue())
     })

-    It("marks header checked if no logs returned", func() {
-        err := t.Execute([]core.HeaderSyncLog{}, headerOne.Id)
-
-        Expect(err).NotTo(HaveOccurred())
-        repository.AssertMarkHeaderCheckedCalledWith(headerOne.Id)
-    })
-
     It("doesn't attempt to convert or persist an empty collection when there are no logs", func() {
-        err := t.Execute([]core.HeaderSyncLog{}, headerOne.Id)
+        err := t.Execute([]core.HeaderSyncLog{})

         Expect(err).NotTo(HaveOccurred())
         Expect(converter.ToEntitiesCalledCounter).To(Equal(0))
@@ -80,24 +71,8 @@ var _ = Describe("Transformer", func() {
         Expect(repository.CreateCalledCounter).To(Equal(0))
     })

-    It("does not call repository.MarkCheckedHeader when there are logs", func() {
-        err := t.Execute(logs, headerOne.Id)
-
-        Expect(err).NotTo(HaveOccurred())
-        repository.AssertMarkHeaderCheckedNotCalled()
-    })
-
-    It("returns error if marking header checked returns err", func() {
-        repository.SetMarkHeaderCheckedError(fakes.FakeError)
-
-        err := t.Execute([]core.HeaderSyncLog{}, headerOne.Id)
-
-        Expect(err).To(HaveOccurred())
-        Expect(err).To(MatchError(fakes.FakeError))
-    })
-
     It("converts an eth log to an entity", func() {
-        err := t.Execute(logs, headerOne.Id)
+        err := t.Execute(logs)

         Expect(err).NotTo(HaveOccurred())
         Expect(converter.ContractAbi).To(Equal(config.ContractAbi))
|
||||
It("returns an error if converter fails", func() {
|
||||
converter.ToEntitiesError = fakes.FakeError
|
||||
|
||||
err := t.Execute(logs, headerOne.Id)
|
||||
err := t.Execute(logs)
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err).To(MatchError(fakes.FakeError))
|
||||
@ -116,7 +91,7 @@ var _ = Describe("Transformer", func() {
|
||||
It("converts an entity to a model", func() {
|
||||
converter.EntitiesToReturn = []interface{}{test_data.GenericEntity{}}
|
||||
|
||||
err := t.Execute(logs, headerOne.Id)
|
||||
err := t.Execute(logs)
|
||||
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(converter.EntitiesToConvert[0]).To(Equal(test_data.GenericEntity{}))
|
||||
@ -126,7 +101,7 @@ var _ = Describe("Transformer", func() {
|
||||
converter.EntitiesToReturn = []interface{}{test_data.GenericEntity{}}
|
||||
converter.ToModelsError = fakes.FakeError
|
||||
|
||||
err := t.Execute(logs, headerOne.Id)
|
||||
err := t.Execute(logs)
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err).To(MatchError(fakes.FakeError))
|
||||
@ -135,16 +110,15 @@ var _ = Describe("Transformer", func() {
|
||||
It("persists the record", func() {
|
||||
converter.ModelsToReturn = []interface{}{test_data.GenericModel{}}
|
||||
|
||||
err := t.Execute(logs, headerOne.Id)
|
||||
err := t.Execute(logs)
|
||||
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(repository.PassedHeaderID).To(Equal(headerOne.Id))
|
||||
Expect(repository.PassedModels[0]).To(Equal(test_data.GenericModel{}))
|
||||
})
|
||||
|
||||
It("returns error if persisting the record fails", func() {
|
||||
repository.SetCreateError(fakes.FakeError)
|
||||
err := t.Execute(logs, headerOne.Id)
|
||||
err := t.Execute(logs)
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err).To(MatchError(fakes.FakeError))
|
||||
|
@@ -14,14 +14,14 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

-package repository
+package storage

 import (
     "github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
     "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
 )

-type StorageRepository interface {
+type Repository interface {
     Create(blockNumber int, blockHash string, metadata utils.StorageValueMetadata, value interface{}) error
     SetDB(db *postgres.DB)
 }
@@ -19,7 +19,6 @@ package storage

 import (
     "github.com/ethereum/go-ethereum/common"

-    "github.com/vulcanize/vulcanizedb/libraries/shared/repository"
     "github.com/vulcanize/vulcanizedb/libraries/shared/storage"
     "github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
     "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
@@ -29,7 +28,7 @@ import (
 type Transformer struct {
     Address    common.Address
     Mappings   storage.Mappings
-    Repository repository.StorageRepository
+    Repository Repository
 }

 func (transformer Transformer) NewTransformer(db *postgres.DB) transformer.StorageTransformer {
@@ -26,6 +26,7 @@ import (

 type ILogFetcher interface {
     FetchLogs(contractAddresses []common.Address, topics []common.Hash, missingHeader core.Header) ([]types.Log, error)
+    // TODO Extend FetchLogs for doing several blocks at a time
 }

 type LogFetcher struct {
libraries/shared/logs/delegator.go (new file, 78 lines)
@@ -0,0 +1,78 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package logs

import (
    "errors"
    "github.com/sirupsen/logrus"
    "github.com/vulcanize/vulcanizedb/libraries/shared/chunker"
    "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
    "github.com/vulcanize/vulcanizedb/pkg/core"
    "github.com/vulcanize/vulcanizedb/pkg/datastore"
)

var ErrNoTransformers = errors.New("no event transformers configured in the log delegator")

type ILogDelegator interface {
    AddTransformer(t transformer.EventTransformer)
    DelegateLogs() error
}

type LogDelegator struct {
    Chunker       chunker.Chunker
    LogRepository datastore.HeaderSyncLogRepository
    Transformers  []transformer.EventTransformer
}

func (delegator *LogDelegator) AddTransformer(t transformer.EventTransformer) {
    delegator.Transformers = append(delegator.Transformers, t)
    delegator.Chunker.AddConfig(t.GetConfig())
}

func (delegator LogDelegator) DelegateLogs() error {
    if len(delegator.Transformers) < 1 {
        return ErrNoTransformers
    }

    persistedLogs, fetchErr := delegator.LogRepository.GetUntransformedHeaderSyncLogs()
    if fetchErr != nil {
        logrus.Errorf("error loading logs from db: %s", fetchErr.Error())
        return fetchErr
    }

    transformErr := delegator.delegateLogs(persistedLogs)
    if transformErr != nil {
        logrus.Errorf("error transforming logs: %s", transformErr)
        return transformErr
    }

    return nil
}

func (delegator *LogDelegator) delegateLogs(logs []core.HeaderSyncLog) error {
    chunkedLogs := delegator.Chunker.ChunkLogs(logs)
    for _, t := range delegator.Transformers {
        transformerName := t.GetConfig().TransformerName
        logChunk := chunkedLogs[transformerName]
        err := t.Execute(logChunk)
        if err != nil {
            logrus.Errorf("%v transformer failed to execute in watcher: %v", transformerName, err)
            return err
        }
    }
    return nil
}
libraries/shared/logs/delegator_test.go (new file, 134 lines)
@@ -0,0 +1,134 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package logs_test

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    "github.com/vulcanize/vulcanizedb/libraries/shared/chunker"
    "github.com/vulcanize/vulcanizedb/libraries/shared/logs"
    "github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
    "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
    "github.com/vulcanize/vulcanizedb/pkg/core"
    "github.com/vulcanize/vulcanizedb/pkg/fakes"
    "strings"
)

var _ = Describe("Log delegator", func() {
    Describe("AddTransformer", func() {
        It("adds transformers to the delegator", func() {
            fakeTransformer := &mocks.MockEventTransformer{}
            delegator := logs.LogDelegator{Chunker: chunker.NewLogChunker()}

            delegator.AddTransformer(fakeTransformer)

            Expect(delegator.Transformers).To(Equal([]transformer.EventTransformer{fakeTransformer}))
        })

        It("passes transformers' configs to the chunker", func() {
            fakeTransformer := &mocks.MockEventTransformer{}
            fakeConfig := mocks.FakeTransformerConfig
            fakeTransformer.SetTransformerConfig(fakeConfig)
            chunker := chunker.NewLogChunker()
            delegator := logs.LogDelegator{Chunker: chunker}

            delegator.AddTransformer(fakeTransformer)

            expectedName := fakeConfig.TransformerName
            expectedTopic := common.HexToHash(fakeConfig.Topic)
            Expect(chunker.NameToTopic0).To(Equal(map[string]common.Hash{expectedName: expectedTopic}))
            expectedAddress := strings.ToLower(fakeConfig.ContractAddresses[0])
            Expect(chunker.AddressToNames).To(Equal(map[string][]string{expectedAddress: {expectedName}}))
        })
    })

    Describe("DelegateLogs", func() {
        It("returns an error if no transformers configured", func() {
            delegator := newDelegator(&fakes.MockHeaderSyncLogRepository{})

            err := delegator.DelegateLogs()

            Expect(err).To(HaveOccurred())
            Expect(err).To(MatchError(logs.ErrNoTransformers))
        })

        It("gets untransformed logs", func() {
            mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
            delegator := newDelegator(mockLogRepository)
            delegator.AddTransformer(&mocks.MockEventTransformer{})

            err := delegator.DelegateLogs()

            Expect(err).NotTo(HaveOccurred())
            Expect(mockLogRepository.GetCalled).To(BeTrue())
        })

        It("returns error if getting untransformed logs fails", func() {
            mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
            mockLogRepository.GetError = fakes.FakeError
            delegator := newDelegator(mockLogRepository)
            delegator.AddTransformer(&mocks.MockEventTransformer{})

            err := delegator.DelegateLogs()

            Expect(err).To(HaveOccurred())
            Expect(err).To(MatchError(fakes.FakeError))
        })

        It("delegates chunked logs to transformers", func() {
            fakeTransformer := &mocks.MockEventTransformer{}
            fakeTransformer.SetTransformerConfig(mocks.FakeTransformerConfig)
            fakeGethLog := types.Log{
                Address: common.HexToAddress(fakeTransformer.GetConfig().ContractAddresses[0]),
                Topics:  []common.Hash{common.HexToHash(fakeTransformer.GetConfig().Topic)},
            }
            fakeHeaderSyncLog := core.HeaderSyncLog{Log: fakeGethLog}
            fakeHeaderSyncLogs := []core.HeaderSyncLog{fakeHeaderSyncLog}
            mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
            mockLogRepository.ReturnLogs = fakeHeaderSyncLogs
            delegator := newDelegator(mockLogRepository)
            delegator.AddTransformer(fakeTransformer)

            err := delegator.DelegateLogs()

            Expect(err).NotTo(HaveOccurred())
            Expect(fakeTransformer.ExecuteWasCalled).To(BeTrue())
            Expect(fakeTransformer.PassedLogs).To(Equal(fakeHeaderSyncLogs))
        })

        It("returns an error if transformer returns an error", func() {
            delegator := newDelegator(&fakes.MockHeaderSyncLogRepository{})
            fakeTransformer := &mocks.MockEventTransformer{ExecuteError: fakes.FakeError}
            delegator.AddTransformer(fakeTransformer)

            err := delegator.DelegateLogs()

            Expect(err).To(HaveOccurred())
            Expect(err).To(MatchError(fakes.FakeError))
        })
    })
})

func newDelegator(headerSyncLogRepository *fakes.MockHeaderSyncLogRepository) logs.LogDelegator {
    return logs.LogDelegator{
        Chunker:       chunker.NewLogChunker(),
        LogRepository: headerSyncLogRepository,
    }
}
libraries/shared/logs/extractor.go (new file, 122 lines)
@@ -0,0 +1,122 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package logs

import (
    "errors"
    "github.com/ethereum/go-ethereum/common"
    "github.com/sirupsen/logrus"
    "github.com/vulcanize/vulcanizedb/libraries/shared/constants"
    "github.com/vulcanize/vulcanizedb/libraries/shared/fetcher"
    "github.com/vulcanize/vulcanizedb/libraries/shared/transactions"
    "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
    "github.com/vulcanize/vulcanizedb/pkg/core"
    "github.com/vulcanize/vulcanizedb/pkg/datastore"
)

var ErrNoWatchedAddresses = errors.New("no watched addresses configured in the log extractor")

type ILogExtractor interface {
    AddTransformerConfig(config transformer.EventTransformerConfig)
    ExtractLogs(recheckHeaders constants.TransformerExecution) error
}

type LogExtractor struct {
    Addresses                []common.Address
    CheckedHeadersRepository datastore.CheckedHeadersRepository
    Fetcher                  fetcher.ILogFetcher
    LogRepository            datastore.HeaderSyncLogRepository
    StartingBlock            *int64
    Syncer                   transactions.ITransactionsSyncer
    Topics                   []common.Hash
}

// Add additional logs to extract
func (extractor *LogExtractor) AddTransformerConfig(config transformer.EventTransformerConfig) {
    if extractor.StartingBlock == nil {
        extractor.StartingBlock = &config.StartingBlockNumber
    } else if earlierStartingBlockNumber(config.StartingBlockNumber, *extractor.StartingBlock) {
        extractor.StartingBlock = &config.StartingBlockNumber
    }

    addresses := transformer.HexStringsToAddresses(config.ContractAddresses)
    extractor.Addresses = append(extractor.Addresses, addresses...)
    extractor.Topics = append(extractor.Topics, common.HexToHash(config.Topic))
}

// Fetch and persist watched logs
func (extractor LogExtractor) ExtractLogs(recheckHeaders constants.TransformerExecution) error {
    if len(extractor.Addresses) < 1 {
        logrus.Errorf("error extracting logs: %s", ErrNoWatchedAddresses.Error())
        return ErrNoWatchedAddresses
    }

    missingHeaders, missingHeadersErr := extractor.CheckedHeadersRepository.MissingHeaders(*extractor.StartingBlock, -1, getCheckCount(recheckHeaders))
    if missingHeadersErr != nil {
        logrus.Errorf("error fetching missing headers: %s", missingHeadersErr)
        return missingHeadersErr
    }

    for _, header := range missingHeaders {
        logs, fetchLogsErr := extractor.Fetcher.FetchLogs(extractor.Addresses, extractor.Topics, header)
        if fetchLogsErr != nil {
            logError("error fetching logs for header: %s", fetchLogsErr, header)
            return fetchLogsErr
        }

        if len(logs) > 0 {
            transactionsSyncErr := extractor.Syncer.SyncTransactions(header.Id, logs)
            if transactionsSyncErr != nil {
                logError("error syncing transactions: %s", transactionsSyncErr, header)
                return transactionsSyncErr
            }

            createLogsErr := extractor.LogRepository.CreateHeaderSyncLogs(header.Id, logs)
            if createLogsErr != nil {
                logError("error persisting logs: %s", createLogsErr, header)
                return createLogsErr
            }
        }

        markHeaderCheckedErr := extractor.CheckedHeadersRepository.MarkHeaderChecked(header.Id)
        if markHeaderCheckedErr != nil {
            logError("error marking header checked: %s", markHeaderCheckedErr, header)
            return markHeaderCheckedErr
        }
    }
    return nil
}

func earlierStartingBlockNumber(transformerBlock, watcherBlock int64) bool {
    return transformerBlock < watcherBlock
}

func logError(description string, err error, header core.Header) {
    logrus.WithFields(logrus.Fields{
        "headerId":    header.Id,
        "headerHash":  header.Hash,
        "blockNumber": header.BlockNumber,
    }).Errorf(description, err.Error())
}

func getCheckCount(recheckHeaders constants.TransformerExecution) int64 {
    if recheckHeaders == constants.HeaderMissing {
        return 1
    } else {
        return constants.RecheckHeaderCap
    }
}
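Nothing in this file couples extraction to delegation: ExtractLogs and DelegateLogs share only the persisted header-sync logs. A hypothetical wiring sketch (not part of this commit's diff) showing how a watcher could run the two loops independently, using only the ILogExtractor and ILogDelegator interfaces defined above, so a slow extraction pass no longer stalls transformer delegation:

package watcher

import (
    "time"

    "github.com/sirupsen/logrus"

    "github.com/vulcanize/vulcanizedb/libraries/shared/constants"
    "github.com/vulcanize/vulcanizedb/libraries/shared/logs"
)

// Watch runs extraction and delegation on independent loops. Because the
// extractor caps each MissingHeaders batch, one extraction pass is bounded,
// and the delegator keeps draining untransformed logs in the meantime.
func Watch(extractor logs.ILogExtractor, delegator logs.ILogDelegator) {
    go func() {
        for {
            if err := extractor.ExtractLogs(constants.HeaderMissing); err != nil {
                logrus.Errorf("error extracting logs: %s", err.Error())
            }
            time.Sleep(time.Second)
        }
    }()
    for {
        if err := delegator.DelegateLogs(); err != nil {
            logrus.Errorf("error delegating logs: %s", err.Error())
        }
        time.Sleep(time.Second)
    }
}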
libraries/shared/logs/extractor_test.go (new file, 310 lines)
@@ -0,0 +1,310 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package logs_test

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    "github.com/vulcanize/vulcanizedb/libraries/shared/constants"
    "github.com/vulcanize/vulcanizedb/libraries/shared/logs"
    "github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
    "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
    "github.com/vulcanize/vulcanizedb/pkg/core"
    "github.com/vulcanize/vulcanizedb/pkg/fakes"
    "math/rand"
)

var _ = Describe("Log extractor", func() {
    var extractor *logs.LogExtractor

    BeforeEach(func() {
        extractor = &logs.LogExtractor{
            Fetcher:                  &mocks.MockLogFetcher{},
            CheckedHeadersRepository: &fakes.MockCheckedHeadersRepository{},
            LogRepository:            &fakes.MockHeaderSyncLogRepository{},
            Syncer:                   &fakes.MockTransactionSyncer{},
        }
    })

    Describe("AddTransformerConfig", func() {
        It("it includes earliest starting block number in fetch logs query", func() {
            earlierStartingBlockNumber := rand.Int63()
            laterStartingBlockNumber := earlierStartingBlockNumber + 1

            extractor.AddTransformerConfig(getTransformerConfig(laterStartingBlockNumber))
            extractor.AddTransformerConfig(getTransformerConfig(earlierStartingBlockNumber))

            Expect(*extractor.StartingBlock).To(Equal(earlierStartingBlockNumber))
        })

        It("includes added addresses in fetch logs query", func() {
            addresses := []string{"0xA", "0xB"}
            configWithAddresses := transformer.EventTransformerConfig{
                ContractAddresses:   addresses,
                StartingBlockNumber: rand.Int63(),
            }

            extractor.AddTransformerConfig(configWithAddresses)

            expectedAddresses := transformer.HexStringsToAddresses(addresses)
            Expect(extractor.Addresses).To(Equal(expectedAddresses))
        })

        It("includes added topics in fetch logs query", func() {
            topic := "0x1"
            configWithTopic := transformer.EventTransformerConfig{
                ContractAddresses:   []string{fakes.FakeAddress.Hex()},
                Topic:               topic,
                StartingBlockNumber: rand.Int63(),
            }

            extractor.AddTransformerConfig(configWithTopic)

            Expect(extractor.Topics).To(Equal([]common.Hash{common.HexToHash(topic)}))
        })
    })

    Describe("ExtractLogs", func() {
        It("returns error if no watched addresses configured", func() {
            err := extractor.ExtractLogs(constants.HeaderMissing)

            Expect(err).To(HaveOccurred())
            Expect(err).To(MatchError(logs.ErrNoWatchedAddresses))
        })

        Describe("when checking missing headers", func() {
            It("gets missing headers since configured starting block with check count < 1", func() {
                mockCheckedHeadersRepository := &fakes.MockCheckedHeadersRepository{}
                mockCheckedHeadersRepository.ReturnHeaders = []core.Header{{}}
                extractor.CheckedHeadersRepository = mockCheckedHeadersRepository
                startingBlockNumber := rand.Int63()
                extractor.AddTransformerConfig(getTransformerConfig(startingBlockNumber))

                err := extractor.ExtractLogs(constants.HeaderMissing)

                Expect(err).NotTo(HaveOccurred())
                Expect(mockCheckedHeadersRepository.StartingBlockNumber).To(Equal(startingBlockNumber))
                Expect(mockCheckedHeadersRepository.EndingBlockNumber).To(Equal(int64(-1)))
                Expect(mockCheckedHeadersRepository.CheckCount).To(Equal(int64(1)))
            })
        })

        Describe("when rechecking headers", func() {
            It("gets missing headers since configured starting block with check count < 1", func() {
                mockCheckedHeadersRepository := &fakes.MockCheckedHeadersRepository{}
                mockCheckedHeadersRepository.ReturnHeaders = []core.Header{{}}
                extractor.CheckedHeadersRepository = mockCheckedHeadersRepository
                startingBlockNumber := rand.Int63()
                extractor.AddTransformerConfig(getTransformerConfig(startingBlockNumber))

                err := extractor.ExtractLogs(constants.HeaderRecheck)

                Expect(err).NotTo(HaveOccurred())
                Expect(mockCheckedHeadersRepository.StartingBlockNumber).To(Equal(startingBlockNumber))
                Expect(mockCheckedHeadersRepository.EndingBlockNumber).To(Equal(int64(-1)))
                Expect(mockCheckedHeadersRepository.CheckCount).To(Equal(constants.RecheckHeaderCap))
            })
        })

        It("returns error if getting missing headers fails", func() {
            addTransformerConfig(extractor)
            mockCheckedHeadersRepository := &fakes.MockCheckedHeadersRepository{}
            mockCheckedHeadersRepository.MissingHeadersReturnError = fakes.FakeError
            extractor.CheckedHeadersRepository = mockCheckedHeadersRepository

            err := extractor.ExtractLogs(constants.HeaderMissing)

            Expect(err).To(HaveOccurred())
            Expect(err).To(MatchError(fakes.FakeError))
        })

        It("does not fetch logs if no missing headers", func() {
            addTransformerConfig(extractor)
            mockLogFetcher := &mocks.MockLogFetcher{}
            extractor.Fetcher = mockLogFetcher

            err := extractor.ExtractLogs(constants.HeaderMissing)

            Expect(err).NotTo(HaveOccurred())
            Expect(mockLogFetcher.FetchCalled).To(BeFalse())
        })

        It("fetches logs for missing headers", func() {
            addMissingHeader(extractor)
            config := transformer.EventTransformerConfig{
                ContractAddresses:   []string{fakes.FakeAddress.Hex()},
                Topic:               fakes.FakeHash.Hex(),
                StartingBlockNumber: rand.Int63(),
            }
            extractor.AddTransformerConfig(config)
            mockLogFetcher := &mocks.MockLogFetcher{}
            extractor.Fetcher = mockLogFetcher

            err := extractor.ExtractLogs(constants.HeaderMissing)

            Expect(err).NotTo(HaveOccurred())
            Expect(mockLogFetcher.FetchCalled).To(BeTrue())
            Expect(mockLogFetcher.Topics).To(Equal([]common.Hash{common.HexToHash(config.Topic)}))
            Expect(mockLogFetcher.ContractAddresses).To(Equal(transformer.HexStringsToAddresses(config.ContractAddresses)))
        })

        It("returns error if fetching logs fails", func() {
            addMissingHeader(extractor)
            addTransformerConfig(extractor)
            mockLogFetcher := &mocks.MockLogFetcher{}
            mockLogFetcher.ReturnError = fakes.FakeError
            extractor.Fetcher = mockLogFetcher

            err := extractor.ExtractLogs(constants.HeaderMissing)

            Expect(err).To(HaveOccurred())
            Expect(err).To(MatchError(fakes.FakeError))
        })

        It("does not sync transactions if no fetched logs", func() {
            addMissingHeader(extractor)
            addTransformerConfig(extractor)
            mockTransactionSyncer := &fakes.MockTransactionSyncer{}
            extractor.Syncer = mockTransactionSyncer

            err := extractor.ExtractLogs(constants.HeaderMissing)

            Expect(err).NotTo(HaveOccurred())
            Expect(mockTransactionSyncer.SyncTransactionsCalled).To(BeFalse())
        })

        It("syncs transactions for fetched logs", func() {
            addMissingHeader(extractor)
            addFetchedLog(extractor)
            addTransformerConfig(extractor)
            mockTransactionSyncer := &fakes.MockTransactionSyncer{}
            extractor.Syncer = mockTransactionSyncer

            err := extractor.ExtractLogs(constants.HeaderMissing)

            Expect(err).NotTo(HaveOccurred())
            Expect(mockTransactionSyncer.SyncTransactionsCalled).To(BeTrue())
        })

        It("returns error if syncing transactions fails", func() {
            addMissingHeader(extractor)
            addFetchedLog(extractor)
            addTransformerConfig(extractor)
            mockTransactionSyncer := &fakes.MockTransactionSyncer{}
            mockTransactionSyncer.SyncTransactionsError = fakes.FakeError
            extractor.Syncer = mockTransactionSyncer

            err := extractor.ExtractLogs(constants.HeaderMissing)

            Expect(err).To(HaveOccurred())
            Expect(err).To(MatchError(fakes.FakeError))
        })

        It("persists fetched logs", func() {
            addMissingHeader(extractor)
            addTransformerConfig(extractor)
            fakeLogs := []types.Log{{
                Address: common.HexToAddress("0xA"),
                Topics:  []common.Hash{common.HexToHash("0xA")},
                Data:    []byte{},
                Index:   0,
            }}
            mockLogFetcher := &mocks.MockLogFetcher{ReturnLogs: fakeLogs}
            extractor.Fetcher = mockLogFetcher
            mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
            extractor.LogRepository = mockLogRepository

            err := extractor.ExtractLogs(constants.HeaderMissing)

            Expect(err).NotTo(HaveOccurred())
            Expect(mockLogRepository.PassedLogs).To(Equal(fakeLogs))
        })

        It("returns error if persisting logs fails", func() {
            addMissingHeader(extractor)
            addFetchedLog(extractor)
            addTransformerConfig(extractor)
            mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
            mockLogRepository.CreateError = fakes.FakeError
            extractor.LogRepository = mockLogRepository

            err := extractor.ExtractLogs(constants.HeaderMissing)

            Expect(err).To(HaveOccurred())
            Expect(err).To(MatchError(fakes.FakeError))
        })

        It("marks header checked", func() {
            addFetchedLog(extractor)
            addTransformerConfig(extractor)
            mockCheckedHeadersRepository := &fakes.MockCheckedHeadersRepository{}
            headerID := rand.Int63()
            mockCheckedHeadersRepository.ReturnHeaders = []core.Header{{Id: headerID}}
            extractor.CheckedHeadersRepository = mockCheckedHeadersRepository

            err := extractor.ExtractLogs(constants.HeaderMissing)

            Expect(err).NotTo(HaveOccurred())
            Expect(mockCheckedHeadersRepository.HeaderID).To(Equal(headerID))
|
||||
})
|
||||
|
||||
It("returns error if marking header checked fails", func() {
|
||||
addFetchedLog(extractor)
|
||||
addTransformerConfig(extractor)
|
||||
mockCheckedHeadersRepository := &fakes.MockCheckedHeadersRepository{}
|
||||
mockCheckedHeadersRepository.ReturnHeaders = []core.Header{{Id: rand.Int63()}}
|
||||
mockCheckedHeadersRepository.MarkHeaderCheckedReturnError = fakes.FakeError
|
||||
extractor.CheckedHeadersRepository = mockCheckedHeadersRepository
|
||||
|
||||
err := extractor.ExtractLogs(constants.HeaderMissing)
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err).To(MatchError(fakes.FakeError))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func addTransformerConfig(extractor *logs.LogExtractor) {
|
||||
fakeConfig := transformer.EventTransformerConfig{
|
||||
ContractAddresses: []string{fakes.FakeAddress.Hex()},
|
||||
Topic: fakes.FakeHash.Hex(),
|
||||
StartingBlockNumber: rand.Int63(),
|
||||
}
|
||||
extractor.AddTransformerConfig(fakeConfig)
|
||||
}
|
||||
|
||||
func addMissingHeader(extractor *logs.LogExtractor) {
|
||||
mockCheckedHeadersRepository := &fakes.MockCheckedHeadersRepository{}
|
||||
mockCheckedHeadersRepository.ReturnHeaders = []core.Header{{}}
|
||||
extractor.CheckedHeadersRepository = mockCheckedHeadersRepository
|
||||
}
|
||||
|
||||
func addFetchedLog(extractor *logs.LogExtractor) {
|
||||
mockLogFetcher := &mocks.MockLogFetcher{}
|
||||
mockLogFetcher.ReturnLogs = []types.Log{{}}
|
||||
extractor.Fetcher = mockLogFetcher
|
||||
}
|
||||
|
||||
func getTransformerConfig(startingBlockNumber int64) transformer.EventTransformerConfig {
|
||||
return transformer.EventTransformerConfig{
|
||||
ContractAddresses: []string{fakes.FakeAddress.Hex()},
|
||||
Topic: fakes.FakeHash.Hex(),
|
||||
StartingBlockNumber: startingBlockNumber,
|
||||
}
|
||||
}
|
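For context on the query these tests drive through MockCheckedHeadersRepository: per this commit's stated goal, the missing-headers result set is capped at 100 rows so a long backlog of unchecked headers cannot monopolize extraction at delegation's expense. A minimal sketch of such a capped query, assuming a per-header check_count column on checked_headers (the exact query text and column names in the real CheckedHeadersRepository may differ):

    // Sketch only, not the commit's literal implementation. The LIMIT value
    // comes from the commit message ("limit missing headers results set to 100");
    // the check_count column is an assumption about the new checked_headers schema.
    const uncheckedHeadersQuery = `SELECT headers.id, headers.block_number, headers.hash
        FROM headers
        LEFT JOIN checked_headers ON headers.id = checked_headers.header_id
        WHERE (checked_headers.header_id IS NULL OR checked_headers.check_count < $3)
            AND headers.block_number >= $1
            AND (headers.block_number <= $2 OR $2 = -1)
        LIMIT 100`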
libraries/shared/logs/logs_suite_test.go (new file, 35 lines)
@ -0,0 +1,35 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package logs_test

import (
    "github.com/sirupsen/logrus"
    "io/ioutil"
    "testing"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

func TestLogs(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "Logs Suite")
}

var _ = BeforeSuite(func() {
    logrus.SetOutput(ioutil.Discard)
})
@ -23,76 +23,68 @@ import (
    "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
)

type MockRepository struct {
type MockEventRepository struct {
    createError error
    markHeaderCheckedError error
    MarkHeaderCheckedPassedHeaderIDs []int64
    CreatedHeaderIds []int64
    missingHeaders []core.Header
    allHeaders []core.Header
    missingHeadersError error
    PassedStartingBlockNumber int64
    PassedEndingBlockNumber int64
    PassedHeaderID int64
    PassedModels []interface{}
    SetDbCalled bool
    CreateCalledCounter int
}

func (repository *MockRepository) Create(headerID int64, models []interface{}) error {
    repository.PassedHeaderID = headerID
func (repository *MockEventRepository) Create(models []interface{}) error {
    repository.PassedModels = models
    repository.CreatedHeaderIds = append(repository.CreatedHeaderIds, headerID)
    repository.CreateCalledCounter++

    return repository.createError
}

func (repository *MockRepository) MarkHeaderChecked(headerID int64) error {
func (repository *MockEventRepository) MarkHeaderChecked(headerID int64) error {
    repository.MarkHeaderCheckedPassedHeaderIDs = append(repository.MarkHeaderCheckedPassedHeaderIDs, headerID)
    return repository.markHeaderCheckedError
}

func (repository *MockRepository) MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
func (repository *MockEventRepository) MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
    repository.PassedStartingBlockNumber = startingBlockNumber
    repository.PassedEndingBlockNumber = endingBlockNumber
    return repository.missingHeaders, repository.missingHeadersError
}

func (repository *MockRepository) RecheckHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
func (repository *MockEventRepository) RecheckHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
    repository.PassedStartingBlockNumber = startingBlockNumber
    repository.PassedEndingBlockNumber = endingBlockNumber
    return repository.allHeaders, nil
}

func (repository *MockRepository) SetDB(db *postgres.DB) {
func (repository *MockEventRepository) SetDB(db *postgres.DB) {
    repository.SetDbCalled = true
}

func (repository *MockRepository) SetMissingHeadersError(e error) {
func (repository *MockEventRepository) SetMissingHeadersError(e error) {
    repository.missingHeadersError = e
}

func (repository *MockRepository) SetAllHeaders(headers []core.Header) {
func (repository *MockEventRepository) SetAllHeaders(headers []core.Header) {
    repository.allHeaders = headers
}

func (repository *MockRepository) SetMissingHeaders(headers []core.Header) {
func (repository *MockEventRepository) SetMissingHeaders(headers []core.Header) {
    repository.missingHeaders = headers
}

func (repository *MockRepository) SetMarkHeaderCheckedError(e error) {
func (repository *MockEventRepository) SetMarkHeaderCheckedError(e error) {
    repository.markHeaderCheckedError = e
}

func (repository *MockRepository) SetCreateError(e error) {
func (repository *MockEventRepository) SetCreateError(e error) {
    repository.createError = e
}

func (repository *MockRepository) AssertMarkHeaderCheckedCalledWith(i int64) {
func (repository *MockEventRepository) AssertMarkHeaderCheckedCalledWith(i int64) {
    Expect(repository.MarkHeaderCheckedPassedHeaderIDs).To(ContainElement(i))
}

func (repository *MockRepository) AssertMarkHeaderCheckedNotCalled() {
    Expect(len(repository.MarkHeaderCheckedPassedHeaderIDs)).To(Equal(0))
}
@ -20,40 +20,39 @@ import (
    "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
    "github.com/vulcanize/vulcanizedb/pkg/core"
    "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
    "github.com/vulcanize/vulcanizedb/pkg/fakes"
)

type MockTransformer struct {
type MockEventTransformer struct {
    ExecuteWasCalled bool
    ExecuteError error
    PassedLogs []core.HeaderSyncLog
    PassedHeaderID int64
    config transformer.EventTransformerConfig
}

func (mh *MockTransformer) Execute(logs []core.HeaderSyncLog, headerID int64) error {
    if mh.ExecuteError != nil {
        return mh.ExecuteError
func (t *MockEventTransformer) Execute(logs []core.HeaderSyncLog) error {
    if t.ExecuteError != nil {
        return t.ExecuteError
    }
    mh.ExecuteWasCalled = true
    mh.PassedLogs = logs
    mh.PassedHeaderID = headerID
    t.ExecuteWasCalled = true
    t.PassedLogs = logs
    return nil
}

func (mh *MockTransformer) GetConfig() transformer.EventTransformerConfig {
    return mh.config
func (t *MockEventTransformer) GetConfig() transformer.EventTransformerConfig {
    return t.config
}

func (mh *MockTransformer) SetTransformerConfig(config transformer.EventTransformerConfig) {
    mh.config = config
func (t *MockEventTransformer) SetTransformerConfig(config transformer.EventTransformerConfig) {
    t.config = config
}

func (mh *MockTransformer) FakeTransformerInitializer(db *postgres.DB) transformer.EventTransformer {
    return mh
func (t *MockEventTransformer) FakeTransformerInitializer(db *postgres.DB) transformer.EventTransformer {
    return t
}

var FakeTransformerConfig = transformer.EventTransformerConfig{
    TransformerName: "FakeTransformer",
    ContractAddresses: []string{"FakeAddress"},
    Topic: "FakeTopic",
    ContractAddresses: []string{fakes.FakeAddress.Hex()},
    Topic: fakes.FakeHash.Hex(),
}
libraries/shared/mocks/log_delegator.go (new file, 36 lines)
@ -0,0 +1,36 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package mocks

import (
    "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
)

type MockLogDelegator struct {
    AddedTransformers []transformer.EventTransformer
    DelegateCalled bool
    DelegateError error
}

func (delegator *MockLogDelegator) AddTransformer(t transformer.EventTransformer) {
    delegator.AddedTransformers = append(delegator.AddedTransformers, t)
}

func (delegator *MockLogDelegator) DelegateLogs() error {
    delegator.DelegateCalled = true
    return delegator.DelegateError
}
libraries/shared/mocks/log_extractor.go (new file, 37 lines)
@ -0,0 +1,37 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package mocks

import (
    "github.com/vulcanize/vulcanizedb/libraries/shared/constants"
    "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
)

type MockLogExtractor struct {
    AddedConfigs []transformer.EventTransformerConfig
    ExtractLogsCalled bool
    ExtractLogsError error
}

func (extractor *MockLogExtractor) AddTransformerConfig(config transformer.EventTransformerConfig) {
    extractor.AddedConfigs = append(extractor.AddedConfigs, config)
}

func (extractor *MockLogExtractor) ExtractLogs(recheckHeaders constants.TransformerExecution) error {
    extractor.ExtractLogsCalled = true
    return extractor.ExtractLogsError
}
@ -17,26 +17,24 @@
package mocks

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/vulcanize/vulcanizedb/pkg/core"
)

type MockLogNoteConverter struct {
    err error
    returnModels []interface{}
    PassedLogs []types.Log
    ToModelsCalledCounter int
type MockLogFetcher struct {
    ContractAddresses []common.Address
    FetchCalled bool
    MissingHeader core.Header
    ReturnError error
    ReturnLogs []types.Log
    Topics []common.Hash
}

func (converter *MockLogNoteConverter) ToModels(ethLogs []types.Log) ([]interface{}, error) {
    converter.PassedLogs = ethLogs
    converter.ToModelsCalledCounter++
    return converter.returnModels, converter.err
}

func (converter *MockLogNoteConverter) SetConverterError(e error) {
    converter.err = e
}

func (converter *MockLogNoteConverter) SetReturnModels(models []interface{}) {
    converter.returnModels = models
func (fetcher *MockLogFetcher) FetchLogs(contractAddresses []common.Address, topics []common.Hash, missingHeader core.Header) ([]types.Log, error) {
    fetcher.FetchCalled = true
    fetcher.ContractAddresses = contractAddresses
    fetcher.Topics = topics
    fetcher.MissingHeader = missingHeader
    return fetcher.ReturnLogs, fetcher.ReturnError
}
@ -16,192 +16,12 @@

package repository

import (
    "bytes"
    "database/sql"
    "database/sql/driver"
    "fmt"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/jmoiron/sqlx"
    "github.com/lib/pq"
    "github.com/sirupsen/logrus"
import "github.com/jmoiron/sqlx"

    "github.com/vulcanize/vulcanizedb/libraries/shared/constants"
    "github.com/vulcanize/vulcanizedb/pkg/core"
    "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
)

const insertHeaderSyncLogQuery = `INSERT INTO header_sync_logs
    (header_id, address, topics, data, block_number, block_hash, tx_index, tx_hash, log_index, raw)
    VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) ON CONFLICT DO NOTHING RETURNING id`

func MarkHeaderChecked(headerID int64, db *postgres.DB, checkedHeadersColumn string) error {
    _, err := db.Exec(`INSERT INTO public.checked_headers (header_id, `+checkedHeadersColumn+`)
        VALUES ($1, $2)
        ON CONFLICT (header_id) DO
        UPDATE SET `+checkedHeadersColumn+` = checked_headers.`+checkedHeadersColumn+` + 1`, headerID, 1)
    return err
}

func MarkHeaderCheckedInTransaction(headerID int64, tx *sqlx.Tx, checkedHeadersColumn string) error {
func MarkContractWatcherHeaderCheckedInTransaction(headerID int64, tx *sqlx.Tx, checkedHeadersColumn string) error {
    _, err := tx.Exec(`INSERT INTO public.checked_headers (header_id, `+checkedHeadersColumn+`)
        VALUES ($1, $2)
        ON CONFLICT (header_id) DO
        UPDATE SET `+checkedHeadersColumn+` = checked_headers.`+checkedHeadersColumn+` + 1`, headerID, 1)
        UPDATE SET `+checkedHeadersColumn+` = checked_headers.`+checkedHeadersColumn+` + 1`, headerID, 1)
    return err
}

// Treats a header as missing if it's not in the headers table, or not checked for some log type
func MissingHeaders(startingBlockNumber, endingBlockNumber int64, db *postgres.DB, notCheckedSQL string) ([]core.Header, error) {
    var result []core.Header
    var query string
    var err error

    if endingBlockNumber == -1 {
        query = `SELECT headers.id, headers.block_number, headers.hash FROM headers
            LEFT JOIN checked_headers on headers.id = header_id
            WHERE (header_id ISNULL OR ` + notCheckedSQL + `)
            AND headers.block_number >= $1
            AND headers.eth_node_fingerprint = $2`
        err = db.Select(&result, query, startingBlockNumber, db.Node.ID)
    } else {
        query = `SELECT headers.id, headers.block_number, headers.hash FROM headers
            LEFT JOIN checked_headers on headers.id = header_id
            WHERE (header_id ISNULL OR ` + notCheckedSQL + `)
            AND headers.block_number >= $1
            AND headers.block_number <= $2
            AND headers.eth_node_fingerprint = $3`
        err = db.Select(&result, query, startingBlockNumber, endingBlockNumber, db.Node.ID)
    }

    return result, err
}

func GetCheckedColumnNames(db *postgres.DB) ([]string, error) {
    // Query returns `[]driver.Value`, a nullable polymorphic interface
    var queryResult []driver.Value
    columnNamesQuery :=
        `SELECT column_name FROM information_schema.columns
        WHERE table_schema = 'public'
            AND table_name = 'checked_headers'
            AND column_name <> 'id'
            AND column_name <> 'header_id';`

    err := db.Select(&queryResult, columnNamesQuery)
    if err != nil {
        return []string{}, err
    }

    // Transform column names from `driver.Value` to strings
    var columnNames []string
    for _, result := range queryResult {
        if columnName, ok := result.(string); ok {
            columnNames = append(columnNames, columnName)
        } else {
            return []string{}, fmt.Errorf("incorrect value for checked_headers column name")
        }
    }

    return columnNames, nil
}

// Builds a SQL string that checks if any column should be checked/rechecked.
// Defaults to FALSE when no columns are provided.
// Ex: ["columnA", "columnB"] => " (columnA=0 OR columnB=0)"
//     [] => "FALSE"
func CreateHeaderCheckedPredicateSQL(boolColumns []string, recheckHeaders constants.TransformerExecution) string {
    if len(boolColumns) == 0 {
        return "FALSE"
    }

    if recheckHeaders {
        return createHeaderCheckedPredicateSQLForRecheckedHeaders(boolColumns)
    } else {
        return createHeaderCheckedPredicateSQLForMissingHeaders(boolColumns)
    }
}

func CreateLogs(headerID int64, logs []types.Log, db *postgres.DB) ([]core.HeaderSyncLog, error) {
    tx, txErr := db.Beginx()
    if txErr != nil {
        return nil, txErr
    }
    var results []core.HeaderSyncLog
    for _, log := range logs {
        logID, err := insertLog(headerID, log, tx)
        if err != nil {
            if logWasADuplicate(err) {
                continue
            }
            rollbackErr := tx.Rollback()
            if rollbackErr != nil {
                logrus.Errorf("failed to rollback header sync log insert: %s", rollbackErr.Error())
            }
            return nil, err
        }
        results = append(results, buildLog(logID, headerID, log))
    }
    return results, tx.Commit()
}

func logWasADuplicate(err error) bool {
    return err == sql.ErrNoRows
}

func insertLog(headerID int64, log types.Log, tx *sqlx.Tx) (int64, error) {
    topics := buildTopics(log)
    raw, jsonErr := log.MarshalJSON()
    if jsonErr != nil {
        return 0, jsonErr
    }
    var logID int64
    err := tx.QueryRowx(insertHeaderSyncLogQuery, headerID, log.Address.Hex(), topics, log.Data, log.BlockNumber,
        log.BlockHash.Hex(), log.TxIndex, log.TxHash.Hex(), log.Index, raw).Scan(&logID)
    return logID, err
}

func buildLog(logID int64, headerID int64, log types.Log) core.HeaderSyncLog {
    return core.HeaderSyncLog{
        ID: logID,
        HeaderID: headerID,
        Log: log,
        Transformed: false,
    }
}

func buildTopics(log types.Log) pq.ByteaArray {
    var topics pq.ByteaArray
    for _, topic := range log.Topics {
        topics = append(topics, topic.Bytes())
    }
    return topics
}

func createHeaderCheckedPredicateSQLForMissingHeaders(boolColumns []string) string {
    var result bytes.Buffer
    result.WriteString(" (")

    // Loop excluding last column name
    for _, column := range boolColumns[:len(boolColumns)-1] {
        result.WriteString(fmt.Sprintf("%v=0 OR ", column))
    }

    result.WriteString(fmt.Sprintf("%v=0)", boolColumns[len(boolColumns)-1]))

    return result.String()
}

func createHeaderCheckedPredicateSQLForRecheckedHeaders(boolColumns []string) string {
    var result bytes.Buffer
    result.WriteString(" (")

    // Loop excluding last column name
    for _, column := range boolColumns[:len(boolColumns)-1] {
        result.WriteString(fmt.Sprintf("%v<%s OR ", column, constants.RecheckHeaderCap))
    }

    // No trailing "OR" for the last column name
    result.WriteString(fmt.Sprintf("%v<%s)", boolColumns[len(boolColumns)-1], constants.RecheckHeaderCap))

    return result.String()
}
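To make the two predicate builders above concrete, here is what they emit for a few inputs. This is illustrative only, restating the behavior the functions encode (and which the tests below verify); RecheckHeaderCap is a string constant in the constants package.

    // Illustrative expected outputs of CreateHeaderCheckedPredicateSQL.
    missing := CreateHeaderCheckedPredicateSQL([]string{"columnA", "columnB"}, constants.HeaderMissing)
    // missing == " (columnA=0 OR columnB=0)"

    recheck := CreateHeaderCheckedPredicateSQL([]string{"columnA"}, constants.HeaderRecheck)
    // recheck == " (columnA<" + constants.RecheckHeaderCap + ")"

    none := CreateHeaderCheckedPredicateSQL(nil, constants.HeaderMissing)
    // none == "FALSE"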
@ -17,39 +17,25 @@
package repository_test

import (
    "fmt"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/lib/pq"
    "github.com/vulcanize/vulcanizedb/libraries/shared/test_data"
    "math/rand"
    "strconv"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"

    "github.com/vulcanize/vulcanizedb/libraries/shared/constants"
    shared "github.com/vulcanize/vulcanizedb/libraries/shared/repository"
    r2 "github.com/vulcanize/vulcanizedb/pkg/contract_watcher/header/repository"
    "github.com/vulcanize/vulcanizedb/pkg/core"
    "github.com/vulcanize/vulcanizedb/pkg/datastore"
    "github.com/vulcanize/vulcanizedb/libraries/shared/repository"
    "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
    "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
    "github.com/vulcanize/vulcanizedb/pkg/fakes"
    "github.com/vulcanize/vulcanizedb/test_config"
)

var _ = Describe("Repository", func() {
    var (
        checkedHeadersColumn string
        db *postgres.DB
    )
var _ = Describe("", func() {
    Describe("MarkContractWatcherHeaderCheckedInTransaction", func() {
        var (
            checkedHeadersColumn string
            db *postgres.DB
        )

    Describe("MarkHeaderChecked", func() {
        BeforeEach(func() {
            db = test_config.NewTestDB(test_config.NewTestNode())
            test_config.CleanTestDB(db)

            checkedHeadersColumn = "test_column_checked"
            _, migrateErr := db.Exec(`ALTER TABLE public.checked_headers
                ADD COLUMN ` + checkedHeadersColumn + ` integer`)
@ -61,395 +47,21 @@ var _ = Describe("Repository", func() {
            Expect(cleanupMigrateErr).NotTo(HaveOccurred())
        })

        It("marks passed column as checked for passed header", func() {
            headerRepository := repositories.NewHeaderRepository(db)
            headerID, headerErr := headerRepository.CreateOrUpdateHeader(fakes.FakeHeader)
            Expect(headerErr).NotTo(HaveOccurred())

            err := shared.MarkHeaderChecked(headerID, db, checkedHeadersColumn)

            Expect(err).NotTo(HaveOccurred())
            var checkedCount int
            fetchErr := db.Get(&checkedCount, `SELECT `+checkedHeadersColumn+` FROM public.checked_headers LIMIT 1`)
            Expect(fetchErr).NotTo(HaveOccurred())
            Expect(checkedCount).To(Equal(1))
        })
    })

    Describe("MarkHeaderCheckedInTransaction", func() {
        BeforeEach(func() {
            db = test_config.NewTestDB(test_config.NewTestNode())
            test_config.CleanTestDB(db)

            checkedHeadersColumn = "test_column_checked"
            _, migrateErr := db.Exec(`ALTER TABLE public.checked_headers
                ADD COLUMN ` + checkedHeadersColumn + ` integer`)
            Expect(migrateErr).NotTo(HaveOccurred())
        })

        AfterEach(func() {
            _, cleanupMigrateErr := db.Exec(`ALTER TABLE public.checked_headers DROP COLUMN ` + checkedHeadersColumn)
            Expect(cleanupMigrateErr).NotTo(HaveOccurred())
        })

        It("marks passed column as checked for passed header within a passed transaction", func() {
        It("marks passed header as checked within a passed transaction", func() {
            headerRepository := repositories.NewHeaderRepository(db)
            headerID, headerErr := headerRepository.CreateOrUpdateHeader(fakes.FakeHeader)
            Expect(headerErr).NotTo(HaveOccurred())
            tx, txErr := db.Beginx()
            Expect(txErr).NotTo(HaveOccurred())

            err := shared.MarkHeaderCheckedInTransaction(headerID, tx, checkedHeadersColumn)

            err := repository.MarkContractWatcherHeaderCheckedInTransaction(headerID, tx, checkedHeadersColumn)
            Expect(err).NotTo(HaveOccurred())
            commitErr := tx.Commit()
            Expect(commitErr).NotTo(HaveOccurred())
            var checkedCount int
            fetchErr := db.Get(&checkedCount, `SELECT `+checkedHeadersColumn+` FROM public.checked_headers LIMIT 1`)
            fetchErr := db.Get(&checkedCount, `SELECT COUNT(*) FROM public.checked_headers WHERE header_id = $1`, headerID)
            Expect(fetchErr).NotTo(HaveOccurred())
            Expect(checkedCount).To(Equal(1))
        })
    })

    Describe("MissingHeaders", func() {
        var (
            headerRepository datastore.HeaderRepository
            startingBlockNumber int64
            endingBlockNumber int64
            eventSpecificBlockNumber int64
            outOfRangeBlockNumber int64
            blockNumbers []int64
            headerIDs []int64
            notCheckedSQL string
            err error
            hr r2.HeaderRepository
            columnNames []string
        )

        BeforeEach(func() {
            db = test_config.NewTestDB(test_config.NewTestNode())
            test_config.CleanTestDB(db)
            headerRepository = repositories.NewHeaderRepository(db)
            hr = r2.NewHeaderRepository(db)
            hr.AddCheckColumns(getExpectedColumnNames())

            columnNames, err = shared.GetCheckedColumnNames(db)
            Expect(err).NotTo(HaveOccurred())
            notCheckedSQL = shared.CreateHeaderCheckedPredicateSQL(columnNames, constants.HeaderMissing)

            startingBlockNumber = rand.Int63()
            eventSpecificBlockNumber = startingBlockNumber + 1
            endingBlockNumber = startingBlockNumber + 2
            outOfRangeBlockNumber = endingBlockNumber + 1

            blockNumbers = []int64{startingBlockNumber, eventSpecificBlockNumber, endingBlockNumber, outOfRangeBlockNumber}

            headerIDs = []int64{}
            for _, n := range blockNumbers {
                headerID, err := headerRepository.CreateOrUpdateHeader(fakes.GetFakeHeader(n))
                headerIDs = append(headerIDs, headerID)
                Expect(err).NotTo(HaveOccurred())
            }
        })

        AfterEach(func() {
            test_config.CleanCheckedHeadersTable(db, getExpectedColumnNames())
        })

        It("only treats headers as checked if the event specific logs have been checked", func() {
            // add a checked_headers record, but don't mark it checked for any of the columns
            _, err = db.Exec(`INSERT INTO public.checked_headers (header_id) VALUES ($1)`, headerIDs[1])
            Expect(err).NotTo(HaveOccurred())

            headers, err := shared.MissingHeaders(startingBlockNumber, endingBlockNumber, db, notCheckedSQL)

            Expect(err).NotTo(HaveOccurred())
            Expect(len(headers)).To(Equal(3))
            Expect(headers[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(eventSpecificBlockNumber)))
            Expect(headers[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(eventSpecificBlockNumber)))
            Expect(headers[2].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(eventSpecificBlockNumber)))
        })

        It("only returns headers associated with the current node", func() {
            dbTwo := test_config.NewTestDB(core.Node{ID: "second"})
            headerRepositoryTwo := repositories.NewHeaderRepository(dbTwo)
            for _, n := range blockNumbers {
                _, err = headerRepositoryTwo.CreateOrUpdateHeader(fakes.GetFakeHeader(n + 10))
                Expect(err).NotTo(HaveOccurred())
            }

            Expect(err).NotTo(HaveOccurred())
            nodeOneMissingHeaders, err := shared.MissingHeaders(startingBlockNumber, endingBlockNumber, db, notCheckedSQL)
            Expect(err).NotTo(HaveOccurred())
            Expect(len(nodeOneMissingHeaders)).To(Equal(3))
            Expect(nodeOneMissingHeaders[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(eventSpecificBlockNumber), Equal(endingBlockNumber)))
            Expect(nodeOneMissingHeaders[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(eventSpecificBlockNumber), Equal(endingBlockNumber)))
            Expect(nodeOneMissingHeaders[2].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(eventSpecificBlockNumber), Equal(endingBlockNumber)))

            nodeTwoMissingHeaders, err := shared.MissingHeaders(startingBlockNumber, endingBlockNumber+10, dbTwo, notCheckedSQL)
            Expect(err).NotTo(HaveOccurred())
            Expect(len(nodeTwoMissingHeaders)).To(Equal(3))
            Expect(nodeTwoMissingHeaders[0].BlockNumber).To(Or(Equal(startingBlockNumber+10), Equal(eventSpecificBlockNumber+10), Equal(endingBlockNumber+10)))
            Expect(nodeTwoMissingHeaders[1].BlockNumber).To(Or(Equal(startingBlockNumber+10), Equal(eventSpecificBlockNumber+10), Equal(endingBlockNumber+10)))
        })

        It("handles an ending block of -1", func() {
            endingBlock := int64(-1)
            headers, err := shared.MissingHeaders(startingBlockNumber, endingBlock, db, notCheckedSQL)

            Expect(err).NotTo(HaveOccurred())
            Expect(len(headers)).To(Equal(4))
            Expect(headers[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(eventSpecificBlockNumber), Equal(outOfRangeBlockNumber)))
            Expect(headers[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(eventSpecificBlockNumber), Equal(outOfRangeBlockNumber)))
            Expect(headers[2].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(eventSpecificBlockNumber), Equal(outOfRangeBlockNumber)))
            Expect(headers[3].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(eventSpecificBlockNumber), Equal(outOfRangeBlockNumber)))
        })

        It("when the `notCheckedSQL` argument allows for rechecks, it returns headers where the checked count is less than the maximum", func() {
            columnName := columnNames[0]
            recheckedSQL := shared.CreateHeaderCheckedPredicateSQL([]string{columnName}, constants.HeaderRecheck)
            // mark every header checked at least once;
            // header 4 is marked the maximum number of times, so it is no longer checked

            maxCheckCount, intConversionErr := strconv.Atoi(constants.RecheckHeaderCap)
            Expect(intConversionErr).NotTo(HaveOccurred())

            markHeaderOneErr := shared.MarkHeaderChecked(headerIDs[0], db, columnName)
            Expect(markHeaderOneErr).NotTo(HaveOccurred())
            markHeaderTwoErr := shared.MarkHeaderChecked(headerIDs[1], db, columnName)
            Expect(markHeaderTwoErr).NotTo(HaveOccurred())
            markHeaderThreeErr := shared.MarkHeaderChecked(headerIDs[2], db, columnName)
            Expect(markHeaderThreeErr).NotTo(HaveOccurred())
            for i := 0; i <= maxCheckCount; i++ {
                markHeaderFourErr := shared.MarkHeaderChecked(headerIDs[3], db, columnName)
                Expect(markHeaderFourErr).NotTo(HaveOccurred())
            }

            headers, err := shared.MissingHeaders(1, -1, db, recheckedSQL)

            Expect(err).NotTo(HaveOccurred())
            Expect(len(headers)).To(Equal(3))
            Expect(headers[0].Id).To(Or(Equal(headerIDs[0]), Equal(headerIDs[1]), Equal(headerIDs[2])))
            Expect(headers[1].Id).To(Or(Equal(headerIDs[0]), Equal(headerIDs[1]), Equal(headerIDs[2])))
            Expect(headers[2].Id).To(Or(Equal(headerIDs[0]), Equal(headerIDs[1]), Equal(headerIDs[2])))
        })
    })

    Describe("GetCheckedColumnNames", func() {
        It("gets the column names from checked_headers", func() {
            db := test_config.NewTestDB(test_config.NewTestNode())
            hr := r2.NewHeaderRepository(db)
            hr.AddCheckColumns(getExpectedColumnNames())
            test_config.CleanTestDB(db)
            expectedColumnNames := getExpectedColumnNames()
            actualColumnNames, err := shared.GetCheckedColumnNames(db)
            Expect(err).NotTo(HaveOccurred())
            Expect(actualColumnNames).To(Equal(expectedColumnNames))
            test_config.CleanCheckedHeadersTable(db, getExpectedColumnNames())
        })
    })

    Describe("CreateHeaderCheckedPredicateSQL", func() {
        Describe("for headers that haven't been checked for logs", func() {
            It("generates a correct SQL string for one column", func() {
                columns := []string{"columnA"}
                expected := " (columnA=0)"
                actual := shared.CreateHeaderCheckedPredicateSQL(columns, constants.HeaderMissing)
                Expect(actual).To(Equal(expected))
            })

            It("generates a correct SQL string for several columns", func() {
                columns := []string{"columnA", "columnB"}
                expected := " (columnA=0 OR columnB=0)"
                actual := shared.CreateHeaderCheckedPredicateSQL(columns, constants.HeaderMissing)
                Expect(actual).To(Equal(expected))
            })

            It("defaults to FALSE when there are no columns", func() {
                expected := "FALSE"
                actual := shared.CreateHeaderCheckedPredicateSQL([]string{}, constants.HeaderMissing)
                Expect(actual).To(Equal(expected))
            })
        })

        Describe("for headers that are being rechecked for logs", func() {
            It("generates a correct SQL string for rechecking headers for one column", func() {
                columns := []string{"columnA"}
                expected := fmt.Sprintf(" (columnA<%s)", constants.RecheckHeaderCap)
                actual := shared.CreateHeaderCheckedPredicateSQL(columns, constants.HeaderRecheck)
                Expect(actual).To(Equal(expected))
            })

            It("generates a correct SQL string for rechecking headers for several columns", func() {
                columns := []string{"columnA", "columnB"}
                expected := fmt.Sprintf(" (columnA<%s OR columnB<%s)", constants.RecheckHeaderCap, constants.RecheckHeaderCap)
                actual := shared.CreateHeaderCheckedPredicateSQL(columns, constants.HeaderRecheck)
                Expect(actual).To(Equal(expected))
            })

            It("defaults to FALSE when there are no columns", func() {
                expected := "FALSE"
                actual := shared.CreateHeaderCheckedPredicateSQL([]string{}, constants.HeaderRecheck)
                Expect(actual).To(Equal(expected))
            })
        })
    })

    Describe("CreateHeaderSyncLogs", func() {
        var headerID int64

        type HeaderSyncLog struct {
            ID int64
            HeaderID int64 `db:"header_id"`
            Address string
            Topics pq.ByteaArray
            Data []byte
            BlockNumber uint64 `db:"block_number"`
            BlockHash string `db:"block_hash"`
            TxHash string `db:"tx_hash"`
            TxIndex uint `db:"tx_index"`
            LogIndex uint `db:"log_index"`
            Transformed bool
            Raw []byte
        }

        BeforeEach(func() {
            db = test_config.NewTestDB(test_config.NewTestNode())
            test_config.CleanTestDB(db)
            headerRepository := repositories.NewHeaderRepository(db)
            var headerErr error
            headerID, headerErr = headerRepository.CreateOrUpdateHeader(fakes.FakeHeader)
            Expect(headerErr).NotTo(HaveOccurred())
        })

        It("writes a log to the db", func() {
            log := test_data.GenericTestLog()

            _, err := shared.CreateLogs(headerID, []types.Log{log}, db)

            Expect(err).NotTo(HaveOccurred())
            var dbLog HeaderSyncLog
            lookupErr := db.Get(&dbLog, `SELECT * FROM header_sync_logs`)
            Expect(lookupErr).NotTo(HaveOccurred())
            Expect(dbLog.ID).NotTo(BeZero())
            Expect(dbLog.HeaderID).To(Equal(headerID))
            Expect(dbLog.Address).To(Equal(log.Address.Hex()))
            Expect(dbLog.Topics[0]).To(Equal(log.Topics[0].Bytes()))
            Expect(dbLog.Topics[1]).To(Equal(log.Topics[1].Bytes()))
            Expect(dbLog.Data).To(Equal(log.Data))
            Expect(dbLog.BlockNumber).To(Equal(log.BlockNumber))
            Expect(dbLog.BlockHash).To(Equal(log.BlockHash.Hex()))
            Expect(dbLog.TxIndex).To(Equal(log.TxIndex))
            Expect(dbLog.TxHash).To(Equal(log.TxHash.Hex()))
            Expect(dbLog.LogIndex).To(Equal(log.Index))
            expectedRaw, jsonErr := log.MarshalJSON()
            Expect(jsonErr).NotTo(HaveOccurred())
            Expect(dbLog.Raw).To(MatchJSON(expectedRaw))
            Expect(dbLog.Transformed).To(BeFalse())
        })

        It("writes several logs to the db", func() {
            log1 := test_data.GenericTestLog()
            log2 := test_data.GenericTestLog()
            logs := []types.Log{log1, log2}

            _, err := shared.CreateLogs(headerID, logs, db)

            Expect(err).NotTo(HaveOccurred())
            var count int
            lookupErr := db.Get(&count, `SELECT COUNT(*) FROM header_sync_logs`)
            Expect(lookupErr).NotTo(HaveOccurred())
            Expect(count).To(Equal(len(logs)))
        })

        It("persists record that can be unpacked into types.Log", func() {
            // important if we want to decouple log persistence from transforming and still make use of
            // tools on types.Log like abi.Unpack

            log := test_data.GenericTestLog()

            _, err := shared.CreateLogs(headerID, []types.Log{log}, db)

            Expect(err).NotTo(HaveOccurred())
            var dbLog HeaderSyncLog
            lookupErr := db.Get(&dbLog, `SELECT * FROM header_sync_logs`)
            Expect(lookupErr).NotTo(HaveOccurred())

            var logTopics []common.Hash
            for _, topic := range dbLog.Topics {
                logTopics = append(logTopics, common.BytesToHash(topic))
            }

            reconstructedLog := types.Log{
                Address: common.HexToAddress(dbLog.Address),
                Topics: logTopics,
                Data: dbLog.Data,
                BlockNumber: dbLog.BlockNumber,
                TxHash: common.HexToHash(dbLog.TxHash),
                TxIndex: dbLog.TxIndex,
                BlockHash: common.HexToHash(dbLog.BlockHash),
                Index: dbLog.LogIndex,
                Removed: false,
            }
            Expect(reconstructedLog).To(Equal(log))
        })

        It("does not duplicate logs", func() {
            log := test_data.GenericTestLog()

            results, err := shared.CreateLogs(headerID, []types.Log{log, log}, db)

            Expect(err).NotTo(HaveOccurred())
            Expect(len(results)).To(Equal(1))
            var count int
            lookupErr := db.Get(&count, `SELECT COUNT(*) FROM header_sync_logs`)
            Expect(lookupErr).NotTo(HaveOccurred())
            Expect(count).To(Equal(1))
        })

        It("returns results with log id and header id for persisted logs", func() {
            log1 := test_data.GenericTestLog()
            log2 := test_data.GenericTestLog()
            logs := []types.Log{log1, log2}

            results, err := shared.CreateLogs(headerID, logs, db)

            Expect(err).NotTo(HaveOccurred())
            Expect(len(results)).To(Equal(len(logs)))
            var log1ID, log2ID int64
            lookupErr := db.Get(&log1ID, `SELECT id FROM header_sync_logs WHERE data = $1`, log1.Data)
            Expect(lookupErr).NotTo(HaveOccurred())
            lookup2Err := db.Get(&log2ID, `SELECT id FROM header_sync_logs WHERE data = $1`, log2.Data)
            Expect(lookup2Err).NotTo(HaveOccurred())
            Expect(results[0].ID).To(Or(Equal(log1ID), Equal(log2ID)))
            Expect(results[1].ID).To(Or(Equal(log1ID), Equal(log2ID)))
            Expect(results[0].HeaderID).To(Equal(headerID))
            Expect(results[1].HeaderID).To(Equal(headerID))
        })

        It("returns results with properties for persisted logs", func() {
            log1 := test_data.GenericTestLog()
            log2 := test_data.GenericTestLog()
            logs := []types.Log{log1, log2}

            results, err := shared.CreateLogs(headerID, logs, db)

            Expect(err).NotTo(HaveOccurred())
            Expect(len(results)).To(Equal(len(logs)))
            Expect(results[0].Log).To(Or(Equal(log1), Equal(log2)))
            Expect(results[1].Log).To(Or(Equal(log1), Equal(log2)))
            Expect(results[0].Transformed).To(BeFalse())
            Expect(results[1].Transformed).To(BeFalse())
        })
    })
})

func getExpectedColumnNames() []string {
    return []string{
        "column_1",
        "column_2",
        "column_3",
        "column_4",
    }
}
@ -17,14 +17,12 @@
package test_data

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
    "math/rand"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"

    "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
)

type GenericModel struct{}
@ -18,13 +18,12 @@ package transformer

import (
    "github.com/ethereum/go-ethereum/common"

    "github.com/vulcanize/vulcanizedb/pkg/core"
    "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
)

type EventTransformer interface {
    Execute(logs []core.HeaderSyncLog, headerID int64) error
    Execute(logs []core.HeaderSyncLog) error
    GetConfig() EventTransformerConfig
}
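A minimal sketch (not part of this commit) of what a transformer looks like against the new interface: the header ID now rides along on each core.HeaderSyncLog, so Execute no longer takes it as a separate argument. The LoggingTransformer name and its fields are hypothetical.

    package example

    import (
        "github.com/sirupsen/logrus"

        "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
        "github.com/vulcanize/vulcanizedb/pkg/core"
    )

    // LoggingTransformer is a hypothetical transformer that only reports what it sees.
    type LoggingTransformer struct {
        config transformer.EventTransformerConfig
    }

    func (t LoggingTransformer) Execute(logs []core.HeaderSyncLog) error {
        for _, log := range logs {
            // Each HeaderSyncLog carries its own HeaderID, per the interface change above.
            logrus.Infof("log %d belongs to header %d", log.ID, log.HeaderID)
        }
        return nil
    }

    func (t LoggingTransformer) GetConfig() transformer.EventTransformerConfig {
        return t.config
    }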
@ -17,141 +17,67 @@
package watcher

import (
    "fmt"
    "github.com/vulcanize/vulcanizedb/libraries/shared/transactions"

    "github.com/ethereum/go-ethereum/common"
    "github.com/sirupsen/logrus"

    "github.com/vulcanize/vulcanizedb/libraries/shared/chunker"
    "github.com/vulcanize/vulcanizedb/libraries/shared/constants"
    "github.com/vulcanize/vulcanizedb/libraries/shared/fetcher"
    "github.com/vulcanize/vulcanizedb/libraries/shared/repository"
    "github.com/vulcanize/vulcanizedb/libraries/shared/logs"
    "github.com/vulcanize/vulcanizedb/libraries/shared/transactions"
    "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
    "github.com/vulcanize/vulcanizedb/pkg/core"
    "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
    "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
)

type EventWatcher struct {
    Transformers []transformer.EventTransformer
    BlockChain core.BlockChain
    DB *postgres.DB
    Fetcher fetcher.ILogFetcher
    Chunker chunker.Chunker
    Addresses []common.Address
    Topics []common.Hash
    StartingBlock *int64
    Syncer transactions.ITransactionsSyncer
    blockChain core.BlockChain
    db *postgres.DB
    LogDelegator logs.ILogDelegator
    LogExtractor logs.ILogExtractor
}

func NewEventWatcher(db *postgres.DB, bc core.BlockChain) EventWatcher {
    logChunker := chunker.NewLogChunker()
    logFetcher := fetcher.NewLogFetcher(bc)
    transactionSyncer := transactions.NewTransactionsSyncer(db, bc)
    extractor := &logs.LogExtractor{
        Fetcher: fetcher.NewLogFetcher(bc),
        CheckedHeadersRepository: repositories.NewCheckedHeadersRepository(db),
        LogRepository: repositories.NewHeaderSyncLogRepository(db),
        Syncer: transactions.NewTransactionsSyncer(db, bc),
    }
    logTransformer := &logs.LogDelegator{
        Chunker: chunker.NewLogChunker(),
        LogRepository: repositories.NewHeaderSyncLogRepository(db),
    }
    return EventWatcher{
        BlockChain: bc,
        DB: db,
        Fetcher: logFetcher,
        Chunker: logChunker,
        Syncer: transactionSyncer,
        blockChain: bc,
        db: db,
        LogExtractor: extractor,
        LogDelegator: logTransformer,
    }
}

// Adds transformers to the watcher and updates the chunker, so that it will consider the new transformers.
// Adds transformers to the watcher so that their logs will be extracted and delegated.
func (watcher *EventWatcher) AddTransformers(initializers []transformer.EventTransformerInitializer) {
    var contractAddresses []common.Address
    var topic0s []common.Hash
    var configs []transformer.EventTransformerConfig

    for _, initializer := range initializers {
        t := initializer(watcher.DB)
        watcher.Transformers = append(watcher.Transformers, t)
        t := initializer(watcher.db)

        config := t.GetConfig()
        configs = append(configs, config)

        if watcher.StartingBlock == nil {
            watcher.StartingBlock = &config.StartingBlockNumber
        } else if earlierStartingBlockNumber(config.StartingBlockNumber, *watcher.StartingBlock) {
            watcher.StartingBlock = &config.StartingBlockNumber
        }

        addresses := transformer.HexStringsToAddresses(config.ContractAddresses)
        contractAddresses = append(contractAddresses, addresses...)
        topic0s = append(topic0s, common.HexToHash(config.Topic))
        watcher.LogDelegator.AddTransformer(t)
        watcher.LogExtractor.AddTransformerConfig(t.GetConfig())
    }

    watcher.Addresses = append(watcher.Addresses, contractAddresses...)
    watcher.Topics = append(watcher.Topics, topic0s...)
    watcher.Chunker.AddConfigs(configs)
}

// Extracts and delegates watched log events.
func (watcher *EventWatcher) Execute(recheckHeaders constants.TransformerExecution) error {
    if watcher.Transformers == nil {
        return fmt.Errorf("No transformers added to watcher")
    extractErr := watcher.LogExtractor.ExtractLogs(recheckHeaders)
    if extractErr != nil {
        logrus.Errorf("error extracting logs in event watcher: %s", extractErr.Error())
        return extractErr
    }

    checkedColumnNames, err := repository.GetCheckedColumnNames(watcher.DB)
    if err != nil {
        return err
    }
    notCheckedSQL := repository.CreateHeaderCheckedPredicateSQL(checkedColumnNames, recheckHeaders)

    missingHeaders, err := repository.MissingHeaders(*watcher.StartingBlock, -1, watcher.DB, notCheckedSQL)
    if err != nil {
        logrus.Error("Couldn't fetch missing headers in watcher: ", err)
        return err
    delegateErr := watcher.LogDelegator.DelegateLogs()
    if delegateErr != nil {
        logrus.Errorf("error delegating logs in event watcher: %s", delegateErr.Error())
        return delegateErr
    }

    for _, header := range missingHeaders {
        // TODO Extend FetchLogs for doing several blocks at a time
        logs, err := watcher.Fetcher.FetchLogs(watcher.Addresses, watcher.Topics, header)
        if err != nil {
            logrus.WithFields(logrus.Fields{
                "headerId": header.Id,
                "headerHash": header.Hash,
                "blockNumber": header.BlockNumber,
            }).Errorf("Couldn't fetch logs for header: %v", err)
            return err
        }

        transactionsSyncErr := watcher.Syncer.SyncTransactions(header.Id, logs)
        if transactionsSyncErr != nil {
            logrus.Errorf("error syncing transactions: %s", transactionsSyncErr.Error())
            return transactionsSyncErr
        }

        persistedLogs, createLogsErr := repository.CreateLogs(header.Id, logs, watcher.DB)
        if createLogsErr != nil {
            logrus.Errorf("error persisting logs: %s", createLogsErr.Error())
        }

        transformErr := watcher.transformLogs(persistedLogs, header.Id)
        if transformErr != nil {
            logrus.Error("Could not transform logs: ", transformErr)
            return transformErr
        }
    }
    return err
}

func (watcher *EventWatcher) transformLogs(logs []core.HeaderSyncLog, headerID int64) error {
    chunkedLogs := watcher.Chunker.ChunkLogs(logs)

    // Can't quit early and mark as checked if there are no logs. If we are running continuousLogSync,
    // not all logs we're interested in might have been fetched.
    for _, t := range watcher.Transformers {
        transformerName := t.GetConfig().TransformerName
        logChunk := chunkedLogs[transformerName]
        err := t.Execute(logChunk, headerID)
        if err != nil {
            logrus.Errorf("%v transformer failed to execute in watcher: %v", transformerName, err)
            return err
        }
    }
    return nil
}

func earlierStartingBlockNumber(transformerBlock, watcherBlock int64) bool {
    return transformerBlock < watcherBlock
}
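A minimal usage sketch of the refactored watcher, assuming the db handle, blockchain handle, and transformerInitializers slice come from the surrounding application (those names are illustrative): extraction and delegation now run as two independent steps inside Execute, so a slow extraction pass no longer has to finish transforming before the next batch of logs can be delegated.

    // Sketch only; wiring names outside the watcher package are assumptions.
    w := watcher.NewEventWatcher(db, blockChain)
    w.AddTransformers(transformerInitializers)
    if err := w.Execute(constants.HeaderMissing); err != nil {
        logrus.Errorf("watcher execution failed: %s", err.Error())
    }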
@ -17,239 +17,97 @@
package watcher_test

import (
    "errors"
    "github.com/ethereum/go-ethereum"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"

    "github.com/vulcanize/vulcanizedb/libraries/shared/constants"
    "github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
    "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
    "github.com/vulcanize/vulcanizedb/libraries/shared/watcher"
    "github.com/vulcanize/vulcanizedb/pkg/core"
    "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
    "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
    "github.com/vulcanize/vulcanizedb/pkg/fakes"
    "github.com/vulcanize/vulcanizedb/test_config"
)

var _ = Describe("Watcher", func() {
    It("initialises correctly", func() {
        db := test_config.NewTestDB(core.Node{ID: "testNode"})
        bc := fakes.NewMockBlockChain()
var _ = Describe("Event Watcher", func() {
    var (
        delegator *mocks.MockLogDelegator
        extractor *mocks.MockLogExtractor
        eventWatcher watcher.EventWatcher
    )

        w := watcher.NewEventWatcher(db, bc)

        Expect(w.DB).To(Equal(db))
        Expect(w.Fetcher).NotTo(BeNil())
        Expect(w.Chunker).NotTo(BeNil())
    BeforeEach(func() {
        delegator = &mocks.MockLogDelegator{}
        extractor = &mocks.MockLogExtractor{}
        eventWatcher = watcher.EventWatcher{
            LogDelegator: delegator,
            LogExtractor: extractor,
        }
    })

    It("adds transformers", func() {
        w := watcher.NewEventWatcher(nil, nil)
        fakeTransformer := &mocks.MockTransformer{}
        fakeTransformer.SetTransformerConfig(mocks.FakeTransformerConfig)
        w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer.FakeTransformerInitializer})

        Expect(len(w.Transformers)).To(Equal(1))
        Expect(w.Transformers).To(ConsistOf(fakeTransformer))
        Expect(w.Topics).To(Equal([]common.Hash{common.HexToHash("FakeTopic")}))
        Expect(w.Addresses).To(Equal([]common.Address{common.HexToAddress("FakeAddress")}))
    })

    It("adds transformers from multiple sources", func() {
        w := watcher.NewEventWatcher(nil, nil)
        fakeTransformer1 := &mocks.MockTransformer{}
        fakeTransformer1.SetTransformerConfig(mocks.FakeTransformerConfig)

        fakeTransformer2 := &mocks.MockTransformer{}
        fakeTransformer2.SetTransformerConfig(mocks.FakeTransformerConfig)

        w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer1.FakeTransformerInitializer})
        w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer2.FakeTransformerInitializer})

        Expect(len(w.Transformers)).To(Equal(2))
        Expect(w.Topics).To(Equal([]common.Hash{common.HexToHash("FakeTopic"),
            common.HexToHash("FakeTopic")}))
        Expect(w.Addresses).To(Equal([]common.Address{common.HexToAddress("FakeAddress"),
            common.HexToAddress("FakeAddress")}))
    })

    It("calculates earliest starting block number", func() {
        fakeTransformer1 := &mocks.MockTransformer{}
        fakeTransformer1.SetTransformerConfig(transformer.EventTransformerConfig{StartingBlockNumber: 5})

        fakeTransformer2 := &mocks.MockTransformer{}
        fakeTransformer2.SetTransformerConfig(transformer.EventTransformerConfig{StartingBlockNumber: 3})

        w := watcher.NewEventWatcher(nil, nil)
        w.AddTransformers([]transformer.EventTransformerInitializer{
            fakeTransformer1.FakeTransformerInitializer,
            fakeTransformer2.FakeTransformerInitializer,
        })

        Expect(*w.StartingBlock).To(Equal(int64(3)))
    })

    It("returns an error when run without transformers", func() {
        w := watcher.NewEventWatcher(nil, nil)
        err := w.Execute(constants.HeaderMissing)
        Expect(err).To(MatchError("No transformers added to watcher"))
    })

    Describe("with missing headers", func() {
    Describe("AddTransformers", func() {
        var (
            db *postgres.DB
            w watcher.EventWatcher
            mockBlockChain fakes.MockBlockChain
            headerRepository repositories.HeaderRepository
            fakeTransformerOne, fakeTransformerTwo *mocks.MockEventTransformer
        )

        BeforeEach(func() {
            db = test_config.NewTestDB(test_config.NewTestNode())
            test_config.CleanTestDB(db)
            mockBlockChain = fakes.MockBlockChain{}
            headerRepository = repositories.NewHeaderRepository(db)
            _, err := headerRepository.CreateOrUpdateHeader(fakes.FakeHeader)
            Expect(err).NotTo(HaveOccurred())
            fakeTransformerOne = &mocks.MockEventTransformer{}
            fakeTransformerOne.SetTransformerConfig(mocks.FakeTransformerConfig)
            fakeTransformerTwo = &mocks.MockEventTransformer{}
            fakeTransformerTwo.SetTransformerConfig(mocks.FakeTransformerConfig)
            initializers := []transformer.EventTransformerInitializer{
                fakeTransformerOne.FakeTransformerInitializer,
                fakeTransformerTwo.FakeTransformerInitializer,
            }

            w = watcher.NewEventWatcher(db, &mockBlockChain)
            eventWatcher.AddTransformers(initializers)
        })

        It("syncs transactions for fetched logs", func() {
            fakeTransformer := &mocks.MockTransformer{}
            w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer.FakeTransformerInitializer})
            mockTransactionSyncer := &fakes.MockTransactionSyncer{}
            w.Syncer = mockTransactionSyncer

            err := w.Execute(constants.HeaderMissing)

            Expect(err).NotTo(HaveOccurred())
            Expect(mockTransactionSyncer.SyncTransactionsCalled).To(BeTrue())
        It("adds initialized transformer to log delegator", func() {
            expectedTransformers := []transformer.EventTransformer{
                fakeTransformerOne,
                fakeTransformerTwo,
            }
            Expect(delegator.AddedTransformers).To(Equal(expectedTransformers))
        })

        It("returns error if syncing transactions fails", func() {
            fakeTransformer := &mocks.MockTransformer{}
            w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer.FakeTransformerInitializer})
            mockTransactionSyncer := &fakes.MockTransactionSyncer{}
            mockTransactionSyncer.SyncTransactionsError = fakes.FakeError
            w.Syncer = mockTransactionSyncer
        It("adds transformer config to log extractor", func() {
            expectedConfigs := []transformer.EventTransformerConfig{
                mocks.FakeTransformerConfig,
                mocks.FakeTransformerConfig,
            }
            Expect(extractor.AddedConfigs).To(Equal(expectedConfigs))
        })
    })

            err := w.Execute(constants.HeaderMissing)
    Describe("Execute", func() {
        It("extracts watched logs", func() {
            err := eventWatcher.Execute(constants.HeaderMissing)

            Expect(err).NotTo(HaveOccurred())
            Expect(extractor.ExtractLogsCalled).To(BeTrue())
|
||||
})
|
||||
|
||||
It("returns error if extracting logs fails", func() {
|
||||
extractor.ExtractLogsError = fakes.FakeError
|
||||
|
||||
err := eventWatcher.Execute(constants.HeaderMissing)
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err).To(MatchError(fakes.FakeError))
|
||||
})
|
||||
|
||||
It("persists fetched logs", func() {
|
||||
fakeTransformer := &mocks.MockTransformer{}
|
||||
transformerConfig := transformer.EventTransformerConfig{TransformerName: "transformerA",
|
||||
ContractAddresses: []string{"0x000000000000000000000000000000000000000A"},
|
||||
Topic: "0xA"}
|
||||
fakeTransformer.SetTransformerConfig(transformerConfig)
|
||||
w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer.FakeTransformerInitializer})
|
||||
log := types.Log{Address: common.HexToAddress("0xA"),
|
||||
Topics: []common.Hash{common.HexToHash("0xA")},
|
||||
Index: 0,
|
||||
}
|
||||
mockBlockChain.SetGetEthLogsWithCustomQueryReturnLogs([]types.Log{log})
|
||||
|
||||
err := w.Execute(constants.HeaderMissing)
|
||||
It("delegates untransformed logs", func() {
|
||||
err := eventWatcher.Execute(constants.HeaderMissing)
|
||||
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(fakeTransformer.PassedLogs)).NotTo(BeZero())
|
||||
Expect(fakeTransformer.PassedLogs[0].ID).NotTo(BeZero())
|
||||
Expect(fakeTransformer.PassedLogs[0].Log).To(Equal(log))
|
||||
Expect(delegator.DelegateCalled).To(BeTrue())
|
||||
})
|
||||
|
||||
It("executes each transformer", func() {
|
||||
fakeTransformer := &mocks.MockTransformer{}
|
||||
w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer.FakeTransformerInitializer})
|
||||
It("returns error if delegating logs fails", func() {
|
||||
delegator.DelegateError = fakes.FakeError
|
||||
|
||||
err := w.Execute(constants.HeaderMissing)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(fakeTransformer.ExecuteWasCalled).To(BeTrue())
|
||||
})
|
||||
err := eventWatcher.Execute(constants.HeaderMissing)
|
||||
|
||||
It("returns an error if transformer returns an error", func() {
|
||||
fakeTransformer := &mocks.MockTransformer{ExecuteError: errors.New("Something bad happened")}
|
||||
w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer.FakeTransformerInitializer})
|
||||
|
||||
err := w.Execute(constants.HeaderMissing)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(fakeTransformer.ExecuteWasCalled).To(BeFalse())
|
||||
})
|
||||
|
||||
It("passes only relevant logs to each transformer", func() {
|
||||
transformerA := &mocks.MockTransformer{}
|
||||
transformerB := &mocks.MockTransformer{}
|
||||
|
||||
configA := transformer.EventTransformerConfig{TransformerName: "transformerA",
|
||||
ContractAddresses: []string{"0x000000000000000000000000000000000000000A"},
|
||||
Topic: "0xA"}
|
||||
configB := transformer.EventTransformerConfig{TransformerName: "transformerB",
|
||||
ContractAddresses: []string{"0x000000000000000000000000000000000000000b"},
|
||||
Topic: "0xB"}
|
||||
|
||||
transformerA.SetTransformerConfig(configA)
|
||||
transformerB.SetTransformerConfig(configB)
|
||||
|
||||
logA := types.Log{Address: common.HexToAddress("0xA"),
|
||||
Topics: []common.Hash{common.HexToHash("0xA")},
|
||||
Index: 0,
|
||||
}
|
||||
logB := types.Log{Address: common.HexToAddress("0xB"),
|
||||
Topics: []common.Hash{common.HexToHash("0xB")},
|
||||
Index: 1,
|
||||
}
|
||||
mockBlockChain.SetGetEthLogsWithCustomQueryReturnLogs([]types.Log{logA, logB})
|
||||
|
||||
w = watcher.NewEventWatcher(db, &mockBlockChain)
|
||||
w.AddTransformers([]transformer.EventTransformerInitializer{
|
||||
transformerA.FakeTransformerInitializer, transformerB.FakeTransformerInitializer})
|
||||
|
||||
err := w.Execute(constants.HeaderMissing)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(transformerA.PassedLogs)).NotTo(BeZero())
|
||||
Expect(transformerA.PassedLogs[0].Log).To(Equal(logA))
|
||||
Expect(len(transformerB.PassedLogs)).NotTo(BeZero())
|
||||
Expect(transformerB.PassedLogs[0].Log).To(Equal(logB))
|
||||
})
|
||||
|
||||
Describe("uses the LogFetcher correctly:", func() {
|
||||
var fakeTransformer mocks.MockTransformer
|
||||
BeforeEach(func() {
|
||||
fakeTransformer = mocks.MockTransformer{}
|
||||
})
|
||||
|
||||
It("fetches logs for added transformers", func() {
|
||||
addresses := []string{"0xA", "0xB"}
|
||||
topic := "0x1"
|
||||
fakeTransformer.SetTransformerConfig(transformer.EventTransformerConfig{
|
||||
Topic: topic, ContractAddresses: addresses})
|
||||
w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer.FakeTransformerInitializer})
|
||||
|
||||
err := w.Execute(constants.HeaderMissing)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
fakeHash := common.HexToHash(fakes.FakeHeader.Hash)
|
||||
mockBlockChain.AssertGetEthLogsWithCustomQueryCalledWith(ethereum.FilterQuery{
|
||||
BlockHash: &fakeHash,
|
||||
Addresses: transformer.HexStringsToAddresses(addresses),
|
||||
Topics: [][]common.Hash{{common.HexToHash(topic)}},
|
||||
})
|
||||
})
|
||||
|
||||
It("propagates log fetcher errors", func() {
|
||||
fetcherError := errors.New("FetcherError")
|
||||
mockBlockChain.SetGetEthLogsWithCustomQueryErr(fetcherError)
|
||||
|
||||
w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer.FakeTransformerInitializer})
|
||||
err := w.Execute(constants.HeaderMissing)
|
||||
Expect(err).To(MatchError(fetcherError))
|
||||
})
|
||||
Expect(err).To(MatchError(fakes.FakeError))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
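Read together, the removed and added test lines above show the watcher's new shape: fetching, chunking, transaction syncing, and per-transformer execution drop out, and Execute becomes extraction followed by delegation. A minimal sketch of that flow follows; it is not the commit's implementation, the ExtractLogs and DelegateLogs method names are inferred from the mock fields (ExtractLogsCalled, DelegateCalled), and constants.TransformerExecution is assumed to be the type behind constants.HeaderMissing.

// Hypothetical minimal interfaces standing in for the watcher's collaborators;
// method names are inferred from the mocks in the tests above.
type logExtractor interface {
	ExtractLogs(recheckHeaders constants.TransformerExecution) error
}

type logDelegator interface {
	DelegateLogs() error
}

// One Execute pass: extract new logs first, then delegate pending logs.
func executeOnce(extractor logExtractor, delegator logDelegator, recheck constants.TransformerExecution) error {
	if extractErr := extractor.ExtractLogs(recheck); extractErr != nil {
		return extractErr // matches the tests: extraction errors surface before any delegation
	}
	return delegator.DelegateLogs()
}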
@ -72,7 +72,7 @@ var _ = Describe("Block Retriever", func() {
Receipt: core.Receipt{
TxHash: "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad6546ae",
ContractAddress: constants.TusdContractAddress,
Logs: []core.Log{},
Logs: []core.FullSyncLog{},
},
TxIndex: 0,
Value: "0",
@ -92,7 +92,7 @@ var _ = Describe("Block Retriever", func() {
Receipt: core.Receipt{
TxHash: "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad234hfs",
ContractAddress: constants.TusdContractAddress,
Logs: []core.Log{{
Logs: []core.FullSyncLog{{
BlockNumber: 3,
TxHash: "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad234hfs",
Address: constants.TusdContractAddress,
@ -136,7 +136,7 @@ var _ = Describe("Block Retriever", func() {
Receipt: core.Receipt{
TxHash: "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad6546ae",
ContractAddress: "",
Logs: []core.Log{{
Logs: []core.FullSyncLog{{
BlockNumber: 2,
TxHash: "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad6546ae",
Address: constants.DaiContractAddress,
@ -167,7 +167,7 @@ var _ = Describe("Block Retriever", func() {
Receipt: core.Receipt{
TxHash: "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad234hfs",
ContractAddress: "",
Logs: []core.Log{{
Logs: []core.FullSyncLog{{
BlockNumber: 3,
TxHash: "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad234hfs",
Address: constants.DaiContractAddress,
@ -39,7 +39,7 @@ import (

type TransferLog struct {
Id int64 `db:"id"`
VulvanizeLogId int64 `db:"vulcanize_log_id"`
VulcanizeLogId int64 `db:"vulcanize_log_id"`
TokenName string `db:"token_name"`
Block int64 `db:"block"`
Tx string `db:"tx"`
@ -50,7 +50,7 @@ type TransferLog struct {

type NewOwnerLog struct {
Id int64 `db:"id"`
VulvanizeLogId int64 `db:"vulcanize_log_id"`
VulcanizeLogId int64 `db:"vulcanize_log_id"`
TokenName string `db:"token_name"`
Block int64 `db:"block"`
Tx string `db:"tx"`
@ -138,13 +138,13 @@ func SetupTusdRepo(vulcanizeLogId *int64, wantedEvents, wantedMethods []string)
Expect(err).NotTo(HaveOccurred())

receiptRepository := repositories.FullSyncReceiptRepository{DB: db}
logRepository := repositories.LogRepository{DB: db}
logRepository := repositories.FullSyncLogRepository{DB: db}
blockRepository := *repositories.NewBlockRepository(db)

blockNumber := rand.Int63()
blockId := CreateBlock(blockNumber, blockRepository)

receipts := []core.Receipt{{Logs: []core.Log{{}}}}
receipts := []core.Receipt{{Logs: []core.FullSyncLog{{}}}}

err = receiptRepository.CreateReceiptsAndLogs(blockId, receipts)
Expect(err).ToNot(HaveOccurred())
@ -184,13 +184,13 @@ func SetupENSRepo(vulcanizeLogId *int64, wantedEvents, wantedMethods []string) (
Expect(err).NotTo(HaveOccurred())

receiptRepository := repositories.FullSyncReceiptRepository{DB: db}
logRepository := repositories.LogRepository{DB: db}
logRepository := repositories.FullSyncLogRepository{DB: db}
blockRepository := *repositories.NewBlockRepository(db)

blockNumber := rand.Int63()
blockId := CreateBlock(blockNumber, blockRepository)

receipts := []core.Receipt{{Logs: []core.Log{{}}}}
receipts := []core.Receipt{{Logs: []core.FullSyncLog{{}}}}

err = receiptRepository.CreateReceiptsAndLogs(blockId, receipts)
Expect(err).ToNot(HaveOccurred())
@ -221,6 +221,7 @@ func SetupENSContract(wantedEvents, wantedMethods []string) *contract.Contract {
}.Init()
}

// TODO: tear down/setup DB from migrations so this doesn't alter the schema between tests
func TearDown(db *postgres.DB) {
tx, err := db.Beginx()
Expect(err).NotTo(HaveOccurred())
@ -255,7 +256,10 @@ func TearDown(db *postgres.DB) {
_, err = tx.Exec(`DROP TABLE checked_headers`)
Expect(err).NotTo(HaveOccurred())

_, err = tx.Exec(`CREATE TABLE checked_headers (id SERIAL PRIMARY KEY, header_id INTEGER UNIQUE NOT NULL REFERENCES headers (id) ON DELETE CASCADE);`)
_, err = tx.Exec(`CREATE TABLE checked_headers (
id SERIAL PRIMARY KEY,
header_id INTEGER UNIQUE NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
check_count INTEGER NOT NULL DEFAULT 1);`)
Expect(err).NotTo(HaveOccurred())

_, err = tx.Exec(`DROP SCHEMA IF EXISTS full_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e CASCADE`)
@ -41,7 +41,7 @@ var TransferBlock1 = core.Block{
Receipt: core.Receipt{
TxHash: "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad654aaa",
ContractAddress: "",
Logs: []core.Log{{
Logs: []core.FullSyncLog{{
BlockNumber: 6194633,
TxHash: "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad654aaa",
Address: constants.TusdContractAddress,
@ -71,7 +71,7 @@ var TransferBlock2 = core.Block{
Receipt: core.Receipt{
TxHash: "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad654eee",
ContractAddress: "",
Logs: []core.Log{{
Logs: []core.FullSyncLog{{
BlockNumber: 6194634,
TxHash: "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad654eee",
Address: constants.TusdContractAddress,
@ -101,7 +101,7 @@ var NewOwnerBlock1 = core.Block{
Receipt: core.Receipt{
TxHash: "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad654bbb",
ContractAddress: "",
Logs: []core.Log{{
Logs: []core.FullSyncLog{{
BlockNumber: 6194635,
TxHash: "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad654bbb",
Address: constants.EnsContractAddress,
@ -131,7 +131,7 @@ var NewOwnerBlock2 = core.Block{
Receipt: core.Receipt{
TxHash: "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad654lll",
ContractAddress: "",
Logs: []core.Log{{
Logs: []core.FullSyncLog{{
BlockNumber: 6194636,
TxHash: "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad654lll",
Address: constants.EnsContractAddress,
@ -140,7 +140,7 @@ func (r *eventRepository) persistHeaderSyncLogs(logs []types.Log, eventInfo type

// Mark header as checked for this eventId
eventId := strings.ToLower(eventInfo.Name + "_" + contractAddr)
err = repository.MarkHeaderCheckedInTransaction(logs[0].Id, tx, eventId) // This assumes all logs are from same block
err = repository.MarkContractWatcherHeaderCheckedInTransaction(logs[0].Id, tx, eventId) // This assumes all logs are from same block
if err != nil {
tx.Rollback()
return err
@ -158,7 +158,7 @@ var _ = Describe("Repository", func() {
Expect(err).ToNot(HaveOccurred())
expectedLog := test_helpers.TransferLog{
Id: 1,
VulvanizeLogId: vulcanizeLogId,
VulcanizeLogId: vulcanizeLogId,
TokenName: "TrueUSD",
Block: 5488076,
Tx: "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad6546ae",
@ -180,7 +180,7 @@ var _ = Describe("Repository", func() {
Expect(err).ToNot(HaveOccurred())
expectedLog := test_helpers.TransferLog{
Id: 1,
VulvanizeLogId: vulcanizeLogId,
VulcanizeLogId: vulcanizeLogId,
TokenName: "TrueUSD",
Block: 5488076,
Tx: "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad6546ae",
@ -31,7 +31,7 @@ type BlockChain interface {
GetEthLogsWithCustomQuery(query ethereum.FilterQuery) ([]types.Log, error)
GetHeaderByNumber(blockNumber int64) (Header, error)
GetHeadersByNumbers(blockNumbers []int64) ([]Header, error)
GetLogs(contract Contract, startingBlockNumber *big.Int, endingBlockNumber *big.Int) ([]Log, error)
GetFullSyncLogs(contract Contract, startingBlockNumber *big.Int, endingBlockNumber *big.Int) ([]FullSyncLog, error)
GetTransactions(transactionHashes []common.Hash) ([]TransactionModel, error)
LastBlock() (*big.Int, error)
Node() Node
@ -18,7 +18,7 @@ package core

import "github.com/ethereum/go-ethereum/core/types"

type Log struct {
type FullSyncLog struct {
BlockNumber int64
TxHash string
Address string
@ -21,7 +21,7 @@ type Receipt struct {
ContractAddress string `db:"contract_address"`
CumulativeGasUsed uint64 `db:"cumulative_gas_used"`
GasUsed uint64 `db:"gas_used"`
Logs []Log
Logs []FullSyncLog
StateRoot string `db:"state_root"`
Status int
TxHash string `db:"tx_hash"`
@ -126,16 +126,16 @@ var _ = Describe("Postgres DB", func() {
It("does not commit log if log is invalid", func() {
//badTxHash violates db tx_hash field length
badTxHash := fmt.Sprintf("x %s", strings.Repeat("1", 100))
badLog := core.Log{
badLog := core.FullSyncLog{
Address: "x123",
BlockNumber: 1,
TxHash: badTxHash,
}
node := core.Node{GenesisBlock: "GENESIS", NetworkID: 1, ID: "x123", ClientName: "geth"}
db, _ := postgres.NewDB(test_config.DBConfig, node)
logRepository := repositories.LogRepository{DB: db}
logRepository := repositories.FullSyncLogRepository{DB: db}

err := logRepository.CreateLogs([]core.Log{badLog}, 123)
err := logRepository.CreateLogs([]core.FullSyncLog{badLog}, 123)

Expect(err).ToNot(BeNil())
savedBlock, err := logRepository.GetLogs("x123", 1)
@ -19,10 +19,8 @@ package repositories
import (
"database/sql"
"errors"

"github.com/jmoiron/sqlx"
log "github.com/sirupsen/logrus"

"github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/datastore"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
@ -81,7 +79,7 @@ func (blockRepository BlockRepository) MissingBlockNumbers(startingBlockNumber i
startingBlockNumber,
highestBlockNumber, nodeId)
if err != nil {
log.Error("MissingBlockNumbers: error getting blocks: ", err)
logrus.Error("MissingBlockNumbers: error getting blocks: ", err)
}
return numbers
}
@ -112,7 +110,7 @@ func (blockRepository BlockRepository) GetBlock(blockNumber int64) (core.Block,
case sql.ErrNoRows:
return core.Block{}, datastore.ErrBlockDoesNotExist(blockNumber)
default:
log.Error("GetBlock: error loading blocks: ", err)
logrus.Error("GetBlock: error loading blocks: ", err)
return savedBlock, err
}
}
@ -151,7 +149,7 @@ func (blockRepository BlockRepository) insertBlock(block core.Block) (int64, err
if insertBlockErr != nil {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
log.Error("failed to rollback transaction: ", rollbackErr)
logrus.Error("failed to rollback transaction: ", rollbackErr)
}
return 0, postgres.ErrDBInsertFailed(insertBlockErr)
}
@ -167,7 +165,7 @@ func (blockRepository BlockRepository) insertBlock(block core.Block) (int64, err
if insertTxErr != nil {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
log.Warn("failed to rollback transaction: ", rollbackErr)
logrus.Warn("failed to rollback transaction: ", rollbackErr)
}
return 0, postgres.ErrDBInsertFailed(insertTxErr)
}
@ -176,7 +174,7 @@ func (blockRepository BlockRepository) insertBlock(block core.Block) (int64, err
if commitErr != nil {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
log.Warn("failed to rollback transaction: ", rollbackErr)
logrus.Warn("failed to rollback transaction: ", rollbackErr)
}
return 0, commitErr
}
@ -268,7 +266,7 @@ func (blockRepository BlockRepository) getBlockHash(block core.Block) (string, b
return retrievedBlockHash, blockExists(retrievedBlockHash)
}

func (blockRepository BlockRepository) createLogs(tx *sqlx.Tx, logs []core.Log, receiptId int64) error {
func (blockRepository BlockRepository) createLogs(tx *sqlx.Tx, logs []core.FullSyncLog, receiptId int64) error {
for _, tlog := range logs {
_, err := tx.Exec(
`INSERT INTO full_sync_logs (block_number, address, tx_hash, index, topic0, topic1, topic2, topic3, data, receipt_id)
@ -305,7 +303,7 @@ func (blockRepository BlockRepository) loadBlock(blockRows *sqlx.Row) (core.Bloc
var block b
err := blockRows.StructScan(&block)
if err != nil {
log.Error("loadBlock: error loading block: ", err)
logrus.Error("loadBlock: error loading block: ", err)
return core.Block{}, err
}
transactionRows, err := blockRepository.database.Queryx(`
@ -323,7 +321,7 @@ func (blockRepository BlockRepository) loadBlock(blockRows *sqlx.Row) (core.Bloc
WHERE block_id = $1
ORDER BY hash`, block.ID)
if err != nil {
log.Error("loadBlock: error fetching transactions: ", err)
logrus.Error("loadBlock: error fetching transactions: ", err)
return core.Block{}, err
}
block.Transactions = blockRepository.LoadTransactions(transactionRows)
@ -336,7 +334,7 @@ func (blockRepository BlockRepository) LoadTransactions(transactionRows *sqlx.Ro
var transaction core.TransactionModel
err := transactionRows.StructScan(&transaction)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
transactions = append(transactions, transaction)
}
@ -0,0 +1,70 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package repositories

import (
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
)

const (
insertCheckedHeaderQuery = `INSERT INTO public.checked_headers (header_id) VALUES ($1)
ON CONFLICT (header_id) DO UPDATE
SET check_count = (SELECT check_count FROM public.checked_headers WHERE header_id = $1) + 1`
)

type CheckedHeadersRepository struct {
db *postgres.DB
}

func NewCheckedHeadersRepository(db *postgres.DB) CheckedHeadersRepository {
return CheckedHeadersRepository{db: db}
}

// Adds header_id to the checked_headers table, or increments its check_count if header_id is already present
func (repo CheckedHeadersRepository) MarkHeaderChecked(headerID int64) error {
	_, err := repo.db.Exec(insertCheckedHeaderQuery, headerID)
	return err
}

// Returns up to 100 headers that are absent from checked_headers or whose check_count is below the passed checkCount
func (repo CheckedHeadersRepository) MissingHeaders(startingBlockNumber, endingBlockNumber, checkCount int64) ([]core.Header, error) {
var result []core.Header
var query string
var err error

if endingBlockNumber == -1 {
query = `SELECT headers.id, headers.block_number, headers.hash FROM headers
LEFT JOIN checked_headers on headers.id = header_id
WHERE (header_id ISNULL OR check_count < $2)
AND headers.block_number >= $1
AND headers.eth_node_fingerprint = $3
LIMIT 100`
err = repo.db.Select(&result, query, startingBlockNumber, checkCount, repo.db.Node.ID)
} else {
query = `SELECT headers.id, headers.block_number, headers.hash FROM headers
LEFT JOIN checked_headers on headers.id = header_id
WHERE (header_id ISNULL OR check_count < $3)
AND headers.block_number >= $1
AND headers.block_number <= $2
AND headers.eth_node_fingerprint = $4
LIMIT 100`
err = repo.db.Select(&result, query, startingBlockNumber, endingBlockNumber, checkCount, repo.db.Node.ID)
}

return result, err
}
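To make the intent of the LIMIT 100 concrete: a caller can drive MissingHeaders in short passes and mark each processed header, so a large backlog of unchecked headers never monopolizes the loop that also delegates logs. A minimal sketch under stated assumptions follows; extractLogsForHeader is a hypothetical stand-in, and core.Header's Id field name is assumed.

// extractLogsForHeader is hypothetical: it stands in for fetching and
// persisting a single header's logs, and is not part of the commit.
func extractLogsForHeader(header core.Header) error { return nil }

// One polling pass over the capped result set.
func extractPendingHeaders(repo repositories.CheckedHeadersRepository, startingBlock int64) error {
	// endingBlockNumber of -1 disables the upper bound; the query still
	// returns at most 100 headers, so each pass stays short.
	headers, missingErr := repo.MissingHeaders(startingBlock, -1, 1)
	if missingErr != nil {
		return missingErr
	}
	for _, header := range headers {
		if extractErr := extractLogsForHeader(header); extractErr != nil {
			return extractErr
		}
		// Recording the check bumps check_count via the upsert above,
		// so this header drops out of subsequent MissingHeaders passes.
		if markErr := repo.MarkHeaderChecked(header.Id); markErr != nil {
			return markErr
		}
	}
	return nil
}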
@ -0,0 +1,270 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package repositories_test

import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/datastore"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
"github.com/vulcanize/vulcanizedb/pkg/fakes"
"github.com/vulcanize/vulcanizedb/test_config"
"math/rand"
)

var _ = Describe("Checked Headers repository", func() {
var (
db *postgres.DB
repo datastore.CheckedHeadersRepository
)

Describe("MarkHeaderChecked", func() {
BeforeEach(func() {
db = test_config.NewTestDB(test_config.NewTestNode())
test_config.CleanTestDB(db)
repo = repositories.NewCheckedHeadersRepository(db)
})

It("marks passed header as checked on insert", func() {
headerRepository := repositories.NewHeaderRepository(db)
headerID, headerErr := headerRepository.CreateOrUpdateHeader(fakes.FakeHeader)
Expect(headerErr).NotTo(HaveOccurred())

err := repo.MarkHeaderChecked(headerID)

Expect(err).NotTo(HaveOccurred())
var checkedCount int
fetchErr := db.Get(&checkedCount, `SELECT check_count FROM public.checked_headers WHERE header_id = $1`, headerID)
Expect(fetchErr).NotTo(HaveOccurred())
Expect(checkedCount).To(Equal(1))
})

It("increments check count on update", func() {
headerRepository := repositories.NewHeaderRepository(db)
headerID, headerErr := headerRepository.CreateOrUpdateHeader(fakes.FakeHeader)
Expect(headerErr).NotTo(HaveOccurred())

insertErr := repo.MarkHeaderChecked(headerID)
Expect(insertErr).NotTo(HaveOccurred())

updateErr := repo.MarkHeaderChecked(headerID)
Expect(updateErr).NotTo(HaveOccurred())

var checkedCount int
fetchErr := db.Get(&checkedCount, `SELECT check_count FROM public.checked_headers WHERE header_id = $1`, headerID)
Expect(fetchErr).NotTo(HaveOccurred())
Expect(checkedCount).To(Equal(2))
})
})

Describe("MissingHeaders", func() {
var (
headerRepository datastore.HeaderRepository
startingBlockNumber int64
endingBlockNumber int64
middleBlockNumber int64
outOfRangeBlockNumber int64
blockNumbers []int64
headerIDs []int64
err error
uncheckedCheckCount = int64(1)
recheckCheckCount = int64(2)
)

BeforeEach(func() {
db = test_config.NewTestDB(test_config.NewTestNode())
test_config.CleanTestDB(db)
headerRepository = repositories.NewHeaderRepository(db)
repo = repositories.NewCheckedHeadersRepository(db)

startingBlockNumber = rand.Int63()
middleBlockNumber = startingBlockNumber + 1
endingBlockNumber = startingBlockNumber + 2
outOfRangeBlockNumber = endingBlockNumber + 1

blockNumbers = []int64{startingBlockNumber, middleBlockNumber, endingBlockNumber, outOfRangeBlockNumber}

headerIDs = []int64{}
for _, n := range blockNumbers {
headerID, err := headerRepository.CreateOrUpdateHeader(fakes.GetFakeHeader(n))
headerIDs = append(headerIDs, headerID)
Expect(err).NotTo(HaveOccurred())
}
})

Describe("when ending block is specified", func() {
It("excludes headers that are out of range", func() {
headers, err := repo.MissingHeaders(startingBlockNumber, endingBlockNumber, uncheckedCheckCount)

Expect(err).NotTo(HaveOccurred())
// doesn't include outOfRangeBlockNumber
Expect(len(headers)).To(Equal(3))
Expect(headers[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(middleBlockNumber)))
Expect(headers[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(middleBlockNumber)))
Expect(headers[2].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(middleBlockNumber)))
})

It("excludes headers that have been checked more than the check count", func() {
_, err = db.Exec(`INSERT INTO public.checked_headers (header_id) VALUES ($1)`, headerIDs[1])
Expect(err).NotTo(HaveOccurred())

headers, err := repo.MissingHeaders(startingBlockNumber, endingBlockNumber, uncheckedCheckCount)

Expect(err).NotTo(HaveOccurred())
// doesn't include middleBlockNumber
Expect(len(headers)).To(Equal(2))
Expect(headers[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber)))
Expect(headers[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber)))
})

It("does not exclude headers that have been checked less than the check count", func() {
_, err = db.Exec(`INSERT INTO public.checked_headers (header_id) VALUES ($1)`, headerIDs[1])
Expect(err).NotTo(HaveOccurred())

headers, err := repo.MissingHeaders(startingBlockNumber, endingBlockNumber, recheckCheckCount)

Expect(err).NotTo(HaveOccurred())
Expect(len(headers)).To(Equal(3))
Expect(headers[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(middleBlockNumber), Equal(endingBlockNumber)))
Expect(headers[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(middleBlockNumber), Equal(endingBlockNumber)))
Expect(headers[2].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(middleBlockNumber), Equal(endingBlockNumber)))
})

It("only returns headers associated with the current node", func() {
dbTwo := test_config.NewTestDB(core.Node{ID: "second"})
headerRepositoryTwo := repositories.NewHeaderRepository(dbTwo)
repoTwo := repositories.NewCheckedHeadersRepository(dbTwo)
for _, n := range blockNumbers {
_, err = headerRepositoryTwo.CreateOrUpdateHeader(fakes.GetFakeHeader(n + 10))
Expect(err).NotTo(HaveOccurred())
}

Expect(err).NotTo(HaveOccurred())
nodeOneMissingHeaders, err := repo.MissingHeaders(startingBlockNumber, endingBlockNumber, uncheckedCheckCount)
Expect(err).NotTo(HaveOccurred())
Expect(len(nodeOneMissingHeaders)).To(Equal(3))
Expect(nodeOneMissingHeaders[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(middleBlockNumber), Equal(endingBlockNumber)))
Expect(nodeOneMissingHeaders[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(middleBlockNumber), Equal(endingBlockNumber)))
Expect(nodeOneMissingHeaders[2].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(middleBlockNumber), Equal(endingBlockNumber)))

nodeTwoMissingHeaders, err := repoTwo.MissingHeaders(startingBlockNumber, endingBlockNumber+10, uncheckedCheckCount)
Expect(err).NotTo(HaveOccurred())
Expect(len(nodeTwoMissingHeaders)).To(Equal(3))
Expect(nodeTwoMissingHeaders[0].BlockNumber).To(Or(Equal(startingBlockNumber+10), Equal(middleBlockNumber+10), Equal(endingBlockNumber+10)))
Expect(nodeTwoMissingHeaders[1].BlockNumber).To(Or(Equal(startingBlockNumber+10), Equal(middleBlockNumber+10), Equal(endingBlockNumber+10)))
Expect(nodeTwoMissingHeaders[2].BlockNumber).To(Or(Equal(startingBlockNumber+10), Equal(middleBlockNumber+10), Equal(endingBlockNumber+10)))
})

It("only returns 100 results to prevent blocking log delegation", func() {
for n := outOfRangeBlockNumber + 1; n < outOfRangeBlockNumber+100; n++ {
_, err := headerRepository.CreateOrUpdateHeader(fakes.GetFakeHeader(n))
Expect(err).NotTo(HaveOccurred())
}

missingHeaders, err := repo.MissingHeaders(startingBlockNumber, endingBlockNumber+200, uncheckedCheckCount)

Expect(err).NotTo(HaveOccurred())
Expect(len(missingHeaders)).To(Equal(100))
})
})

Describe("when ending block is -1", func() {
var endingBlock = int64(-1)

It("includes all non-checked headers when ending block is -1 ", func() {
headers, err := repo.MissingHeaders(startingBlockNumber, endingBlock, uncheckedCheckCount)

Expect(err).NotTo(HaveOccurred())
Expect(len(headers)).To(Equal(4))
Expect(headers[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(middleBlockNumber), Equal(outOfRangeBlockNumber)))
Expect(headers[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(middleBlockNumber), Equal(outOfRangeBlockNumber)))
Expect(headers[2].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(middleBlockNumber), Equal(outOfRangeBlockNumber)))
Expect(headers[3].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(middleBlockNumber), Equal(outOfRangeBlockNumber)))
})

It("excludes headers that have been checked more than the check count", func() {
_, err = db.Exec(`INSERT INTO public.checked_headers (header_id) VALUES ($1)`, headerIDs[1])
Expect(err).NotTo(HaveOccurred())

headers, err := repo.MissingHeaders(startingBlockNumber, endingBlock, uncheckedCheckCount)

Expect(err).NotTo(HaveOccurred())
// doesn't include middleBlockNumber
Expect(len(headers)).To(Equal(3))
Expect(headers[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(outOfRangeBlockNumber)))
Expect(headers[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(outOfRangeBlockNumber)))
Expect(headers[2].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(outOfRangeBlockNumber)))
})

It("does not exclude headers that have been checked less than the check count", func() {
_, err = db.Exec(`INSERT INTO public.checked_headers (header_id) VALUES ($1)`, headerIDs[1])
Expect(err).NotTo(HaveOccurred())

headers, err := repo.MissingHeaders(startingBlockNumber, endingBlock, recheckCheckCount)

Expect(err).NotTo(HaveOccurred())
Expect(len(headers)).To(Equal(4))
Expect(headers[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(middleBlockNumber), Equal(endingBlockNumber), Equal(outOfRangeBlockNumber)))
Expect(headers[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(middleBlockNumber), Equal(endingBlockNumber), Equal(outOfRangeBlockNumber)))
Expect(headers[2].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(middleBlockNumber), Equal(endingBlockNumber), Equal(outOfRangeBlockNumber)))
Expect(headers[3].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(middleBlockNumber), Equal(endingBlockNumber), Equal(outOfRangeBlockNumber)))
})

It("only returns headers associated with the current node", func() {
dbTwo := test_config.NewTestDB(core.Node{ID: "second"})
headerRepositoryTwo := repositories.NewHeaderRepository(dbTwo)
repoTwo := repositories.NewCheckedHeadersRepository(dbTwo)
for _, n := range blockNumbers {
_, err = headerRepositoryTwo.CreateOrUpdateHeader(fakes.GetFakeHeader(n + 10))
Expect(err).NotTo(HaveOccurred())
}

Expect(err).NotTo(HaveOccurred())
nodeOneMissingHeaders, err := repo.MissingHeaders(startingBlockNumber, endingBlock, uncheckedCheckCount)
Expect(err).NotTo(HaveOccurred())
Expect(len(nodeOneMissingHeaders)).To(Equal(4))
Expect(nodeOneMissingHeaders[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(middleBlockNumber), Equal(endingBlockNumber), Equal(outOfRangeBlockNumber)))
Expect(nodeOneMissingHeaders[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(middleBlockNumber), Equal(endingBlockNumber), Equal(outOfRangeBlockNumber)))
Expect(nodeOneMissingHeaders[2].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(middleBlockNumber), Equal(endingBlockNumber), Equal(outOfRangeBlockNumber)))
Expect(nodeOneMissingHeaders[3].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(middleBlockNumber), Equal(endingBlockNumber), Equal(outOfRangeBlockNumber)))

nodeTwoMissingHeaders, err := repoTwo.MissingHeaders(startingBlockNumber, endingBlock, uncheckedCheckCount)
Expect(err).NotTo(HaveOccurred())
Expect(len(nodeTwoMissingHeaders)).To(Equal(4))
Expect(nodeTwoMissingHeaders[0].BlockNumber).To(Or(Equal(startingBlockNumber+10), Equal(middleBlockNumber+10), Equal(endingBlockNumber+10), Equal(outOfRangeBlockNumber+10)))
Expect(nodeTwoMissingHeaders[1].BlockNumber).To(Or(Equal(startingBlockNumber+10), Equal(middleBlockNumber+10), Equal(endingBlockNumber+10), Equal(outOfRangeBlockNumber+10)))
Expect(nodeTwoMissingHeaders[2].BlockNumber).To(Or(Equal(startingBlockNumber+10), Equal(middleBlockNumber+10), Equal(endingBlockNumber+10), Equal(outOfRangeBlockNumber+10)))
Expect(nodeTwoMissingHeaders[3].BlockNumber).To(Or(Equal(startingBlockNumber+10), Equal(middleBlockNumber+10), Equal(endingBlockNumber+10), Equal(outOfRangeBlockNumber+10)))
})

It("only returns 100 results to prevent blocking log delegation", func() {
for n := outOfRangeBlockNumber + 1; n < outOfRangeBlockNumber+100; n++ {
_, err := headerRepository.CreateOrUpdateHeader(fakes.GetFakeHeader(n))
Expect(err).NotTo(HaveOccurred())
}

missingHeaders, err := repo.MissingHeaders(startingBlockNumber, endingBlock, uncheckedCheckCount)

Expect(err).NotTo(HaveOccurred())
Expect(len(missingHeaders)).To(Equal(100))
})
})

})
})
@ -17,20 +17,18 @@
package repositories

import (
"github.com/sirupsen/logrus"

"database/sql"

"github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
)

type LogRepository struct {
type FullSyncLogRepository struct {
*postgres.DB
}

func (logRepository LogRepository) CreateLogs(lgs []core.Log, receiptId int64) error {
tx, _ := logRepository.DB.Beginx()
func (repository FullSyncLogRepository) CreateLogs(lgs []core.FullSyncLog, receiptId int64) error {
tx, _ := repository.DB.Beginx()
for _, tlog := range lgs {
_, insertLogErr := tx.Exec(
`INSERT INTO full_sync_logs (block_number, address, tx_hash, index, topic0, topic1, topic2, topic3, data, receipt_id)
@ -57,8 +55,8 @@ func (logRepository LogRepository) CreateLogs(lgs []core.Log, receiptId int64) e
return nil
}

func (logRepository LogRepository) GetLogs(address string, blockNumber int64) ([]core.Log, error) {
logRows, err := logRepository.DB.Query(
func (repository FullSyncLogRepository) GetLogs(address string, blockNumber int64) ([]core.FullSyncLog, error) {
logRows, err := repository.DB.Query(
`SELECT block_number,
address,
tx_hash,
@ -72,13 +70,13 @@ func (logRepository LogRepository) GetLogs(address string, blockNumber int64) ([
WHERE address = $1 AND block_number = $2
ORDER BY block_number DESC`, address, blockNumber)
if err != nil {
return []core.Log{}, err
return []core.FullSyncLog{}, err
}
return logRepository.loadLogs(logRows)
return repository.loadLogs(logRows)
}

func (logRepository LogRepository) loadLogs(logsRows *sql.Rows) ([]core.Log, error) {
var lgs []core.Log
func (repository FullSyncLogRepository) loadLogs(logsRows *sql.Rows) ([]core.FullSyncLog, error) {
var lgs []core.FullSyncLog
for logsRows.Next() {
var blockNumber int64
var address string
@ -89,9 +87,9 @@ func (logRepository LogRepository) loadLogs(logsRows *sql.Rows) ([]core.Log, err
err := logsRows.Scan(&blockNumber, &address, &txHash, &index, &topics[0], &topics[1], &topics[2], &topics[3], &data)
if err != nil {
logrus.Error("loadLogs: Error scanning a row in logRows: ", err)
return []core.Log{}, err
return []core.FullSyncLog{}, err
}
lg := core.Log{
lg := core.FullSyncLog{
BlockNumber: blockNumber,
TxHash: txHash,
Address: address,
@ -29,11 +29,11 @@ import (
"github.com/vulcanize/vulcanizedb/test_config"
)

var _ = Describe("Logs Repository", func() {
var _ = Describe("Full sync log Repository", func() {
Describe("Saving logs", func() {
var db *postgres.DB
var blockRepository datastore.BlockRepository
var logsRepository datastore.LogRepository
var logsRepository datastore.FullSyncLogRepository
var receiptRepository datastore.FullSyncReceiptRepository
var node core.Node

@ -47,7 +47,7 @@ var _ = Describe("Logs Repository", func() {
db = test_config.NewTestDB(node)
test_config.CleanTestDB(db)
blockRepository = repositories.NewBlockRepository(db)
logsRepository = repositories.LogRepository{DB: db}
logsRepository = repositories.FullSyncLogRepository{DB: db}
receiptRepository = repositories.FullSyncReceiptRepository{DB: db}
})

@ -59,7 +59,7 @@ var _ = Describe("Logs Repository", func() {
receiptId, err := receiptRepository.CreateFullSyncReceiptInTx(blockId, core.Receipt{}, tx)
tx.Commit()
Expect(err).NotTo(HaveOccurred())
err = logsRepository.CreateLogs([]core.Log{{
err = logsRepository.CreateLogs([]core.FullSyncLog{{
BlockNumber: blockNumber,
Index: 0,
Address: "x123",
@ -98,7 +98,7 @@ var _ = Describe("Logs Repository", func() {
tx.Commit()
Expect(err).NotTo(HaveOccurred())

err = logsRepository.CreateLogs([]core.Log{{
err = logsRepository.CreateLogs([]core.FullSyncLog{{
BlockNumber: blockNumber,
Index: 0,
Address: "x123",
@ -108,7 +108,7 @@ var _ = Describe("Logs Repository", func() {
}}, receiptId)
Expect(err).NotTo(HaveOccurred())

err = logsRepository.CreateLogs([]core.Log{{
err = logsRepository.CreateLogs([]core.FullSyncLog{{
BlockNumber: blockNumber,
Index: 1,
Address: "x123",
@ -118,7 +118,7 @@ var _ = Describe("Logs Repository", func() {
}}, receiptId)
Expect(err).NotTo(HaveOccurred())

err = logsRepository.CreateLogs([]core.Log{{
err = logsRepository.CreateLogs([]core.FullSyncLog{{
BlockNumber: 2,
Index: 0,
Address: "x123",
@ -162,7 +162,7 @@ var _ = Describe("Logs Repository", func() {

It("saves the logs attached to a receipt", func() {

logs := []core.Log{{
logs := []core.FullSyncLog{{
Address: "0x8a4774fe82c63484afef97ca8d89a6ea5e21f973",
BlockNumber: 4745407,
Data: "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000645a68669900000000000000000000000000000000000000000000003397684ab5869b0000000000000000000000000000000000000000000000000000000000005a36053200000000000000000000000099041f808d598b782d5a3e498681c2452a31da08",
@ -20,7 +20,6 @@ import (
"database/sql"
"github.com/jmoiron/sqlx"
"github.com/sirupsen/logrus"

"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/datastore"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
@ -53,7 +52,22 @@ func (receiptRepository FullSyncReceiptRepository) CreateReceiptsAndLogs(blockId
return nil
}

func createLogs(logs []core.Log, receiptId int64, tx *sqlx.Tx) error {
func createReceipt(receipt core.Receipt, blockId int64, tx *sqlx.Tx) (int64, error) {
var receiptId int64
err := tx.QueryRow(
`INSERT INTO full_sync_receipts
(contract_address, tx_hash, cumulative_gas_used, gas_used, state_root, status, block_id)
VALUES ($1, $2, $3, $4, $5, $6, $7)
RETURNING id`,
receipt.ContractAddress, receipt.TxHash, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.StateRoot, receipt.Status, blockId,
).Scan(&receiptId)
if err != nil {
logrus.Error("createReceipt: Error inserting: ", err)
}
return receiptId, err
}

func createLogs(logs []core.FullSyncLog, receiptId int64, tx *sqlx.Tx) error {
for _, log := range logs {
_, err := tx.Exec(
`INSERT INTO full_sync_logs (block_number, address, tx_hash, index, topic0, topic1, topic2, topic3, data, receipt_id)
@ -29,7 +29,7 @@ import (

var _ = Describe("Receipt Repository", func() {
var blockRepository datastore.BlockRepository
var logRepository datastore.LogRepository
var logRepository datastore.FullSyncLogRepository
var receiptRepository datastore.FullSyncReceiptRepository
var db *postgres.DB
var node core.Node
@ -43,7 +43,7 @@ var _ = Describe("Receipt Repository", func() {
db = test_config.NewTestDB(node)
test_config.CleanTestDB(db)
blockRepository = repositories.NewBlockRepository(db)
logRepository = repositories.LogRepository{DB: db}
logRepository = repositories.FullSyncLogRepository{DB: db}
receiptRepository = repositories.FullSyncReceiptRepository{DB: db}
})

@ -56,7 +56,7 @@ var _ = Describe("Receipt Repository", func() {
txHashTwo := "0xTxHashTwo"
addressOne := "0xAddressOne"
addressTwo := "0xAddressTwo"
logsOne := []core.Log{{
logsOne := []core.FullSyncLog{{
Address: addressOne,
BlockNumber: blockNumber,
TxHash: txHashOne,
@ -65,7 +65,7 @@ var _ = Describe("Receipt Repository", func() {
BlockNumber: blockNumber,
TxHash: txHashOne,
}}
logsTwo := []core.Log{{
logsTwo := []core.FullSyncLog{{
BlockNumber: blockNumber,
TxHash: txHashTwo,
Address: addressTwo,
@ -112,7 +112,7 @@ var _ = Describe("Receipt Repository", func() {
ContractAddress: "0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae",
CumulativeGasUsed: 7996119,
GasUsed: 21000,
Logs: []core.Log{},
Logs: []core.FullSyncLog{},
StateRoot: "0x88abf7e73128227370aa7baa3dd4e18d0af70e92ef1f9ef426942fbe2dddb733",
Status: 1,
TxHash: "0xe340558980f89d5f86045ac11e5cc34e4bcec20f9f1e2a427aa39d87114e8223",
@ -0,0 +1,134 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package repositories

import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
"github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
)

const insertHeaderSyncLogQuery = `INSERT INTO header_sync_logs
(header_id, address, topics, data, block_number, block_hash, tx_index, tx_hash, log_index, raw)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) ON CONFLICT DO NOTHING`

type HeaderSyncLogRepository struct {
db *postgres.DB
}

func NewHeaderSyncLogRepository(db *postgres.DB) HeaderSyncLogRepository {
return HeaderSyncLogRepository{db: db}
}

type headerSyncLog struct {
ID int64
HeaderID int64 `db:"header_id"`
Address string
Topics pq.ByteaArray
Data []byte
BlockNumber uint64 `db:"block_number"`
BlockHash string `db:"block_hash"`
TxHash string `db:"tx_hash"`
TxIndex uint `db:"tx_index"`
LogIndex uint `db:"log_index"`
Transformed bool
Raw []byte
}

func (repository HeaderSyncLogRepository) GetUntransformedHeaderSyncLogs() ([]core.HeaderSyncLog, error) {
rows, queryErr := repository.db.Queryx(`SELECT * FROM public.header_sync_logs WHERE transformed = false`)
if queryErr != nil {
return nil, queryErr
}

var results []core.HeaderSyncLog
for rows.Next() {
var rawLog headerSyncLog
scanErr := rows.StructScan(&rawLog)
if scanErr != nil {
return nil, scanErr
}
var logTopics []common.Hash
for _, topic := range rawLog.Topics {
logTopics = append(logTopics, common.BytesToHash(topic))
}
reconstructedLog := types.Log{
Address: common.HexToAddress(rawLog.Address),
Topics: logTopics,
Data: rawLog.Data,
BlockNumber: rawLog.BlockNumber,
TxHash: common.HexToHash(rawLog.TxHash),
TxIndex: rawLog.TxIndex,
BlockHash: common.HexToHash(rawLog.BlockHash),
Index: rawLog.LogIndex,
// TODO: revisit if not cascade deleting logs when header removed
// currently, fetched logs are cascade deleted if removed
Removed: false,
}
result := core.HeaderSyncLog{
ID: rawLog.ID,
HeaderID: rawLog.HeaderID,
Log: reconstructedLog,
Transformed: rawLog.Transformed,
}
// TODO: Consider returning each result async to avoid keeping large result sets in memory
results = append(results, result)
}

return results, nil
}

func (repository HeaderSyncLogRepository) CreateHeaderSyncLogs(headerID int64, logs []types.Log) error {
tx, txErr := repository.db.Beginx()
if txErr != nil {
return txErr
}
for _, log := range logs {
err := insertLog(headerID, log, tx)
if err != nil {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
logrus.Errorf("failed to rollback header sync log insert: %s", rollbackErr.Error())
}
return err
}
}
return tx.Commit()
}

func insertLog(headerID int64, log types.Log, tx *sqlx.Tx) error {
topics := buildTopics(log)
raw, jsonErr := log.MarshalJSON()
if jsonErr != nil {
return jsonErr
}
_, insertErr := tx.Exec(insertHeaderSyncLogQuery, headerID, log.Address.Hex(), topics, log.Data, log.BlockNumber,
log.BlockHash.Hex(), log.TxIndex, log.TxHash.Hex(), log.Index, raw)
return insertErr
}

func buildTopics(log types.Log) pq.ByteaArray {
var topics pq.ByteaArray
for _, topic := range log.Topics {
topics = append(topics, topic.Bytes())
}
return topics
}
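The two methods above are the two halves of the decoupling: extraction persists raw logs with CreateHeaderSyncLogs, and delegation later reads them back with GetUntransformedHeaderSyncLogs, still as types.Log. A short round-trip sketch follows; the repository names come from the new file above, while the surrounding function is invented for illustration.

// Persist fetched logs, then read back the ones no transformer has handled yet.
func roundTrip(db *postgres.DB, headerID int64, fetched []types.Log) error {
	repo := repositories.NewHeaderSyncLogRepository(db)
	// Extraction side: inserts are idempotent thanks to ON CONFLICT DO NOTHING.
	if createErr := repo.CreateHeaderSyncLogs(headerID, fetched); createErr != nil {
		return createErr
	}
	// Delegation side: fetch only rows where transformed = false.
	pending, readErr := repo.GetUntransformedHeaderSyncLogs()
	if readErr != nil {
		return readErr
	}
	for _, headerSyncLog := range pending {
		// headerSyncLog.Log is a reconstructed types.Log, so abi-based
		// unpacking tools still work on it downstream.
		_ = headerSyncLog
	}
	return nil
}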
@ -0,0 +1,203 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package repositories_test

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/lib/pq"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/vulcanize/vulcanizedb/libraries/shared/test_data"
	"github.com/vulcanize/vulcanizedb/pkg/datastore"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
	"github.com/vulcanize/vulcanizedb/pkg/fakes"
	"github.com/vulcanize/vulcanizedb/test_config"
)

var _ = Describe("Header sync log repository", func() {
	var (
		db         *postgres.DB
		headerID   int64
		repository datastore.HeaderSyncLogRepository
	)

	BeforeEach(func() {
		db = test_config.NewTestDB(test_config.NewTestNode())
		test_config.CleanTestDB(db)
		headerRepository := repositories.NewHeaderRepository(db)
		var headerErr error
		headerID, headerErr = headerRepository.CreateOrUpdateHeader(fakes.FakeHeader)
		Expect(headerErr).NotTo(HaveOccurred())
		repository = repositories.NewHeaderSyncLogRepository(db)
	})

	Describe("CreateHeaderSyncLogs", func() {
		type HeaderSyncLog struct {
			ID          int64
			HeaderID    int64 `db:"header_id"`
			Address     string
			Topics      pq.ByteaArray
			Data        []byte
			BlockNumber uint64 `db:"block_number"`
			BlockHash   string `db:"block_hash"`
			TxHash      string `db:"tx_hash"`
			TxIndex     uint   `db:"tx_index"`
			LogIndex    uint   `db:"log_index"`
			Transformed bool
			Raw         []byte
		}

		It("writes a log to the db", func() {
			log := test_data.GenericTestLog()

			err := repository.CreateHeaderSyncLogs(headerID, []types.Log{log})

			Expect(err).NotTo(HaveOccurred())
			var dbLog HeaderSyncLog
			lookupErr := db.Get(&dbLog, `SELECT * FROM header_sync_logs`)
			Expect(lookupErr).NotTo(HaveOccurred())
			Expect(dbLog.ID).NotTo(BeZero())
			Expect(dbLog.HeaderID).To(Equal(headerID))
			Expect(dbLog.Address).To(Equal(log.Address.Hex()))
			Expect(dbLog.Topics[0]).To(Equal(log.Topics[0].Bytes()))
			Expect(dbLog.Topics[1]).To(Equal(log.Topics[1].Bytes()))
			Expect(dbLog.Data).To(Equal(log.Data))
			Expect(dbLog.BlockNumber).To(Equal(log.BlockNumber))
			Expect(dbLog.BlockHash).To(Equal(log.BlockHash.Hex()))
			Expect(dbLog.TxIndex).To(Equal(log.TxIndex))
			Expect(dbLog.TxHash).To(Equal(log.TxHash.Hex()))
			Expect(dbLog.LogIndex).To(Equal(log.Index))
			expectedRaw, jsonErr := log.MarshalJSON()
			Expect(jsonErr).NotTo(HaveOccurred())
			Expect(dbLog.Raw).To(MatchJSON(expectedRaw))
			Expect(dbLog.Transformed).To(BeFalse())
		})

		It("writes several logs to the db", func() {
			log1 := test_data.GenericTestLog()
			log2 := test_data.GenericTestLog()
			logs := []types.Log{log1, log2}

			err := repository.CreateHeaderSyncLogs(headerID, logs)

			Expect(err).NotTo(HaveOccurred())
			var count int
			lookupErr := db.Get(&count, `SELECT COUNT(*) FROM header_sync_logs`)
			Expect(lookupErr).NotTo(HaveOccurred())
			Expect(count).To(Equal(len(logs)))
		})

		It("persists record that can be unpacked into types.Log", func() {
			// important if we want to decouple log persistence from transforming and still make use of
			// tools on types.Log like abi.Unpack
			log := test_data.GenericTestLog()

			err := repository.CreateHeaderSyncLogs(headerID, []types.Log{log})

			Expect(err).NotTo(HaveOccurred())
			var dbLog HeaderSyncLog
			lookupErr := db.Get(&dbLog, `SELECT * FROM header_sync_logs`)
			Expect(lookupErr).NotTo(HaveOccurred())

			var logTopics []common.Hash
			for _, topic := range dbLog.Topics {
				logTopics = append(logTopics, common.BytesToHash(topic))
			}

			reconstructedLog := types.Log{
				Address:     common.HexToAddress(dbLog.Address),
				Topics:      logTopics,
				Data:        dbLog.Data,
				BlockNumber: dbLog.BlockNumber,
				TxHash:      common.HexToHash(dbLog.TxHash),
				TxIndex:     dbLog.TxIndex,
				BlockHash:   common.HexToHash(dbLog.BlockHash),
				Index:       dbLog.LogIndex,
				Removed:     false,
			}
			Expect(reconstructedLog).To(Equal(log))
		})

		It("does not duplicate logs", func() {
			log := test_data.GenericTestLog()

			err := repository.CreateHeaderSyncLogs(headerID, []types.Log{log, log})

			Expect(err).NotTo(HaveOccurred())
			var count int
			lookupErr := db.Get(&count, `SELECT COUNT(*) FROM header_sync_logs`)
			Expect(lookupErr).NotTo(HaveOccurred())
			Expect(count).To(Equal(1))
		})
	})

	Describe("GetUntransformedHeaderSyncLogs", func() {
		Describe("when there are no logs", func() {
			It("returns empty collection", func() {
				result, err := repository.GetUntransformedHeaderSyncLogs()

				Expect(err).NotTo(HaveOccurred())
				Expect(len(result)).To(BeZero())
			})
		})

		Describe("when there are logs", func() {
			var log1, log2 types.Log

			BeforeEach(func() {
				log1 = test_data.GenericTestLog()
				log2 = test_data.GenericTestLog()
				logs := []types.Log{log1, log2}
				logsErr := repository.CreateHeaderSyncLogs(headerID, logs)
				Expect(logsErr).NotTo(HaveOccurred())
			})

			It("returns persisted logs", func() {
				result, err := repository.GetUntransformedHeaderSyncLogs()

				Expect(err).NotTo(HaveOccurred())
				Expect(len(result)).To(Equal(2))
				Expect(result[0].Log).To(Or(Equal(log1), Equal(log2)))
				Expect(result[1].Log).To(Or(Equal(log1), Equal(log2)))
				Expect(result[0].Log).NotTo(Equal(result[1].Log))
			})

			It("excludes logs that have been transformed", func() {
				_, updateErr := db.Exec(`UPDATE public.header_sync_logs SET transformed = true WHERE tx_hash = $1`, log1.TxHash.Hex())
				Expect(updateErr).NotTo(HaveOccurred())

				result, err := repository.GetUntransformedHeaderSyncLogs()

				Expect(err).NotTo(HaveOccurred())
				Expect(len(result)).To(Equal(1))
				Expect(result[0].Log).To(Equal(log2))
			})

			It("returns empty collection if all logs transformed", func() {
				_, updateErr := db.Exec(`UPDATE public.header_sync_logs SET transformed = true WHERE header_id = $1`, headerID)
				Expect(updateErr).NotTo(HaveOccurred())

				result, err := repository.GetUntransformedHeaderSyncLogs()

				Expect(err).NotTo(HaveOccurred())
				Expect(len(result)).To(BeZero())
			})
		})
	})
})
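The comment in the round-trip spec above states the motivation for reconstructing types.Log: persisted logs stay usable with standard go-ethereum tooling such as abi.Unpack. A minimal sketch of that usage; the event name, ABI JSON, and helper are hypothetical, and Unpack's signature shown here is the one used by go-ethereum releases of this era (imports: math/big, strings, accounts/abi, core/types):

// Hypothetical helper: unpacks a reconstructed log's data with a contract ABI.
// reconstructedLog would be rebuilt from a header_sync_logs row as in the spec above.
func unpackTransferValue(reconstructedLog types.Log) (*big.Int, error) {
	const transferABI = `[{"anonymous":false,"inputs":[{"indexed":false,"name":"value","type":"uint256"}],"name":"Transfer","type":"event"}]`
	parsedABI, parseErr := abi.JSON(strings.NewReader(transferABI))
	if parseErr != nil {
		return nil, parseErr
	}
	var out struct{ Value *big.Int }
	if unpackErr := parsedABI.Unpack(&out, "Transfer", reconstructedLog.Data); unpackErr != nil {
		return nil, unpackErr
	}
	return out.Value, nil
}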
@ -32,7 +32,7 @@ var _ = Describe("Watched Events Repository", func() {
	var db *postgres.DB
	var blocksRepository datastore.BlockRepository
	var filterRepository datastore.FilterRepository
	var logRepository datastore.LogRepository
	var logRepository datastore.FullSyncLogRepository
	var receiptRepository datastore.FullSyncReceiptRepository
	var watchedEventRepository datastore.WatchedEventRepository

@ -41,7 +41,7 @@ var _ = Describe("Watched Events Repository", func() {
		test_config.CleanTestDB(db)
		blocksRepository = repositories.NewBlockRepository(db)
		filterRepository = repositories.FilterRepository{DB: db}
		logRepository = repositories.LogRepository{DB: db}
		logRepository = repositories.FullSyncLogRepository{DB: db}
		receiptRepository = repositories.FullSyncReceiptRepository{DB: db}
		watchedEventRepository = repositories.WatchedEventRepository{DB: db}
	})
@ -54,7 +54,7 @@ var _ = Describe("Watched Events Repository", func() {
			Address: "0x123",
			Topics:  core.Topics{0: "event1=10", 2: "event3=hello"},
		}
		logs := []core.Log{
		logs := []core.FullSyncLog{
			{
				BlockNumber: 0,
				TxHash:      "0x1",
@ -108,7 +108,7 @@ var _ = Describe("Watched Events Repository", func() {
			Address: "0x123",
			Topics:  core.Topics{0: "event1=10", 2: "event3=hello"},
		}
		logs := []core.Log{
		logs := []core.FullSyncLog{
			{
				BlockNumber: 0,
				TxHash:      "0x1",
@ -17,6 +17,7 @@
package datastore

import (
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/jmoiron/sqlx"
	"github.com/vulcanize/vulcanizedb/pkg/core"
	"github.com/vulcanize/vulcanizedb/pkg/filters"
@ -33,6 +34,11 @@ type BlockRepository interface {
	SetBlocksStatus(chainHead int64) error
}

type CheckedHeadersRepository interface {
	MarkHeaderChecked(headerID int64) error
	MissingHeaders(startingBlockNumber, endingBlockNumber, checkCount int64) ([]core.Header, error)
}

type ContractRepository interface {
	CreateContract(contract core.Contract) error
	GetContract(contractHash string) (core.Contract, error)
@ -44,6 +50,11 @@ type FilterRepository interface {
	GetFilter(name string) (filters.LogFilter, error)
}

type FullSyncLogRepository interface {
	CreateLogs(logs []core.FullSyncLog, receiptId int64) error
	GetLogs(address string, blockNumber int64) ([]core.FullSyncLog, error)
}

type HeaderRepository interface {
	CreateOrUpdateHeader(header core.Header) (int64, error)
	CreateTransactions(headerID int64, transactions []core.TransactionModel) error
@ -51,9 +62,9 @@ type HeaderRepository interface {
	MissingBlockNumbers(startingBlockNumber, endingBlockNumber int64, nodeID string) ([]int64, error)
}

type LogRepository interface {
	CreateLogs(logs []core.Log, receiptId int64) error
	GetLogs(address string, blockNumber int64) ([]core.Log, error)
type HeaderSyncLogRepository interface {
	GetUntransformedHeaderSyncLogs() ([]core.HeaderSyncLog, error)
	CreateHeaderSyncLogs(headerID int64, logs []types.Log) error
}

type FullSyncReceiptRepository interface {
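Per the commit message, missing-headers results are capped at 100 rows so extraction doesn't excessively block transformer delegation. A sketch of the kind of query a CheckedHeadersRepository implementation plausibly runs behind MissingHeaders; the table name, column names, and WHERE clause are assumptions, only the interface above comes from this diff:

// Assumed query shape: $3 is the checkCount argument, and the fixed LIMIT
// keeps each extraction pass to a bounded batch of headers.
const missingHeadersQuery = `SELECT id, block_number, hash
	FROM headers
	WHERE block_number >= $1
	  AND block_number <= $2
	  AND check_count < $3
	LIMIT 100`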
@ -107,8 +107,8 @@ func (chain *MockBlockChain) GetHeadersByNumbers(blockNumbers []int64) ([]core.H
	return headers, nil
}

func (chain *MockBlockChain) GetLogs(contract core.Contract, startingBlockNumber, endingBlockNumber *big.Int) ([]core.Log, error) {
	return []core.Log{}, nil
func (chain *MockBlockChain) GetFullSyncLogs(contract core.Contract, startingBlockNumber, endingBlockNumber *big.Int) ([]core.FullSyncLog, error) {
	return []core.FullSyncLog{}, nil
}

func (chain *MockBlockChain) GetTransactions(transactionHashes []common.Hash) ([]core.TransactionModel, error) {
43 pkg/fakes/mock_checked_headers_repository.go Normal file
@ -0,0 +1,43 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package fakes

import (
	"github.com/vulcanize/vulcanizedb/pkg/core"
)

type MockCheckedHeadersRepository struct {
	CheckCount                   int64
	StartingBlockNumber          int64
	EndingBlockNumber            int64
	HeaderID                     int64
	ReturnHeaders                []core.Header
	MarkHeaderCheckedReturnError error
	MissingHeadersReturnError    error
}

func (repository *MockCheckedHeadersRepository) MarkHeaderChecked(headerID int64) error {
	repository.HeaderID = headerID
	return repository.MarkHeaderCheckedReturnError
}

func (repository *MockCheckedHeadersRepository) MissingHeaders(startingBlockNumber, endingBlockNumber, checkCount int64) ([]core.Header, error) {
	repository.StartingBlockNumber = startingBlockNumber
	repository.EndingBlockNumber = endingBlockNumber
	repository.CheckCount = checkCount
	return repository.ReturnHeaders, repository.MissingHeadersReturnError
}
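A brief sketch of how this fake is intended to be exercised in a Ginkgo spec; the spec itself is hypothetical, but the recorded fields come from the struct above:

// Hypothetical spec: the fake records its arguments so the test can assert
// the caller requested the expected header range and check count.
It("requests missing headers with the configured bounds", func() {
	repo := &fakes.MockCheckedHeadersRepository{
		ReturnHeaders: []core.Header{{BlockNumber: 5}},
	}

	headers, err := repo.MissingHeaders(1, 10, 3)

	Expect(err).NotTo(HaveOccurred())
	Expect(headers).To(HaveLen(1))
	Expect(repo.StartingBlockNumber).To(Equal(int64(1)))
	Expect(repo.EndingBlockNumber).To(Equal(int64(10)))
	Expect(repo.CheckCount).To(Equal(int64(3)))
})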
42 pkg/fakes/mock_header_sync_log_repository.go Normal file
@ -0,0 +1,42 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package fakes

import (
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/vulcanize/vulcanizedb/pkg/core"
)

type MockHeaderSyncLogRepository struct {
	CreateError    error
	GetCalled      bool
	GetError       error
	PassedHeaderID int64
	PassedLogs     []types.Log
	ReturnLogs     []core.HeaderSyncLog
}

func (repository *MockHeaderSyncLogRepository) GetUntransformedHeaderSyncLogs() ([]core.HeaderSyncLog, error) {
	repository.GetCalled = true
	return repository.ReturnLogs, repository.GetError
}

func (repository *MockHeaderSyncLogRepository) CreateHeaderSyncLogs(headerID int64, logs []types.Log) error {
	repository.PassedHeaderID = headerID
	repository.PassedLogs = logs
	return repository.CreateError
}
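Similarly, a sketch of this fake standing in on the delegation side of the extraction/delegation split; the spec is hypothetical, the fields are from the file above:

// Hypothetical spec: the fake returns canned untransformed logs and records
// that they were requested.
It("serves untransformed logs for delegation", func() {
	repo := &fakes.MockHeaderSyncLogRepository{
		ReturnLogs: []core.HeaderSyncLog{{ID: 1, Transformed: false}},
	}

	logs, err := repo.GetUntransformedHeaderSyncLogs()

	Expect(err).NotTo(HaveOccurred())
	Expect(repo.GetCalled).To(BeTrue())
	Expect(logs).To(HaveLen(1))
})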
@ -86,7 +86,7 @@ func (blockChain *BlockChain) GetHeadersByNumbers(blockNumbers []int64) (header
	return blockChain.getPOWHeaders(blockNumbers)
}

func (blockChain *BlockChain) GetLogs(contract core.Contract, startingBlockNumber, endingBlockNumber *big.Int) ([]core.Log, error) {
func (blockChain *BlockChain) GetFullSyncLogs(contract core.Contract, startingBlockNumber, endingBlockNumber *big.Int) ([]core.FullSyncLog, error) {
	if endingBlockNumber == nil {
		endingBlockNumber = startingBlockNumber
	}
@ -99,9 +99,9 @@ func (blockChain *BlockChain) GetLogs(contract core.Contract, startingBlockNumbe
	}
	gethLogs, err := blockChain.GetEthLogsWithCustomQuery(fc)
	if err != nil {
		return []core.Log{}, err
		return []core.FullSyncLog{}, err
	}
	logs := vulcCommon.ToCoreLogs(gethLogs)
	logs := vulcCommon.ToFullSyncLogs(gethLogs)
	return logs, nil
}
@ -154,7 +154,7 @@ var _ = Describe("Geth blockchain", func() {
			startingBlockNumber := big.NewInt(1)
			endingBlockNumber := big.NewInt(2)

			_, err := blockChain.GetLogs(contract, startingBlockNumber, endingBlockNumber)
			_, err := blockChain.GetFullSyncLogs(contract, startingBlockNumber, endingBlockNumber)

			Expect(err).NotTo(HaveOccurred())
			expectedQuery := ethereum.FilterQuery{
@ -171,7 +171,7 @@ var _ = Describe("Geth blockchain", func() {
			startingBlockNumber := big.NewInt(1)
			endingBlockNumber := big.NewInt(2)

			_, err := blockChain.GetLogs(contract, startingBlockNumber, endingBlockNumber)
			_, err := blockChain.GetFullSyncLogs(contract, startingBlockNumber, endingBlockNumber)

			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError(fakes.FakeError))
@ -26,8 +26,8 @@ import (
	"github.com/vulcanize/vulcanizedb/pkg/core"
)

func ToCoreLogs(gethLogs []types.Log) []core.Log {
	var logs []core.Log
func ToFullSyncLogs(gethLogs []types.Log) []core.FullSyncLog {
	var logs []core.FullSyncLog
	for _, log := range gethLogs {
		log := ToCoreLog(log)
		logs = append(logs, log)
@ -43,10 +43,10 @@ func makeTopics(topics []common.Hash) core.Topics {
	return hexTopics
}

func ToCoreLog(gethLog types.Log) core.Log {
func ToCoreLog(gethLog types.Log) core.FullSyncLog {
	topics := gethLog.Topics
	hexTopics := makeTopics(topics)
	return core.Log{
	return core.FullSyncLog{
		Address:     strings.ToLower(gethLog.Address.Hex()),
		BlockNumber: int64(gethLog.BlockNumber),
		Topics:      hexTopics,
@ -29,7 +29,7 @@ import (
	vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
)

var _ = Describe("Conversion of GethLog to core.Log", func() {
var _ = Describe("Conversion of GethLog to core.FullSyncLog", func() {

	It("converts geth log to internal log format", func() {
		gethLog := types.Log{
@ -46,7 +46,7 @@ var _ = Describe("Conversion of GethLog to core.Log", func() {
			},
		}

		expected := core.Log{
		expected := core.FullSyncLog{
			Address:     strings.ToLower(gethLog.Address.Hex()),
			BlockNumber: int64(gethLog.BlockNumber),
			Data:        hexutil.Encode(gethLog.Data),
@ -101,7 +101,7 @@ var _ = Describe("Conversion of GethLog to core.Log", func() {
		expectedOne := vulcCommon.ToCoreLog(gethLogOne)
		expectedTwo := vulcCommon.ToCoreLog(gethLogTwo)

		coreLogs := vulcCommon.ToCoreLogs([]types.Log{gethLogOne, gethLogTwo})
		coreLogs := vulcCommon.ToFullSyncLogs([]types.Log{gethLogOne, gethLogTwo})

		Expect(len(coreLogs)).To(Equal(2))
		Expect(coreLogs[0]).To(Equal(expectedOne))
@ -73,8 +73,8 @@ func setContractAddress(gethReceipt *types.Receipt) string {
	return gethReceipt.ContractAddress.Hex()
}

func dereferenceLogs(gethReceipt *types.Receipt) []core.Log {
	logs := []core.Log{}
func dereferenceLogs(gethReceipt *types.Receipt) []core.FullSyncLog {
	logs := []core.FullSyncLog{}
	for _, log := range gethReceipt.Logs {
		logs = append(logs, ToCoreLog(*log))
	}
@ -51,7 +51,7 @@ var _ = Describe("Conversion of GethReceipt to core.Receipt", func() {
			ContractAddress:   "",
			CumulativeGasUsed: 25000,
			GasUsed:           21000,
			Logs:              []core.Log{},
			Logs:              []core.FullSyncLog{},
			StateRoot:         "0x88abf7e73128227370aa7baa3dd4e18d0af70e92ef1f9ef426942fbe2dddb733",
			Status:            -99,
			TxHash:            receipt.TxHash.Hex(),
@ -92,7 +92,7 @@ var _ = Describe("Conversion of GethReceipt to core.Receipt", func() {
			ContractAddress:   receipt.ContractAddress.Hex(),
			CumulativeGasUsed: 7996119,
			GasUsed:           21000,
			Logs:              []core.Log{},
			Logs:              []core.FullSyncLog{},
			StateRoot:         "",
			Status:            1,
			TxHash:            receipt.TxHash.Hex(),
@ -19,16 +19,14 @@ package test_config
import (
	"errors"
	"fmt"
	"os"

	. "github.com/onsi/gomega"
	log "github.com/sirupsen/logrus"
	"github.com/sirupsen/logrus"
	"github.com/spf13/viper"

	"github.com/vulcanize/vulcanizedb/pkg/config"
	"github.com/vulcanize/vulcanizedb/pkg/core"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
	"os"
)

var TestConfig *viper.Viper
@ -50,7 +48,7 @@ func setTestConfig() {
	TestConfig.AddConfigPath("$GOPATH/src/github.com/vulcanize/vulcanizedb/environments/")
	err := TestConfig.ReadInConfig()
	if err != nil {
		log.Fatal(err)
		logrus.Fatal(err)
	}
	ipc := TestConfig.GetString("client.ipcPath")
	hn := TestConfig.GetString("database.hostname")
@ -73,7 +71,7 @@ func setInfuraConfig() {
	Infura.AddConfigPath("$GOPATH/src/github.com/vulcanize/vulcanizedb/environments/")
	err := Infura.ReadInConfig()
	if err != nil {
		log.Fatal(err)
		logrus.Fatal(err)
	}
	ipc := Infura.GetString("client.ipcpath")

@ -83,7 +81,7 @@ func setInfuraConfig() {
	ipc = Infura.GetString("url")
	}
	if ipc == "" {
		log.Fatal(errors.New("infura.toml IPC path or $INFURA_URL env variable need to be set"))
		logrus.Fatal(errors.New("infura.toml IPC path or $INFURA_URL env variable need to be set"))
	}

	InfuraClient = config.Client{