Merge pull request #131 from vulcanize/vdb-699-logs-table

(VDB-699) Decouple Log extraction vs delegation to transformers

commit 2b151c2ba3
@@ -170,7 +170,10 @@ func composeAndExecute() {
 	var wg syn.WaitGroup
 	if len(ethEventInitializers) > 0 {
 		ew := watcher.NewEventWatcher(&db, blockChain)
-		ew.AddTransformers(ethEventInitializers)
+		err := ew.AddTransformers(ethEventInitializers)
+		if err != nil {
+			LogWithCommand.Fatalf("failed to add event transformer initializers to watcher: %s", err.Error())
+		}
 		wg.Add(1)
 		go watchEthEvents(&ew, &wg)
 	}

@@ -114,7 +114,10 @@ func execute() {
 	var wg syn.WaitGroup
 	if len(ethEventInitializers) > 0 {
 		ew := watcher.NewEventWatcher(&db, blockChain)
-		ew.AddTransformers(ethEventInitializers)
+		err = ew.AddTransformers(ethEventInitializers)
+		if err != nil {
+			LogWithCommand.Fatalf("failed to add event transformer initializers to watcher: %s", err.Error())
+		}
 		wg.Add(1)
 		go watchEthEvents(&ew, &wg)
 	}

@@ -155,12 +158,11 @@ func watchEthEvents(w *watcher.EventWatcher, wg *syn.WaitGroup) {
 	if recheckHeadersArg {
 		recheck = constants.HeaderRecheck
 	} else {
-		recheck = constants.HeaderMissing
+		recheck = constants.HeaderUnchecked
 	}
-	ticker := time.NewTicker(pollingInterval)
-	defer ticker.Stop()
-	for range ticker.C {
-		w.Execute(recheck)
+	err := w.Execute(recheck)
+	if err != nil {
+		LogWithCommand.Fatalf("error executing event watcher: %s", err.Error())
 	}
 }
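
Note on the watchEthEvents hunk above: the command layer no longer owns the polling ticker, so the loop presumably moves into EventWatcher.Execute. A minimal sketch of what that could look like, assuming Execute now polls internally and surfaces the first error to the caller; the PollingInterval field and processTick helper are illustrative, not taken from this diff:

package watcher

import (
	"time"

	"github.com/vulcanize/vulcanizedb/libraries/shared/constants"
)

// Illustrative-only shape; the real EventWatcher is not shown in this diff.
type EventWatcher struct {
	PollingInterval time.Duration
}

// Execute is assumed to own the ticker loop that used to live in the cmd
// layer, returning on the first error so the caller can log fatally
// (see the hunk above).
func (w *EventWatcher) Execute(recheck constants.TransformerExecution) error {
	ticker := time.NewTicker(w.PollingInterval)
	defer ticker.Stop()
	for range ticker.C {
		if err := w.processTick(recheck); err != nil {
			return err
		}
	}
	return nil
}

// processTick stands in for the extract-then-delegate work done each poll.
func (w *EventWatcher) processTick(recheck constants.TransformerExecution) error {
	return nil
}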

db/migrations/00007_create_full_sync_logs_table.sql (new file)
@@ -0,0 +1,18 @@
+-- +goose Up
+CREATE TABLE full_sync_logs
+(
+  id SERIAL PRIMARY KEY,
+  block_number BIGINT,
+  address VARCHAR(66),
+  tx_hash VARCHAR(66),
+  index BIGINT,
+  topic0 VARCHAR(66),
+  topic1 VARCHAR(66),
+  topic2 VARCHAR(66),
+  topic3 VARCHAR(66),
+  data TEXT
+);
+
+
+-- +goose Down
+DROP TABLE full_sync_logs;

(deleted file)
@@ -1,19 +0,0 @@
--- +goose Up
-CREATE TABLE logs (
-  id SERIAL PRIMARY KEY,
-  block_number BIGINT,
-  address VARCHAR(66),
-  tx_hash VARCHAR(66),
-  index BIGINT,
-  topic0 VARCHAR(66),
-  topic1 VARCHAR(66),
-  topic2 VARCHAR(66),
-  topic3 VARCHAR(66),
-  data TEXT,
-  CONSTRAINT log_uc UNIQUE (block_number, index)
-);
-
-
-
--- +goose Down
-DROP TABLE logs;

@@ -1,23 +1,17 @@
 -- +goose Up
-ALTER TABLE logs
-  DROP CONSTRAINT log_uc;
-
-ALTER TABLE logs
+ALTER TABLE full_sync_logs
   ADD COLUMN receipt_id INT;
 
-ALTER TABLE logs
+ALTER TABLE full_sync_logs
   ADD CONSTRAINT receipts_fk
 FOREIGN KEY (receipt_id)
 REFERENCES full_sync_receipts (id)
 ON DELETE CASCADE;
 
 
 -- +goose Down
-ALTER TABLE logs
+ALTER TABLE full_sync_logs
   DROP CONSTRAINT receipts_fk;
 
-ALTER TABLE logs
+ALTER TABLE full_sync_logs
   DROP COLUMN receipt_id;
-
-ALTER TABLE logs
-  ADD CONSTRAINT log_uc UNIQUE (block_number, index);

@@ -1,33 +1,31 @@
 -- +goose Up
 CREATE VIEW block_stats AS
-SELECT
-  max(block_number) AS max_block,
+SELECT max(block_number) AS max_block,
   min(block_number) AS min_block
-FROM logs;
+FROM full_sync_logs;
 
 CREATE VIEW watched_event_logs AS
-SELECT
-  log_filters.name,
-  logs.id,
+SELECT log_filters.name,
+  full_sync_logs.id,
   block_number,
-  logs.address,
+  full_sync_logs.address,
   tx_hash,
   index,
-  logs.topic0,
-  logs.topic1,
-  logs.topic2,
-  logs.topic3,
+  full_sync_logs.topic0,
+  full_sync_logs.topic1,
+  full_sync_logs.topic2,
+  full_sync_logs.topic3,
   data,
   receipt_id
 FROM log_filters
   CROSS JOIN block_stats
-  JOIN logs ON logs.address = log_filters.address
-    AND logs.block_number >= coalesce(log_filters.from_block, block_stats.min_block)
-    AND logs.block_number <= coalesce(log_filters.to_block, block_stats.max_block)
-WHERE (log_filters.topic0 = logs.topic0 OR log_filters.topic0 ISNULL)
-  AND (log_filters.topic1 = logs.topic1 OR log_filters.topic1 ISNULL)
-  AND (log_filters.topic2 = logs.topic2 OR log_filters.topic2 ISNULL)
-  AND (log_filters.topic3 = logs.topic3 OR log_filters.topic3 ISNULL);
+  JOIN full_sync_logs ON full_sync_logs.address = log_filters.address
+    AND full_sync_logs.block_number >= coalesce(log_filters.from_block, block_stats.min_block)
+    AND full_sync_logs.block_number <= coalesce(log_filters.to_block, block_stats.max_block)
+WHERE (log_filters.topic0 = full_sync_logs.topic0 OR log_filters.topic0 ISNULL)
+  AND (log_filters.topic1 = full_sync_logs.topic1 OR log_filters.topic1 ISNULL)
+  AND (log_filters.topic2 = full_sync_logs.topic2 OR log_filters.topic2 ISNULL)
+  AND (log_filters.topic3 = full_sync_logs.topic3 OR log_filters.topic3 ISNULL);
 
 -- +goose Down
 DROP VIEW watched_event_logs;
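
The renamed view keeps the same columns, so existing consumers only need the new name. A hedged sqlx sketch of querying it; the struct tags, the column subset, and the connection string are assumptions, while sqlx and lib/pq both appear in the go.mod changes below:

package main

import (
	"fmt"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

// WatchedEventLog maps a subset of the view's columns; the column names
// come from the view definition above, the tags themselves are illustrative.
type WatchedEventLog struct {
	Name        string `db:"name"`
	BlockNumber int64  `db:"block_number"`
	TxHash      string `db:"tx_hash"`
	Data        string `db:"data"`
}

func main() {
	db := sqlx.MustConnect("postgres", "dbname=vulcanize_public sslmode=disable")
	var logs []WatchedEventLog
	err := db.Select(&logs, `SELECT name, block_number, tx_hash, data FROM watched_event_logs`)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d watched event logs\n", len(logs))
}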

@@ -1,10 +1,12 @@
 -- +goose Up
-CREATE TABLE public.headers (
+CREATE TABLE public.headers
+(
   id SERIAL PRIMARY KEY,
   hash VARCHAR(66),
   block_number BIGINT,
   raw JSONB,
   block_timestamp NUMERIC,
+  check_count INTEGER NOT NULL DEFAULT 0,
   eth_node_id INTEGER NOT NULL REFERENCES eth_nodes (id) ON DELETE CASCADE,
   eth_node_fingerprint VARCHAR(128)
 );

db/migrations/00029_create_header_sync_logs_table.sql (new file)
@@ -0,0 +1,22 @@
+-- +goose Up
+-- SQL in this section is executed when the migration is applied.
+CREATE TABLE header_sync_logs
+(
+  id SERIAL PRIMARY KEY,
+  header_id INTEGER NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
+  address INTEGER NOT NULL REFERENCES addresses (id) ON DELETE CASCADE,
+  topics BYTEA[],
+  data BYTEA,
+  block_number BIGINT,
+  block_hash VARCHAR(66),
+  tx_hash VARCHAR(66),
+  tx_index INTEGER,
+  log_index INTEGER,
+  raw JSONB,
+  transformed BOOL NOT NULL DEFAULT FALSE,
+  UNIQUE (header_id, tx_index, log_index)
+);
+
+-- +goose Down
+-- SQL in this section is executed when the migration is rolled back.
+DROP TABLE header_sync_logs;
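
For orientation, this table backs the core.HeaderSyncLog type that the Go changes below switch to. A hedged sketch of that mapping; only the embedded Log field is confirmed by this diff (see the chunker code reading log.Log.Address), the other field names are assumptions mirroring the columns above:

package core

import "github.com/ethereum/go-ethereum/core/types"

// HeaderSyncLog sketch: ID, HeaderID and Transformed are assumed to mirror
// the id, header_id and transformed columns, while the embedded geth log
// carries topics, data and block/tx metadata.
type HeaderSyncLog struct {
	ID          int64
	HeaderID    int64
	Log         types.Log
	Transformed bool
}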

db/migrations/00030_create_watched_logs_table.sql (new file)
@@ -0,0 +1,12 @@
+-- +goose Up
+-- SQL in this section is executed when the migration is applied.
+CREATE TABLE public.watched_logs
+(
+  id SERIAL PRIMARY KEY,
+  contract_address VARCHAR(42),
+  topic_zero VARCHAR(66)
+);
+
+-- +goose Down
+-- SQL in this section is executed when the migration is rolled back.
+DROP TABLE public.watched_logs;

db/schema.sql
@@ -2,8 +2,8 @@
 -- PostgreSQL database dump
 --
 
--- Dumped from database version 11.3
--- Dumped by pg_dump version 11.3
+-- Dumped from database version 11.4
+-- Dumped by pg_dump version 11.4
 
 SET statement_timeout = 0;
 SET lock_timeout = 0;
@@ -51,10 +51,10 @@ ALTER SEQUENCE public.addresses_id_seq OWNED BY public.addresses.id;
 
 
 --
--- Name: logs; Type: TABLE; Schema: public; Owner: -
+-- Name: full_sync_logs; Type: TABLE; Schema: public; Owner: -
 --
 
-CREATE TABLE public.logs (
+CREATE TABLE public.full_sync_logs (
     id integer NOT NULL,
     block_number bigint,
     address character varying(66),
@@ -74,9 +74,9 @@ CREATE TABLE public.logs (
 --
 
 CREATE VIEW public.block_stats AS
- SELECT max(logs.block_number) AS max_block,
-    min(logs.block_number) AS min_block
-   FROM public.logs;
+ SELECT max(full_sync_logs.block_number) AS max_block,
+    min(full_sync_logs.block_number) AS min_block
+   FROM public.full_sync_logs;
 
 
 --
@@ -168,6 +168,26 @@ CREATE TABLE public.eth_nodes (
 );
 
 
+--
+-- Name: full_sync_logs_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.full_sync_logs_id_seq
+    AS integer
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: full_sync_logs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.full_sync_logs_id_seq OWNED BY public.full_sync_logs.id;
+
+
 --
 -- Name: full_sync_receipts; Type: TABLE; Schema: public; Owner: -
 --
@@ -276,6 +296,46 @@ CREATE SEQUENCE public.goose_db_version_id_seq
 ALTER SEQUENCE public.goose_db_version_id_seq OWNED BY public.goose_db_version.id;
 
 
+--
+-- Name: header_sync_logs; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.header_sync_logs (
+    id integer NOT NULL,
+    header_id integer NOT NULL,
+    address integer NOT NULL,
+    topics bytea[],
+    data bytea,
+    block_number bigint,
+    block_hash character varying(66),
+    tx_hash character varying(66),
+    tx_index integer,
+    log_index integer,
+    raw jsonb,
+    transformed boolean DEFAULT false NOT NULL
+);
+
+
+--
+-- Name: header_sync_logs_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.header_sync_logs_id_seq
+    AS integer
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: header_sync_logs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.header_sync_logs_id_seq OWNED BY public.header_sync_logs.id;
+
+
 --
 -- Name: header_sync_receipts; Type: TABLE; Schema: public; Owner: -
 --
@@ -364,6 +424,7 @@ CREATE TABLE public.headers (
     block_number bigint,
     raw jsonb,
     block_timestamp numeric,
+    check_count integer DEFAULT 0 NOT NULL,
     eth_node_id integer NOT NULL,
     eth_node_fingerprint character varying(128)
 );
@@ -429,26 +490,6 @@ CREATE SEQUENCE public.log_filters_id_seq
 ALTER SEQUENCE public.log_filters_id_seq OWNED BY public.log_filters.id;
 
 
---
--- Name: logs_id_seq; Type: SEQUENCE; Schema: public; Owner: -
---
-
-CREATE SEQUENCE public.logs_id_seq
-    AS integer
-    START WITH 1
-    INCREMENT BY 1
-    NO MINVALUE
-    NO MAXVALUE
-    CACHE 1;
-
-
---
--- Name: logs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
---
-
-ALTER SEQUENCE public.logs_id_seq OWNED BY public.logs.id;
-
-
 --
 -- Name: nodes_id_seq; Type: SEQUENCE; Schema: public; Owner: -
 --
@@ -577,21 +618,52 @@ ALTER SEQUENCE public.watched_contracts_contract_id_seq OWNED BY public.watched_
 
 CREATE VIEW public.watched_event_logs AS
  SELECT log_filters.name,
-    logs.id,
-    logs.block_number,
-    logs.address,
-    logs.tx_hash,
-    logs.index,
-    logs.topic0,
-    logs.topic1,
-    logs.topic2,
-    logs.topic3,
-    logs.data,
-    logs.receipt_id
+    full_sync_logs.id,
+    full_sync_logs.block_number,
+    full_sync_logs.address,
+    full_sync_logs.tx_hash,
+    full_sync_logs.index,
+    full_sync_logs.topic0,
+    full_sync_logs.topic1,
+    full_sync_logs.topic2,
+    full_sync_logs.topic3,
+    full_sync_logs.data,
+    full_sync_logs.receipt_id
    FROM ((public.log_filters
      CROSS JOIN public.block_stats)
-     JOIN public.logs ON ((((logs.address)::text = (log_filters.address)::text) AND (logs.block_number >= COALESCE(log_filters.from_block, block_stats.min_block)) AND (logs.block_number <= COALESCE(log_filters.to_block, block_stats.max_block)))))
-  WHERE ((((log_filters.topic0)::text = (logs.topic0)::text) OR (log_filters.topic0 IS NULL)) AND (((log_filters.topic1)::text = (logs.topic1)::text) OR (log_filters.topic1 IS NULL)) AND (((log_filters.topic2)::text = (logs.topic2)::text) OR (log_filters.topic2 IS NULL)) AND (((log_filters.topic3)::text = (logs.topic3)::text) OR (log_filters.topic3 IS NULL)));
+     JOIN public.full_sync_logs ON ((((full_sync_logs.address)::text = (log_filters.address)::text) AND (full_sync_logs.block_number >= COALESCE(log_filters.from_block, block_stats.min_block)) AND (full_sync_logs.block_number <= COALESCE(log_filters.to_block, block_stats.max_block)))))
+  WHERE ((((log_filters.topic0)::text = (full_sync_logs.topic0)::text) OR (log_filters.topic0 IS NULL)) AND (((log_filters.topic1)::text = (full_sync_logs.topic1)::text) OR (log_filters.topic1 IS NULL)) AND (((log_filters.topic2)::text = (full_sync_logs.topic2)::text) OR (log_filters.topic2 IS NULL)) AND (((log_filters.topic3)::text = (full_sync_logs.topic3)::text) OR (log_filters.topic3 IS NULL)));
 
 
+--
+-- Name: watched_logs; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.watched_logs (
+    id integer NOT NULL,
+    contract_address character varying(42),
+    topic_zero character varying(66)
+);
+
+
+--
+-- Name: watched_logs_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.watched_logs_id_seq
+    AS integer
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: watched_logs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.watched_logs_id_seq OWNED BY public.watched_logs.id;
+
+
 --
@@ -622,6 +694,13 @@ ALTER TABLE ONLY public.checked_headers ALTER COLUMN id SET DEFAULT nextval('pub
 ALTER TABLE ONLY public.eth_nodes ALTER COLUMN id SET DEFAULT nextval('public.nodes_id_seq'::regclass);
 
 
+--
+-- Name: full_sync_logs id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.full_sync_logs ALTER COLUMN id SET DEFAULT nextval('public.full_sync_logs_id_seq'::regclass);
+
+
 --
 -- Name: full_sync_receipts id; Type: DEFAULT; Schema: public; Owner: -
 --
@@ -643,6 +722,13 @@ ALTER TABLE ONLY public.full_sync_transactions ALTER COLUMN id SET DEFAULT nextv
 ALTER TABLE ONLY public.goose_db_version ALTER COLUMN id SET DEFAULT nextval('public.goose_db_version_id_seq'::regclass);
 
 
+--
+-- Name: header_sync_logs id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.header_sync_logs ALTER COLUMN id SET DEFAULT nextval('public.header_sync_logs_id_seq'::regclass);
+
+
 --
 -- Name: header_sync_receipts id; Type: DEFAULT; Schema: public; Owner: -
 --
@@ -671,13 +757,6 @@ ALTER TABLE ONLY public.headers ALTER COLUMN id SET DEFAULT nextval('public.head
 ALTER TABLE ONLY public.log_filters ALTER COLUMN id SET DEFAULT nextval('public.log_filters_id_seq'::regclass);
 
 
---
--- Name: logs id; Type: DEFAULT; Schema: public; Owner: -
---
-
-ALTER TABLE ONLY public.logs ALTER COLUMN id SET DEFAULT nextval('public.logs_id_seq'::regclass);
-
-
 --
 -- Name: queued_storage id; Type: DEFAULT; Schema: public; Owner: -
 --
@@ -699,6 +778,13 @@ ALTER TABLE ONLY public.uncles ALTER COLUMN id SET DEFAULT nextval('public.uncle
 ALTER TABLE ONLY public.watched_contracts ALTER COLUMN contract_id SET DEFAULT nextval('public.watched_contracts_contract_id_seq'::regclass);
 
 
+--
+-- Name: watched_logs id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.watched_logs ALTER COLUMN id SET DEFAULT nextval('public.watched_logs_id_seq'::regclass);
+
+
 --
 -- Name: addresses addresses_address_key; Type: CONSTRAINT; Schema: public; Owner: -
 --
@@ -755,6 +841,14 @@ ALTER TABLE ONLY public.eth_nodes
     ADD CONSTRAINT eth_node_uc UNIQUE (genesis_block, network_id, eth_node_id);
 
 
+--
+-- Name: full_sync_logs full_sync_logs_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.full_sync_logs
+    ADD CONSTRAINT full_sync_logs_pkey PRIMARY KEY (id);
+
+
 --
 -- Name: full_sync_receipts full_sync_receipts_pkey; Type: CONSTRAINT; Schema: public; Owner: -
 --
@@ -779,6 +873,22 @@ ALTER TABLE ONLY public.goose_db_version
     ADD CONSTRAINT goose_db_version_pkey PRIMARY KEY (id);
 
 
+--
+-- Name: header_sync_logs header_sync_logs_header_id_tx_index_log_index_key; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.header_sync_logs
+    ADD CONSTRAINT header_sync_logs_header_id_tx_index_log_index_key UNIQUE (header_id, tx_index, log_index);
+
+
+--
+-- Name: header_sync_logs header_sync_logs_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.header_sync_logs
+    ADD CONSTRAINT header_sync_logs_pkey PRIMARY KEY (id);
+
+
 --
 -- Name: header_sync_receipts header_sync_receipts_header_id_transaction_id_key; Type: CONSTRAINT; Schema: public; Owner: -
 --
@@ -819,14 +929,6 @@ ALTER TABLE ONLY public.headers
     ADD CONSTRAINT headers_pkey PRIMARY KEY (id);
 
 
---
--- Name: logs logs_pkey; Type: CONSTRAINT; Schema: public; Owner: -
---
-
-ALTER TABLE ONLY public.logs
-    ADD CONSTRAINT logs_pkey PRIMARY KEY (id);
-
-
 --
 -- Name: log_filters name_uc; Type: CONSTRAINT; Schema: public; Owner: -
 --
@@ -891,6 +993,14 @@ ALTER TABLE ONLY public.watched_contracts
     ADD CONSTRAINT watched_contracts_pkey PRIMARY KEY (contract_id);
 
 
+--
+-- Name: watched_logs watched_logs_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.watched_logs
+    ADD CONSTRAINT watched_logs_pkey PRIMARY KEY (id);
+
+
 --
 -- Name: block_id_index; Type: INDEX; Schema: public; Owner: -
 --
@@ -965,6 +1075,22 @@ ALTER TABLE ONLY public.full_sync_transactions
     ADD CONSTRAINT full_sync_transactions_block_id_fkey FOREIGN KEY (block_id) REFERENCES public.blocks(id) ON DELETE CASCADE;
 
 
+--
+-- Name: header_sync_logs header_sync_logs_address_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.header_sync_logs
+    ADD CONSTRAINT header_sync_logs_address_fkey FOREIGN KEY (address) REFERENCES public.addresses(id) ON DELETE CASCADE;
+
+
+--
+-- Name: header_sync_logs header_sync_logs_header_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.header_sync_logs
+    ADD CONSTRAINT header_sync_logs_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.headers(id) ON DELETE CASCADE;
+
+
 --
 -- Name: header_sync_receipts header_sync_receipts_contract_address_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
 --
@@ -1014,10 +1140,10 @@ ALTER TABLE ONLY public.blocks
 
 
 --
--- Name: logs receipts_fk; Type: FK CONSTRAINT; Schema: public; Owner: -
+-- Name: full_sync_logs receipts_fk; Type: FK CONSTRAINT; Schema: public; Owner: -
 --
 
-ALTER TABLE ONLY public.logs
+ALTER TABLE ONLY public.full_sync_logs
     ADD CONSTRAINT receipts_fk FOREIGN KEY (receipt_id) REFERENCES public.full_sync_receipts(id) ON DELETE CASCADE;

go.mod
@@ -6,28 +6,44 @@ require (
 	github.com/allegro/bigcache v1.2.1 // indirect
 	github.com/aristanetworks/goarista v0.0.0-20190712234253-ed1100a1c015 // indirect
 	github.com/dave/jennifer v1.3.0
-	github.com/ethereum/go-ethereum v1.9.1
+	github.com/deckarep/golang-set v1.7.1 // indirect
+	github.com/edsrzf/mmap-go v1.0.0 // indirect
+	github.com/elastic/gosigar v0.10.4 // indirect
+	github.com/ethereum/go-ethereum v1.9.5
+	github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
+	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
+	github.com/gorilla/websocket v1.4.1 // indirect
 	github.com/graph-gophers/graphql-go v0.0.0-20190724201507-010347b5f9e6 // indirect
 	github.com/hashicorp/golang-lru v0.5.1
 	github.com/hpcloud/tail v1.0.0
+	github.com/huin/goupnp v1.0.0 // indirect
+	github.com/jackpal/go-nat-pmp v1.0.1 // indirect
 	github.com/jmoiron/sqlx v0.0.0-20181024163419-82935fac6c1a
 	github.com/karalabe/usb v0.0.0-20190819132248-550797b1cad8 // indirect
 	github.com/lib/pq v1.0.0
+	github.com/mattn/go-colorable v0.1.2 // indirect
 	github.com/mattn/go-isatty v0.0.9 // indirect
+	github.com/mattn/go-runewidth v0.0.4 // indirect
 	github.com/mitchellh/go-homedir v1.1.0
+	github.com/olekukonko/tablewriter v0.0.1 // indirect
 	github.com/onsi/ginkgo v1.7.0
 	github.com/onsi/gomega v1.4.3
 	github.com/pborman/uuid v1.2.0 // indirect
 	github.com/pressly/goose v2.6.0+incompatible
 	github.com/prometheus/tsdb v0.10.0 // indirect
+	github.com/rjeczalik/notify v0.9.2 // indirect
 	github.com/rs/cors v1.7.0 // indirect
 	github.com/sirupsen/logrus v1.2.0
 	github.com/spf13/cobra v0.0.3
 	github.com/spf13/viper v1.3.2
+	github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 // indirect
 	github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 // indirect
 	github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 // indirect
+	github.com/syndtr/goleveldb v1.0.0 // indirect
 	github.com/tyler-smith/go-bip39 v1.0.2 // indirect
+	github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 // indirect
 	golang.org/x/net v0.0.0-20190603091049-60506f45cf65
 	golang.org/x/sync v0.0.0-20190423024810-112230192c58
+	gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190709231704-1e4459ed25ff // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
 )

go.sum
@@ -50,6 +50,8 @@ github.com/elastic/gosigar v0.10.4/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTy
 github.com/ethereum/go-ethereum v1.9.0/go.mod h1:PwpWDrCLZrV+tfrhqqF6kPknbISMHaJv9Ln3kPCZLwY=
 github.com/ethereum/go-ethereum v1.9.1 h1:MrdTRvKIa3apdx6NW1azzSgl8BQB1eTBVSUmFhuztaU=
 github.com/ethereum/go-ethereum v1.9.1/go.mod h1:PwpWDrCLZrV+tfrhqqF6kPknbISMHaJv9Ln3kPCZLwY=
+github.com/ethereum/go-ethereum v1.9.5 h1:4oxsF+/3N/sTgda9XTVG4r+wMVLsveziSMcK83hPbsk=
+github.com/ethereum/go-ethereum v1.9.5/go.mod h1:PwpWDrCLZrV+tfrhqqF6kPknbISMHaJv9Ln3kPCZLwY=
 github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
 github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
@@ -60,6 +62,7 @@ github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk=
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
 github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
@@ -67,9 +70,11 @@ github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
@@ -78,6 +83,8 @@ github.com/google/uuid v1.1.0 h1:Jf4mxPC/ziBnoPIdpQdPJ9OeiomAUHLvxmPRSPH9m4s=
 github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
+github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/graph-gophers/graphql-go v0.0.0-20190610161739-8f92f34fc598/go.mod h1:Au3iQ8DvDis8hZ4q2OzRcaKYlAsPt+fYvib5q4nIqu4=
 github.com/graph-gophers/graphql-go v0.0.0-20190724201507-010347b5f9e6 h1:9WiNlI9Cds5S5YITwRpRs8edNaq0nxTEymhDW20A1QE=
 github.com/graph-gophers/graphql-go v0.0.0-20190724201507-010347b5f9e6/go.mod h1:Au3iQ8DvDis8hZ4q2OzRcaKYlAsPt+fYvib5q4nIqu4=
@@ -132,6 +139,7 @@ github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg
 github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
 github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
 github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4=
 github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q=
 github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
@@ -166,6 +174,7 @@ github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
 github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
 github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -226,6 +235,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
 github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
 github.com/syndtr/goleveldb v0.0.0-20181128100959-b001fa50d6b2 h1:GnOzE5fEFN3b2zDhJJABEofdb51uMRNb8eqIVtdducs=
 github.com/syndtr/goleveldb v0.0.0-20181128100959-b001fa50d6b2/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
+github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
+github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
 github.com/tyler-smith/go-bip39 v1.0.0/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
 github.com/tyler-smith/go-bip39 v1.0.2 h1:+t3w+KwLXO6154GNJY+qUtIxLTmFjfUmpguQT1OlOT8=
 github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
@ -38,7 +38,7 @@ var _ = Describe("Reading contracts", func() {
|
|||||||
|
|
||||||
Describe("Getting a contract attribute", func() {
|
Describe("Getting a contract attribute", func() {
|
||||||
It("retrieves the event log for a specific block and contract", func() {
|
It("retrieves the event log for a specific block and contract", func() {
|
||||||
expectedLogZero := core.Log{
|
expectedLogZero := core.FullSyncLog{
|
||||||
BlockNumber: 4703824,
|
BlockNumber: 4703824,
|
||||||
TxHash: "0xf896bfd1eb539d881a1a31102b78de9f25cd591bf1fe1924b86148c0b205fd5d",
|
TxHash: "0xf896bfd1eb539d881a1a31102b78de9f25cd591bf1fe1924b86148c0b205fd5d",
|
||||||
Address: "0xd26114cd6ee289accf82350c8d8487fedb8a0c07",
|
Address: "0xd26114cd6ee289accf82350c8d8487fedb8a0c07",
|
||||||
@ -59,7 +59,7 @@ var _ = Describe("Reading contracts", func() {
|
|||||||
blockChain := geth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter)
|
blockChain := geth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter)
|
||||||
contract := testing.SampleContract()
|
contract := testing.SampleContract()
|
||||||
|
|
||||||
logs, err := blockChain.GetLogs(contract, big.NewInt(4703824), nil)
|
logs, err := blockChain.GetFullSyncLogs(contract, big.NewInt(4703824), nil)
|
||||||
|
|
||||||
Expect(err).To(BeNil())
|
Expect(err).To(BeNil())
|
||||||
Expect(len(logs)).To(Equal(3))
|
Expect(len(logs)).To(Equal(3))
|
||||||
@ -76,7 +76,7 @@ var _ = Describe("Reading contracts", func() {
|
|||||||
transactionConverter := rpc2.NewRpcTransactionConverter(ethClient)
|
transactionConverter := rpc2.NewRpcTransactionConverter(ethClient)
|
||||||
blockChain := geth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter)
|
blockChain := geth.NewBlockChain(blockChainClient, rpcClient, node, transactionConverter)
|
||||||
|
|
||||||
logs, err := blockChain.GetLogs(core.Contract{Hash: "0x123"}, big.NewInt(4703824), nil)
|
logs, err := blockChain.GetFullSyncLogs(core.Contract{Hash: "0x123"}, big.NewInt(4703824), nil)
|
||||||
|
|
||||||
Expect(err).To(BeNil())
|
Expect(err).To(BeNil())
|
||||||
Expect(len(logs)).To(Equal(0))
|
Expect(len(logs)).To(Equal(0))
|
||||||
|
@@ -17,17 +17,15 @@
 package chunker
 
 import (
-	"strings"
-
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/types"
-	shared_t "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
+	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
+	"github.com/vulcanize/vulcanizedb/pkg/core"
+	"strings"
 )
 
 type Chunker interface {
-	AddConfigs(transformerConfigs []shared_t.EventTransformerConfig)
-	ChunkLogs(logs []types.Log) map[string][]types.Log
+	AddConfig(transformerConfig transformer.EventTransformerConfig)
+	ChunkLogs(logs []core.HeaderSyncLog) map[string][]core.HeaderSyncLog
 }
 
 type LogChunker struct {
@@ -44,27 +42,25 @@ func NewLogChunker() *LogChunker {
 	}
 }
 
-// Configures the chunker by adding more addreses and topics to consider.
-func (chunker *LogChunker) AddConfigs(transformerConfigs []shared_t.EventTransformerConfig) {
-	for _, config := range transformerConfigs {
-		for _, address := range config.ContractAddresses {
-			var lowerCaseAddress = strings.ToLower(address)
-			chunker.AddressToNames[lowerCaseAddress] = append(chunker.AddressToNames[lowerCaseAddress], config.TransformerName)
-			chunker.NameToTopic0[config.TransformerName] = common.HexToHash(config.Topic)
-		}
+// Configures the chunker by adding one config with more addresses and topics to consider.
+func (chunker *LogChunker) AddConfig(transformerConfig transformer.EventTransformerConfig) {
+	for _, address := range transformerConfig.ContractAddresses {
+		var lowerCaseAddress = strings.ToLower(address)
+		chunker.AddressToNames[lowerCaseAddress] = append(chunker.AddressToNames[lowerCaseAddress], transformerConfig.TransformerName)
+		chunker.NameToTopic0[transformerConfig.TransformerName] = common.HexToHash(transformerConfig.Topic)
 	}
 }
 
-// Goes through an array of logs, associating relevant logs (matching addresses and topic) with transformers
-func (chunker *LogChunker) ChunkLogs(logs []types.Log) map[string][]types.Log {
-	chunks := map[string][]types.Log{}
+// Goes through a slice of logs, associating relevant logs (matching addresses and topic) with transformers
+func (chunker *LogChunker) ChunkLogs(logs []core.HeaderSyncLog) map[string][]core.HeaderSyncLog {
+	chunks := map[string][]core.HeaderSyncLog{}
 	for _, log := range logs {
 		// Topic0 is not unique to each transformer, also need to consider the contract address
-		relevantTransformers := chunker.AddressToNames[strings.ToLower(log.Address.String())]
+		relevantTransformers := chunker.AddressToNames[strings.ToLower(log.Log.Address.Hex())]
 
-		for _, transformer := range relevantTransformers {
-			if chunker.NameToTopic0[transformer] == log.Topics[0] {
-				chunks[transformer] = append(chunks[transformer], log)
+		for _, t := range relevantTransformers {
+			if chunker.NameToTopic0[t] == log.Log.Topics[0] {
+				chunks[t] = append(chunks[t], log)
 			}
 		}
 	}
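
Putting the reworked chunker API together, a minimal usage sketch; the config values are illustrative, while the API names match the hunks above:

package main

import (
	"fmt"

	"github.com/vulcanize/vulcanizedb/libraries/shared/chunker"
	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
	"github.com/vulcanize/vulcanizedb/pkg/core"
)

func main() {
	c := chunker.NewLogChunker()
	// Configs are now added one at a time instead of as a slice.
	c.AddConfig(transformer.EventTransformerConfig{
		TransformerName:   "ExampleTransformer",
		ContractAddresses: []string{"0x00000000000000000000000000000000000000A1"},
		Topic:             "0xA",
	})

	// Logs now arrive as core.HeaderSyncLog, so the raw geth log is reached
	// through the Log field (as in log.Log.Address above).
	var logs []core.HeaderSyncLog // fetched elsewhere
	chunks := c.ChunkLogs(logs)   // map[string][]core.HeaderSyncLog
	fmt.Println(len(chunks["ExampleTransformer"]))
}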

@@ -21,38 +21,39 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 
 	chunk "github.com/vulcanize/vulcanizedb/libraries/shared/chunker"
-	shared_t "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
+	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
+	"github.com/vulcanize/vulcanizedb/pkg/core"
 )
 
 var _ = Describe("Log chunker", func() {
 	var (
-		configs []shared_t.EventTransformerConfig
 		chunker *chunk.LogChunker
 	)
 
 	BeforeEach(func() {
-		configA := shared_t.EventTransformerConfig{
+		chunker = chunk.NewLogChunker()
+
+		configA := transformer.EventTransformerConfig{
 			TransformerName:   "TransformerA",
 			ContractAddresses: []string{"0x00000000000000000000000000000000000000A1", "0x00000000000000000000000000000000000000A2"},
 			Topic:             "0xA",
 		}
-		configB := shared_t.EventTransformerConfig{
+		chunker.AddConfig(configA)
+
+		configB := transformer.EventTransformerConfig{
 			TransformerName:   "TransformerB",
 			ContractAddresses: []string{"0x00000000000000000000000000000000000000B1"},
 			Topic:             "0xB",
 		}
-		configC := shared_t.EventTransformerConfig{
+		chunker.AddConfig(configB)
+
+		configC := transformer.EventTransformerConfig{
 			TransformerName:   "TransformerC",
 			ContractAddresses: []string{"0x00000000000000000000000000000000000000A2"},
 			Topic:             "0xC",
 		}
-		configs = []shared_t.EventTransformerConfig{configA, configB, configC}
-		chunker = chunk.NewLogChunker()
-		chunker.AddConfigs(configs)
+		chunker.AddConfig(configC)
 	})
 
 	Describe("initialisation", func() {
@@ -71,26 +72,26 @@ var _ = Describe("Log chunker", func() {
 		})
 	})
 
-	Describe("AddConfigs", func() {
+	Describe("AddConfig", func() {
 		It("can add more configs later", func() {
-			configD := shared_t.EventTransformerConfig{
+			configD := transformer.EventTransformerConfig{
 				TransformerName:   "TransformerD",
 				ContractAddresses: []string{"0x000000000000000000000000000000000000000D"},
 				Topic:             "0xD",
 			}
-			chunker.AddConfigs([]shared_t.EventTransformerConfig{configD})
+			chunker.AddConfig(configD)
 
 			Expect(chunker.AddressToNames).To(ContainElement([]string{"TransformerD"}))
 			Expect(chunker.NameToTopic0).To(ContainElement(common.HexToHash("0xD")))
 		})
 
 		It("lower cases address", func() {
-			configD := shared_t.EventTransformerConfig{
+			configD := transformer.EventTransformerConfig{
 				TransformerName:   "TransformerD",
 				ContractAddresses: []string{"0x000000000000000000000000000000000000000D"},
 				Topic:             "0xD",
 			}
-			chunker.AddConfigs([]shared_t.EventTransformerConfig{configD})
+			chunker.AddConfig(configD)
 
 			Expect(chunker.AddressToNames["0x000000000000000000000000000000000000000d"]).To(Equal([]string{"TransformerD"}))
 		})
@@ -98,7 +99,7 @@ var _ = Describe("Log chunker", func() {
 
 	Describe("ChunkLogs", func() {
 		It("only associates logs with relevant topic0 and address to transformers", func() {
-			logs := []types.Log{log1, log2, log3, log4, log5}
+			logs := []core.HeaderSyncLog{log1, log2, log3, log4, log5}
 			chunks := chunker.ChunkLogs(logs)
 
 			Expect(chunks["TransformerA"]).To(And(ContainElement(log1), ContainElement(log4)))
@@ -110,43 +111,53 @@ var _ = Describe("Log chunker", func() {
 
 var (
 	// Match TransformerA
-	log1 = types.Log{
+	log1 = core.HeaderSyncLog{
+		Log: types.Log{
 			Address: common.HexToAddress("0xA1"),
 			Topics: []common.Hash{
 				common.HexToHash("0xA"),
 				common.HexToHash("0xLogTopic1"),
 			},
+		},
 	}
 	// Match TransformerA address, but not topic0
-	log2 = types.Log{
+	log2 = core.HeaderSyncLog{
+		Log: types.Log{
 			Address: common.HexToAddress("0xA1"),
 			Topics: []common.Hash{
 				common.HexToHash("0xB"),
 				common.HexToHash("0xLogTopic2"),
 			},
+		},
 	}
 	// Match TransformerA topic, but TransformerB address
-	log3 = types.Log{
+	log3 = core.HeaderSyncLog{
+		Log: types.Log{
 			Address: common.HexToAddress("0xB1"),
 			Topics: []common.Hash{
 				common.HexToHash("0xA"),
 				common.HexToHash("0xLogTopic3"),
 			},
+		},
 	}
 	// Match TransformerA, with the other address
-	log4 = types.Log{
+	log4 = core.HeaderSyncLog{
+		Log: types.Log{
 			Address: common.HexToAddress("0xA2"),
 			Topics: []common.Hash{
 				common.HexToHash("0xA"),
 				common.HexToHash("0xLogTopic4"),
 			},
+		},
 	}
 	// Match TransformerC, which shares address with TransformerA
-	log5 = types.Log{
+	log5 = core.HeaderSyncLog{
+		Log: types.Log{
 			Address: common.HexToAddress("0xA2"),
 			Topics: []common.Hash{
 				common.HexToHash("0xC"),
 				common.HexToHash("0xLogTopic5"),
 			},
+		},
 	}
 )

@@ -20,6 +20,6 @@ type TransformerExecution bool
 
 const (
 	HeaderRecheck TransformerExecution = true
-	HeaderMissing TransformerExecution = false
-	RecheckHeaderCap = "4"
+	HeaderUnchecked TransformerExecution = false
+	RecheckHeaderCap = int64(5)
 )

@@ -16,9 +16,9 @@
 
 package event
 
-import "github.com/ethereum/go-ethereum/core/types"
+import "github.com/vulcanize/vulcanizedb/pkg/core"
 
 type Converter interface {
-	ToEntities(contractAbi string, ethLog []types.Log) ([]interface{}, error)
+	ToEntities(contractAbi string, ethLog []core.HeaderSyncLog) ([]interface{}, error)
 	ToModels([]interface{}) ([]interface{}, error)
 }

@@ -16,12 +16,9 @@
 
 package event
 
-import (
-	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
-)
+import "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
 
 type Repository interface {
-	Create(headerID int64, models []interface{}) error
-	MarkHeaderChecked(headerID int64) error
+	Create(models []interface{}) error
 	SetDB(db *postgres.DB)
 }
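
Create no longer receives a headerID, and header check-marking drops out of the interface entirely. A hedged sketch of a conforming repository; the model type, table, and insert are purely illustrative:

package example

import (
	"fmt"

	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
)

// ExampleModel is hypothetical; log_id is assumed to reference
// header_sync_logs.id, which is why no headerID parameter is needed.
type ExampleModel struct {
	LogID int64
	Value string
}

type exampleRepository struct {
	db *postgres.DB
}

func (r *exampleRepository) SetDB(db *postgres.DB) {
	r.db = db
}

func (r *exampleRepository) Create(models []interface{}) error {
	for _, model := range models {
		m, ok := model.(ExampleModel)
		if !ok {
			return fmt.Errorf("unexpected model type %T", model)
		}
		// Hypothetical table for this transformer's output rows.
		_, err := r.db.Exec(`INSERT INTO example_events (log_id, value) VALUES ($1, $2)`, m.LogID, m.Value)
		if err != nil {
			return err
		}
	}
	return nil
}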
@ -17,9 +17,7 @@

package event

import (
-	"github.com/ethereum/go-ethereum/core/types"
-	log "github.com/sirupsen/logrus"
+	"github.com/sirupsen/logrus"

	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
	"github.com/vulcanize/vulcanizedb/pkg/core"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"

@ -36,34 +34,29 @@ func (transformer Transformer) NewTransformer(db *postgres.DB) transformer.Event

	return transformer
}

-func (transformer Transformer) Execute(logs []types.Log, header core.Header) error {
+func (transformer Transformer) Execute(logs []core.HeaderSyncLog) error {
	transformerName := transformer.Config.TransformerName
	config := transformer.Config

	if len(logs) < 1 {
-		err := transformer.Repository.MarkHeaderChecked(header.Id)
-		if err != nil {
-			log.Printf("Error marking header as checked in %v: %v", transformerName, err)
-			return err
-		}
		return nil
	}

	entities, err := transformer.Converter.ToEntities(config.ContractAbi, logs)
	if err != nil {
-		log.Printf("Error converting logs to entities in %v: %v", transformerName, err)
+		logrus.Errorf("error converting logs to entities in %v: %v", transformerName, err)
		return err
	}

	models, err := transformer.Converter.ToModels(entities)
	if err != nil {
-		log.Printf("Error converting entities to models in %v: %v", transformerName, err)
+		logrus.Errorf("error converting entities to models in %v: %v", transformerName, err)
		return err
	}

-	err = transformer.Repository.Create(header.Id, models)
+	err = transformer.Repository.Create(models)
	if err != nil {
-		log.Printf("Error persisting %v record: %v", transformerName, err)
+		logrus.Errorf("error persisting %v record: %v", transformerName, err)
		return err
	}

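A minimal sketch of driving the reworked transformer, reusing the hypothetical converter and repository from the sketches above (names are illustrative). Execute no longer receives a header: each log already carries its header association, and header bookkeeping has moved to the extractor:

	t := event.Transformer{
		Config:     config,            // a transformer.EventTransformerConfig
		Converter:  burnConverter{},   // hypothetical, from the sketch above
		Repository: &burnRepository{}, // hypothetical, from the sketch above
	}.NewTransformer(db)

	err := t.Execute(headerSyncLogs) // []core.HeaderSyncLog, no header argument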
@ -17,32 +17,29 @@

package event_test

import (
-	"math/rand"
-
-	"github.com/ethereum/go-ethereum/core/types"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/vulcanize/vulcanizedb/libraries/shared/factories/event"
	"github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
	"github.com/vulcanize/vulcanizedb/libraries/shared/test_data"
	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
	"github.com/vulcanize/vulcanizedb/pkg/core"
	"github.com/vulcanize/vulcanizedb/pkg/fakes"
+	"math/rand"
)

var _ = Describe("Transformer", func() {
	var (
-		repository mocks.MockRepository
+		repository mocks.MockEventRepository
		converter  mocks.MockConverter
		t          transformer.EventTransformer
		headerOne  core.Header
		config     = test_data.GenericTestConfig
-		logs       = test_data.GenericTestLogs
+		logs       []core.HeaderSyncLog
	)

	BeforeEach(func() {
-		repository = mocks.MockRepository{}
+		repository = mocks.MockEventRepository{}
		converter = mocks.MockConverter{}

		t = event.Transformer{

@ -52,21 +49,21 @@ var _ = Describe("Transformer", func() {

		}.NewTransformer(nil)

		headerOne = core.Header{Id: rand.Int63(), BlockNumber: rand.Int63()}

+		logs = []core.HeaderSyncLog{{
+			ID:          0,
+			HeaderID:    headerOne.Id,
+			Log:         test_data.GenericTestLog(),
+			Transformed: false,
+		}}
	})

	It("sets the db", func() {
		Expect(repository.SetDbCalled).To(BeTrue())
	})

-	It("marks header checked if no logs returned", func() {
-		err := t.Execute([]types.Log{}, headerOne)
-
-		Expect(err).NotTo(HaveOccurred())
-		repository.AssertMarkHeaderCheckedCalledWith(headerOne.Id)
-	})
-
	It("doesn't attempt to convert or persist an empty collection when there are no logs", func() {
-		err := t.Execute([]types.Log{}, headerOne)
+		err := t.Execute([]core.HeaderSyncLog{})

		Expect(err).NotTo(HaveOccurred())
		Expect(converter.ToEntitiesCalledCounter).To(Equal(0))

@ -74,24 +71,8 @@ var _ = Describe("Transformer", func() {

		Expect(repository.CreateCalledCounter).To(Equal(0))
	})

-	It("does not call repository.MarkCheckedHeader when there are logs", func() {
-		err := t.Execute(logs, headerOne)
-
-		Expect(err).NotTo(HaveOccurred())
-		repository.AssertMarkHeaderCheckedNotCalled()
-	})
-
-	It("returns error if marking header checked returns err", func() {
-		repository.SetMarkHeaderCheckedError(fakes.FakeError)
-
-		err := t.Execute([]types.Log{}, headerOne)
-
-		Expect(err).To(HaveOccurred())
-		Expect(err).To(MatchError(fakes.FakeError))
-	})
-
	It("converts an eth log to an entity", func() {
-		err := t.Execute(logs, headerOne)
+		err := t.Execute(logs)

		Expect(err).NotTo(HaveOccurred())
		Expect(converter.ContractAbi).To(Equal(config.ContractAbi))

@ -101,7 +82,7 @@ var _ = Describe("Transformer", func() {

	It("returns an error if converter fails", func() {
		converter.ToEntitiesError = fakes.FakeError

-		err := t.Execute(logs, headerOne)
+		err := t.Execute(logs)

		Expect(err).To(HaveOccurred())
		Expect(err).To(MatchError(fakes.FakeError))

@ -110,7 +91,7 @@ var _ = Describe("Transformer", func() {

	It("converts an entity to a model", func() {
		converter.EntitiesToReturn = []interface{}{test_data.GenericEntity{}}

-		err := t.Execute(logs, headerOne)
+		err := t.Execute(logs)

		Expect(err).NotTo(HaveOccurred())
		Expect(converter.EntitiesToConvert[0]).To(Equal(test_data.GenericEntity{}))

@ -120,7 +101,7 @@ var _ = Describe("Transformer", func() {

		converter.EntitiesToReturn = []interface{}{test_data.GenericEntity{}}
		converter.ToModelsError = fakes.FakeError

-		err := t.Execute(logs, headerOne)
+		err := t.Execute(logs)

		Expect(err).To(HaveOccurred())
		Expect(err).To(MatchError(fakes.FakeError))

@ -129,16 +110,15 @@ var _ = Describe("Transformer", func() {

	It("persists the record", func() {
		converter.ModelsToReturn = []interface{}{test_data.GenericModel{}}

-		err := t.Execute(logs, headerOne)
+		err := t.Execute(logs)

		Expect(err).NotTo(HaveOccurred())
-		Expect(repository.PassedHeaderID).To(Equal(headerOne.Id))
		Expect(repository.PassedModels[0]).To(Equal(test_data.GenericModel{}))
	})

	It("returns error if persisting the record fails", func() {
		repository.SetCreateError(fakes.FakeError)
-		err := t.Execute(logs, headerOne)
+		err := t.Execute(logs)

		Expect(err).To(HaveOccurred())
		Expect(err).To(MatchError(fakes.FakeError))
@ -14,14 +14,14 @@

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

-package repository
+package storage

import (
	"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
)

-type StorageRepository interface {
+type Repository interface {
	Create(blockNumber int, blockHash string, metadata utils.StorageValueMetadata, value interface{}) error
	SetDB(db *postgres.DB)
}
@ -19,7 +19,6 @@ package storage

import (
	"github.com/ethereum/go-ethereum/common"

-	"github.com/vulcanize/vulcanizedb/libraries/shared/repository"
	"github.com/vulcanize/vulcanizedb/libraries/shared/storage"
	"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"

@ -29,7 +28,7 @@ import (

type Transformer struct {
	Address    common.Address
	Mappings   storage.Mappings
-	Repository repository.StorageRepository
+	Repository Repository
}

func (transformer Transformer) NewTransformer(db *postgres.DB) transformer.StorageTransformer {
@ -26,6 +26,7 @@ import (

type ILogFetcher interface {
	FetchLogs(contractAddresses []common.Address, topics []common.Hash, missingHeader core.Header) ([]types.Log, error)
+	// TODO Extend FetchLogs for doing several blocks at a time
}

type LogFetcher struct {
85  libraries/shared/logs/delegator.go  Normal file
@ -0,0 +1,85 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package logs

import (
	"errors"
	"github.com/sirupsen/logrus"
	"github.com/vulcanize/vulcanizedb/libraries/shared/chunker"
	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
	"github.com/vulcanize/vulcanizedb/pkg/core"
	"github.com/vulcanize/vulcanizedb/pkg/datastore"
)

var (
	ErrNoLogs         = errors.New("no logs available for transforming")
	ErrNoTransformers = errors.New("no event transformers configured in the log delegator")
)

type ILogDelegator interface {
	AddTransformer(t transformer.EventTransformer)
	DelegateLogs() error
}

type LogDelegator struct {
	Chunker       chunker.Chunker
	LogRepository datastore.HeaderSyncLogRepository
	Transformers  []transformer.EventTransformer
}

func (delegator *LogDelegator) AddTransformer(t transformer.EventTransformer) {
	delegator.Transformers = append(delegator.Transformers, t)
	delegator.Chunker.AddConfig(t.GetConfig())
}

func (delegator *LogDelegator) DelegateLogs() error {
	if len(delegator.Transformers) < 1 {
		return ErrNoTransformers
	}

	persistedLogs, fetchErr := delegator.LogRepository.GetUntransformedHeaderSyncLogs()
	if fetchErr != nil {
		logrus.Errorf("error loading logs from db: %s", fetchErr.Error())
		return fetchErr
	}

	if len(persistedLogs) < 1 {
		return ErrNoLogs
	}

	transformErr := delegator.delegateLogs(persistedLogs)
	if transformErr != nil {
		logrus.Errorf("error transforming logs: %s", transformErr)
		return transformErr
	}

	return nil
}

func (delegator *LogDelegator) delegateLogs(logs []core.HeaderSyncLog) error {
	chunkedLogs := delegator.Chunker.ChunkLogs(logs)
	for _, t := range delegator.Transformers {
		transformerName := t.GetConfig().TransformerName
		logChunk := chunkedLogs[transformerName]
		err := t.Execute(logChunk)
		if err != nil {
			logrus.Errorf("%v transformer failed to execute in watcher: %v", transformerName, err)
			return err
		}
	}
	return nil
}
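A sketch of wiring the new delegator in a polling loop, assuming a repository constructor along these lines (the constructor name and pollingInterval are illustrative); ErrNoLogs is the one benign, retryable outcome:

	delegator := &logs.LogDelegator{
		Chunker:       chunker.NewLogChunker(),
		LogRepository: repositories.NewHeaderSyncLogRepository(db), // hypothetical constructor
	}
	delegator.AddTransformer(burnTransformer) // any transformer.EventTransformer

	for {
		err := delegator.DelegateLogs()
		if err != nil && err != logs.ErrNoLogs {
			logrus.Fatalf("delegating logs failed: %s", err)
		}
		time.Sleep(pollingInterval) // illustrative; no delay is prescribed here
	}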
165  libraries/shared/logs/delegator_test.go  Normal file
@ -0,0 +1,165 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package logs_test

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/vulcanize/vulcanizedb/libraries/shared/chunker"
	"github.com/vulcanize/vulcanizedb/libraries/shared/logs"
	"github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
	"github.com/vulcanize/vulcanizedb/pkg/core"
	"github.com/vulcanize/vulcanizedb/pkg/fakes"
	"strings"
)

var _ = Describe("Log delegator", func() {
	Describe("AddTransformer", func() {
		It("adds transformers to the delegator", func() {
			fakeTransformer := &mocks.MockEventTransformer{}
			delegator := logs.LogDelegator{Chunker: chunker.NewLogChunker()}

			delegator.AddTransformer(fakeTransformer)

			Expect(delegator.Transformers).To(Equal([]transformer.EventTransformer{fakeTransformer}))
		})

		It("passes transformers' configs to the chunker", func() {
			fakeTransformer := &mocks.MockEventTransformer{}
			fakeConfig := mocks.FakeTransformerConfig
			fakeTransformer.SetTransformerConfig(fakeConfig)
			chunker := chunker.NewLogChunker()
			delegator := logs.LogDelegator{Chunker: chunker}

			delegator.AddTransformer(fakeTransformer)

			expectedName := fakeConfig.TransformerName
			expectedTopic := common.HexToHash(fakeConfig.Topic)
			Expect(chunker.NameToTopic0).To(Equal(map[string]common.Hash{expectedName: expectedTopic}))
			expectedAddress := strings.ToLower(fakeConfig.ContractAddresses[0])
			Expect(chunker.AddressToNames).To(Equal(map[string][]string{expectedAddress: {expectedName}}))
		})
	})

	Describe("DelegateLogs", func() {
		It("returns error if no transformers configured", func() {
			delegator := newDelegator(&fakes.MockHeaderSyncLogRepository{})

			err := delegator.DelegateLogs()

			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError(logs.ErrNoTransformers))
		})

		It("gets untransformed logs", func() {
			mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
			mockLogRepository.ReturnLogs = []core.HeaderSyncLog{{}}
			delegator := newDelegator(mockLogRepository)
			delegator.AddTransformer(&mocks.MockEventTransformer{})

			err := delegator.DelegateLogs()

			Expect(err).NotTo(HaveOccurred())
			Expect(mockLogRepository.GetCalled).To(BeTrue())
		})

		It("returns error if getting untransformed logs fails", func() {
			mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
			mockLogRepository.GetError = fakes.FakeError
			delegator := newDelegator(mockLogRepository)
			delegator.AddTransformer(&mocks.MockEventTransformer{})

			err := delegator.DelegateLogs()

			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError(fakes.FakeError))
		})

		It("returns error that no logs were found if no logs returned", func() {
			delegator := newDelegator(&fakes.MockHeaderSyncLogRepository{})
			delegator.AddTransformer(&mocks.MockEventTransformer{})

			err := delegator.DelegateLogs()

			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError(logs.ErrNoLogs))
		})

		It("delegates chunked logs to transformers", func() {
			fakeTransformer := &mocks.MockEventTransformer{}
			config := mocks.FakeTransformerConfig
			fakeTransformer.SetTransformerConfig(config)
			fakeGethLog := types.Log{
				Address: common.HexToAddress(config.ContractAddresses[0]),
				Topics:  []common.Hash{common.HexToHash(config.Topic)},
			}
			fakeHeaderSyncLogs := []core.HeaderSyncLog{{Log: fakeGethLog}}
			mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
			mockLogRepository.ReturnLogs = fakeHeaderSyncLogs
			delegator := newDelegator(mockLogRepository)
			delegator.AddTransformer(fakeTransformer)

			err := delegator.DelegateLogs()

			Expect(err).NotTo(HaveOccurred())
			Expect(fakeTransformer.ExecuteWasCalled).To(BeTrue())
			Expect(fakeTransformer.PassedLogs).To(Equal(fakeHeaderSyncLogs))
		})

		It("returns error if transformer returns an error", func() {
			mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
			mockLogRepository.ReturnLogs = []core.HeaderSyncLog{{}}
			delegator := newDelegator(mockLogRepository)
			fakeTransformer := &mocks.MockEventTransformer{ExecuteError: fakes.FakeError}
			delegator.AddTransformer(fakeTransformer)

			err := delegator.DelegateLogs()

			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError(fakes.FakeError))
		})

		It("returns nil for error when logs returned and delegated", func() {
			fakeTransformer := &mocks.MockEventTransformer{}
			config := mocks.FakeTransformerConfig
			fakeTransformer.SetTransformerConfig(config)
			fakeGethLog := types.Log{
				Address: common.HexToAddress(config.ContractAddresses[0]),
				Topics:  []common.Hash{common.HexToHash(config.Topic)},
			}
			fakeHeaderSyncLogs := []core.HeaderSyncLog{{Log: fakeGethLog}}
			mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
			mockLogRepository.ReturnLogs = fakeHeaderSyncLogs
			delegator := newDelegator(mockLogRepository)
			delegator.AddTransformer(fakeTransformer)

			err := delegator.DelegateLogs()

			Expect(err).NotTo(HaveOccurred())
		})
	})
})

func newDelegator(headerSyncLogRepository *fakes.MockHeaderSyncLogRepository) *logs.LogDelegator {
	return &logs.LogDelegator{
		Chunker:       chunker.NewLogChunker(),
		LogRepository: headerSyncLogRepository,
	}
}
154  libraries/shared/logs/extractor.go  Normal file
@ -0,0 +1,154 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package logs

import (
	"errors"
	"github.com/ethereum/go-ethereum/common"
	"github.com/sirupsen/logrus"
	"github.com/vulcanize/vulcanizedb/libraries/shared/constants"
	"github.com/vulcanize/vulcanizedb/libraries/shared/fetcher"
	"github.com/vulcanize/vulcanizedb/libraries/shared/transactions"
	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
	"github.com/vulcanize/vulcanizedb/pkg/core"
	"github.com/vulcanize/vulcanizedb/pkg/datastore"
)

var (
	ErrNoUncheckedHeaders = errors.New("no unchecked headers available for log fetching")
	ErrNoWatchedAddresses = errors.New("no watched addresses configured in the log extractor")
)

type ILogExtractor interface {
	AddTransformerConfig(config transformer.EventTransformerConfig) error
	ExtractLogs(recheckHeaders constants.TransformerExecution) error
}

type LogExtractor struct {
	Addresses                []common.Address
	CheckedHeadersRepository datastore.CheckedHeadersRepository
	CheckedLogsRepository    datastore.CheckedLogsRepository
	Fetcher                  fetcher.ILogFetcher
	LogRepository            datastore.HeaderSyncLogRepository
	StartingBlock            *int64
	Syncer                   transactions.ITransactionsSyncer
	Topics                   []common.Hash
}

// Add additional logs to extract
func (extractor *LogExtractor) AddTransformerConfig(config transformer.EventTransformerConfig) error {
	checkedHeadersErr := extractor.updateCheckedHeaders(config)
	if checkedHeadersErr != nil {
		return checkedHeadersErr
	}

	if extractor.StartingBlock == nil {
		extractor.StartingBlock = &config.StartingBlockNumber
	} else if earlierStartingBlockNumber(config.StartingBlockNumber, *extractor.StartingBlock) {
		extractor.StartingBlock = &config.StartingBlockNumber
	}

	addresses := transformer.HexStringsToAddresses(config.ContractAddresses)
	extractor.Addresses = append(extractor.Addresses, addresses...)
	extractor.Topics = append(extractor.Topics, common.HexToHash(config.Topic))
	return nil
}

// Fetch and persist watched logs
func (extractor LogExtractor) ExtractLogs(recheckHeaders constants.TransformerExecution) error {
	if len(extractor.Addresses) < 1 {
		logrus.Errorf("error extracting logs: %s", ErrNoWatchedAddresses.Error())
		return ErrNoWatchedAddresses
	}

	uncheckedHeaders, uncheckedHeadersErr := extractor.CheckedHeadersRepository.UncheckedHeaders(*extractor.StartingBlock, -1, getCheckCount(recheckHeaders))
	if uncheckedHeadersErr != nil {
		logrus.Errorf("error fetching missing headers: %s", uncheckedHeadersErr)
		return uncheckedHeadersErr
	}

	if len(uncheckedHeaders) < 1 {
		return ErrNoUncheckedHeaders
	}

	for _, header := range uncheckedHeaders {
		logs, fetchLogsErr := extractor.Fetcher.FetchLogs(extractor.Addresses, extractor.Topics, header)
		if fetchLogsErr != nil {
			logError("error fetching logs for header: %s", fetchLogsErr, header)
			return fetchLogsErr
		}

		if len(logs) > 0 {
			transactionsSyncErr := extractor.Syncer.SyncTransactions(header.Id, logs)
			if transactionsSyncErr != nil {
				logError("error syncing transactions: %s", transactionsSyncErr, header)
				return transactionsSyncErr
			}

			createLogsErr := extractor.LogRepository.CreateHeaderSyncLogs(header.Id, logs)
			if createLogsErr != nil {
				logError("error persisting logs: %s", createLogsErr, header)
				return createLogsErr
			}
		}

		markHeaderCheckedErr := extractor.CheckedHeadersRepository.MarkHeaderChecked(header.Id)
		if markHeaderCheckedErr != nil {
			logError("error marking header checked: %s", markHeaderCheckedErr, header)
			return markHeaderCheckedErr
		}
	}
	return nil
}

func earlierStartingBlockNumber(transformerBlock, watcherBlock int64) bool {
	return transformerBlock < watcherBlock
}

func logError(description string, err error, header core.Header) {
	logrus.WithFields(logrus.Fields{
		"headerId":    header.Id,
		"headerHash":  header.Hash,
		"blockNumber": header.BlockNumber,
	}).Errorf(description, err.Error())
}

func getCheckCount(recheckHeaders constants.TransformerExecution) int64 {
	if recheckHeaders == constants.HeaderUnchecked {
		return 1
	} else {
		return constants.RecheckHeaderCap
	}
}

func (extractor *LogExtractor) updateCheckedHeaders(config transformer.EventTransformerConfig) error {
	alreadyWatchingLog, watchingLogErr := extractor.CheckedLogsRepository.AlreadyWatchingLog(config.ContractAddresses, config.Topic)
	if watchingLogErr != nil {
		return watchingLogErr
	}
	if !alreadyWatchingLog {
		uncheckHeadersErr := extractor.CheckedHeadersRepository.MarkHeadersUnchecked(config.StartingBlockNumber)
		if uncheckHeadersErr != nil {
			return uncheckHeadersErr
		}
		markLogWatchedErr := extractor.CheckedLogsRepository.MarkLogWatched(config.ContractAddresses, config.Topic)
		if markLogWatchedErr != nil {
			return markLogWatchedErr
		}
	}
	return nil
}
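A sketch of driving the new extractor once per polling cycle; the repository variables and the NewLogFetcher/NewTransactionsSyncer constructors are assumed rather than confirmed by this diff. ErrNoUncheckedHeaders just means there is nothing to do yet:

	extractor := &logs.LogExtractor{
		CheckedHeadersRepository: checkedHeadersRepository,
		CheckedLogsRepository:    checkedLogsRepository,
		Fetcher:                  fetcher.NewLogFetcher(blockChain),             // assumed constructor
		LogRepository:            headerSyncLogRepository,
		Syncer:                   transactions.NewTransactionsSyncer(db, blockChain), // assumed constructor
	}

	if err := extractor.AddTransformerConfig(burnConfig); err != nil {
		logrus.Fatalf("adding config failed: %s", err)
	}

	err := extractor.ExtractLogs(constants.HeaderUnchecked)
	if err != nil && err != logs.ErrNoUncheckedHeaders {
		logrus.Fatalf("extracting logs failed: %s", err)
	}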
414  libraries/shared/logs/extractor_test.go  Normal file
@ -0,0 +1,414 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package logs_test

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/vulcanize/vulcanizedb/libraries/shared/constants"
	"github.com/vulcanize/vulcanizedb/libraries/shared/logs"
	"github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
	"github.com/vulcanize/vulcanizedb/pkg/core"
	"github.com/vulcanize/vulcanizedb/pkg/fakes"
	"math/rand"
)

var _ = Describe("Log extractor", func() {
	var (
		checkedHeadersRepository *fakes.MockCheckedHeadersRepository
		checkedLogsRepository    *fakes.MockCheckedLogsRepository
		extractor                *logs.LogExtractor
	)

	BeforeEach(func() {
		checkedHeadersRepository = &fakes.MockCheckedHeadersRepository{}
		checkedLogsRepository = &fakes.MockCheckedLogsRepository{}
		extractor = &logs.LogExtractor{
			CheckedHeadersRepository: checkedHeadersRepository,
			CheckedLogsRepository:    checkedLogsRepository,
			Fetcher:                  &mocks.MockLogFetcher{},
			LogRepository:            &fakes.MockHeaderSyncLogRepository{},
			Syncer:                   &fakes.MockTransactionSyncer{},
		}
	})

	Describe("AddTransformerConfig", func() {
		It("updates extractor's starting block number to earliest available", func() {
			earlierStartingBlockNumber := rand.Int63()
			laterStartingBlockNumber := earlierStartingBlockNumber + 1

			errOne := extractor.AddTransformerConfig(getTransformerConfig(laterStartingBlockNumber))
			Expect(errOne).NotTo(HaveOccurred())
			errTwo := extractor.AddTransformerConfig(getTransformerConfig(earlierStartingBlockNumber))
			Expect(errTwo).NotTo(HaveOccurred())

			Expect(*extractor.StartingBlock).To(Equal(earlierStartingBlockNumber))
		})

		It("adds transformer's addresses to extractor's watched addresses", func() {
			addresses := []string{"0xA", "0xB"}
			configWithAddresses := transformer.EventTransformerConfig{
				ContractAddresses:   addresses,
				StartingBlockNumber: rand.Int63(),
			}

			err := extractor.AddTransformerConfig(configWithAddresses)

			Expect(err).NotTo(HaveOccurred())
			expectedAddresses := transformer.HexStringsToAddresses(addresses)
			Expect(extractor.Addresses).To(Equal(expectedAddresses))
		})

		It("adds transformer's topic to extractor's watched topics", func() {
			topic := "0x1"
			configWithTopic := transformer.EventTransformerConfig{
				ContractAddresses:   []string{fakes.FakeAddress.Hex()},
				Topic:               topic,
				StartingBlockNumber: rand.Int63(),
			}

			err := extractor.AddTransformerConfig(configWithTopic)

			Expect(err).NotTo(HaveOccurred())
			Expect(extractor.Topics).To(Equal([]common.Hash{common.HexToHash(topic)}))
		})

		It("returns error if checking whether log has been checked returns error", func() {
			checkedLogsRepository.AlreadyWatchingLogError = fakes.FakeError

			err := extractor.AddTransformerConfig(getTransformerConfig(rand.Int63()))

			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError(fakes.FakeError))
		})

		Describe("when log has previously been checked", func() {
			It("does not mark any headers unchecked", func() {
				checkedLogsRepository.AlreadyWatchingLogReturn = true

				err := extractor.AddTransformerConfig(getTransformerConfig(rand.Int63()))

				Expect(err).NotTo(HaveOccurred())
				Expect(checkedHeadersRepository.MarkHeadersUncheckedCalled).To(BeFalse())
			})
		})

		Describe("when log has not previously been checked", func() {
			BeforeEach(func() {
				checkedLogsRepository.AlreadyWatchingLogReturn = false
			})

			It("marks headers since transformer's starting block number as unchecked", func() {
				blockNumber := rand.Int63()

				err := extractor.AddTransformerConfig(getTransformerConfig(blockNumber))

				Expect(err).NotTo(HaveOccurred())
				Expect(checkedHeadersRepository.MarkHeadersUncheckedCalled).To(BeTrue())
				Expect(checkedHeadersRepository.MarkHeadersUncheckedStartingBlockNumber).To(Equal(blockNumber))
			})

			It("returns error if marking headers unchecked returns error", func() {
				checkedHeadersRepository.MarkHeadersUncheckedReturnError = fakes.FakeError

				err := extractor.AddTransformerConfig(getTransformerConfig(rand.Int63()))

				Expect(err).To(HaveOccurred())
				Expect(err).To(MatchError(fakes.FakeError))
			})

			It("persists that transformer's log has been checked", func() {
				config := getTransformerConfig(rand.Int63())

				err := extractor.AddTransformerConfig(config)

				Expect(err).NotTo(HaveOccurred())
				Expect(checkedLogsRepository.MarkLogWatchedAddresses).To(Equal(config.ContractAddresses))
				Expect(checkedLogsRepository.MarkLogWatchedTopicZero).To(Equal(config.Topic))
			})

			It("returns error if marking logs checked returns error", func() {
				checkedLogsRepository.MarkLogWatchedError = fakes.FakeError

				err := extractor.AddTransformerConfig(getTransformerConfig(rand.Int63()))

				Expect(err).To(HaveOccurred())
				Expect(err).To(MatchError(fakes.FakeError))
			})
		})
	})

	Describe("ExtractLogs", func() {
		It("returns error if no watched addresses configured", func() {
			err := extractor.ExtractLogs(constants.HeaderUnchecked)

			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError(logs.ErrNoWatchedAddresses))
		})

		Describe("when checking unchecked headers", func() {
			It("gets headers since configured starting block with check_count < 1", func() {
				mockCheckedHeadersRepository := &fakes.MockCheckedHeadersRepository{}
				mockCheckedHeadersRepository.UncheckedHeadersReturnHeaders = []core.Header{{}}
				extractor.CheckedHeadersRepository = mockCheckedHeadersRepository
				startingBlockNumber := rand.Int63()
				extractor.AddTransformerConfig(getTransformerConfig(startingBlockNumber))

				err := extractor.ExtractLogs(constants.HeaderUnchecked)

				Expect(err).NotTo(HaveOccurred())
				Expect(mockCheckedHeadersRepository.UncheckedHeadersStartingBlockNumber).To(Equal(startingBlockNumber))
				Expect(mockCheckedHeadersRepository.UncheckedHeadersEndingBlockNumber).To(Equal(int64(-1)))
				Expect(mockCheckedHeadersRepository.UncheckedHeadersCheckCount).To(Equal(int64(1)))
			})
		})

		Describe("when rechecking headers", func() {
			It("gets headers since configured starting block with check_count < RecheckHeaderCap", func() {
				mockCheckedHeadersRepository := &fakes.MockCheckedHeadersRepository{}
				mockCheckedHeadersRepository.UncheckedHeadersReturnHeaders = []core.Header{{}}
				extractor.CheckedHeadersRepository = mockCheckedHeadersRepository
				startingBlockNumber := rand.Int63()
				extractor.AddTransformerConfig(getTransformerConfig(startingBlockNumber))

				err := extractor.ExtractLogs(constants.HeaderRecheck)

				Expect(err).NotTo(HaveOccurred())
				Expect(mockCheckedHeadersRepository.UncheckedHeadersStartingBlockNumber).To(Equal(startingBlockNumber))
				Expect(mockCheckedHeadersRepository.UncheckedHeadersEndingBlockNumber).To(Equal(int64(-1)))
				Expect(mockCheckedHeadersRepository.UncheckedHeadersCheckCount).To(Equal(constants.RecheckHeaderCap))
			})
		})

		It("returns error if getting unchecked headers fails", func() {
			addTransformerConfig(extractor)
			mockCheckedHeadersRepository := &fakes.MockCheckedHeadersRepository{}
			mockCheckedHeadersRepository.UncheckedHeadersReturnError = fakes.FakeError
			extractor.CheckedHeadersRepository = mockCheckedHeadersRepository

			err := extractor.ExtractLogs(constants.HeaderUnchecked)

			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError(fakes.FakeError))
		})

		Describe("when no unchecked headers", func() {
			It("does not fetch logs", func() {
				addTransformerConfig(extractor)
				mockLogFetcher := &mocks.MockLogFetcher{}
				extractor.Fetcher = mockLogFetcher

				_ = extractor.ExtractLogs(constants.HeaderUnchecked)

				Expect(mockLogFetcher.FetchCalled).To(BeFalse())
			})

			It("returns error that no unchecked headers were found", func() {
				addTransformerConfig(extractor)
				mockLogFetcher := &mocks.MockLogFetcher{}
				extractor.Fetcher = mockLogFetcher

				err := extractor.ExtractLogs(constants.HeaderUnchecked)

				Expect(err).To(MatchError(logs.ErrNoUncheckedHeaders))
			})
		})

		Describe("when there are unchecked headers", func() {
			It("fetches logs for unchecked headers", func() {
				addUncheckedHeader(extractor)
				config := transformer.EventTransformerConfig{
					ContractAddresses:   []string{fakes.FakeAddress.Hex()},
					Topic:               fakes.FakeHash.Hex(),
					StartingBlockNumber: rand.Int63(),
				}
				addTransformerErr := extractor.AddTransformerConfig(config)
				Expect(addTransformerErr).NotTo(HaveOccurred())
				mockLogFetcher := &mocks.MockLogFetcher{}
				extractor.Fetcher = mockLogFetcher

				err := extractor.ExtractLogs(constants.HeaderUnchecked)

				Expect(err).NotTo(HaveOccurred())
				Expect(mockLogFetcher.FetchCalled).To(BeTrue())
				expectedTopics := []common.Hash{common.HexToHash(config.Topic)}
				Expect(mockLogFetcher.Topics).To(Equal(expectedTopics))
				expectedAddresses := transformer.HexStringsToAddresses(config.ContractAddresses)
				Expect(mockLogFetcher.ContractAddresses).To(Equal(expectedAddresses))
			})

			It("returns error if fetching logs fails", func() {
				addUncheckedHeader(extractor)
				addTransformerConfig(extractor)
				mockLogFetcher := &mocks.MockLogFetcher{}
				mockLogFetcher.ReturnError = fakes.FakeError
				extractor.Fetcher = mockLogFetcher

				err := extractor.ExtractLogs(constants.HeaderUnchecked)

				Expect(err).To(HaveOccurred())
				Expect(err).To(MatchError(fakes.FakeError))
			})

			Describe("when no fetched logs", func() {
				It("does not sync transactions", func() {
					addUncheckedHeader(extractor)
					addTransformerConfig(extractor)
					mockTransactionSyncer := &fakes.MockTransactionSyncer{}
					extractor.Syncer = mockTransactionSyncer

					err := extractor.ExtractLogs(constants.HeaderUnchecked)

					Expect(err).NotTo(HaveOccurred())
					Expect(mockTransactionSyncer.SyncTransactionsCalled).To(BeFalse())
				})
			})

			Describe("when there are fetched logs", func() {
				It("syncs transactions", func() {
					addUncheckedHeader(extractor)
					addFetchedLog(extractor)
					addTransformerConfig(extractor)
					mockTransactionSyncer := &fakes.MockTransactionSyncer{}
					extractor.Syncer = mockTransactionSyncer

					err := extractor.ExtractLogs(constants.HeaderUnchecked)

					Expect(err).NotTo(HaveOccurred())
					Expect(mockTransactionSyncer.SyncTransactionsCalled).To(BeTrue())
				})

				It("returns error if syncing transactions fails", func() {
					addUncheckedHeader(extractor)
					addFetchedLog(extractor)
					addTransformerConfig(extractor)
					mockTransactionSyncer := &fakes.MockTransactionSyncer{}
					mockTransactionSyncer.SyncTransactionsError = fakes.FakeError
					extractor.Syncer = mockTransactionSyncer

					err := extractor.ExtractLogs(constants.HeaderUnchecked)

					Expect(err).To(HaveOccurred())
					Expect(err).To(MatchError(fakes.FakeError))
				})

				It("persists fetched logs", func() {
					addUncheckedHeader(extractor)
					addTransformerConfig(extractor)
					fakeLogs := []types.Log{{
						Address: common.HexToAddress("0xA"),
						Topics:  []common.Hash{common.HexToHash("0xA")},
						Data:    []byte{},
						Index:   0,
					}}
					mockLogFetcher := &mocks.MockLogFetcher{ReturnLogs: fakeLogs}
					extractor.Fetcher = mockLogFetcher
					mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
					extractor.LogRepository = mockLogRepository

					err := extractor.ExtractLogs(constants.HeaderUnchecked)

					Expect(err).NotTo(HaveOccurred())
					Expect(mockLogRepository.PassedLogs).To(Equal(fakeLogs))
				})

				It("returns error if persisting logs fails", func() {
					addUncheckedHeader(extractor)
					addFetchedLog(extractor)
					addTransformerConfig(extractor)
					mockLogRepository := &fakes.MockHeaderSyncLogRepository{}
					mockLogRepository.CreateError = fakes.FakeError
					extractor.LogRepository = mockLogRepository

					err := extractor.ExtractLogs(constants.HeaderUnchecked)

					Expect(err).To(HaveOccurred())
					Expect(err).To(MatchError(fakes.FakeError))
				})
			})

			It("marks header checked", func() {
				addFetchedLog(extractor)
				addTransformerConfig(extractor)
				mockCheckedHeadersRepository := &fakes.MockCheckedHeadersRepository{}
				headerID := rand.Int63()
				mockCheckedHeadersRepository.UncheckedHeadersReturnHeaders = []core.Header{{Id: headerID}}
				extractor.CheckedHeadersRepository = mockCheckedHeadersRepository

				err := extractor.ExtractLogs(constants.HeaderUnchecked)

				Expect(err).NotTo(HaveOccurred())
				Expect(mockCheckedHeadersRepository.MarkHeaderCheckedHeaderID).To(Equal(headerID))
			})

			It("returns error if marking header checked fails", func() {
				addFetchedLog(extractor)
				addTransformerConfig(extractor)
				mockCheckedHeadersRepository := &fakes.MockCheckedHeadersRepository{}
				mockCheckedHeadersRepository.UncheckedHeadersReturnHeaders = []core.Header{{Id: rand.Int63()}}
				mockCheckedHeadersRepository.MarkHeaderCheckedReturnError = fakes.FakeError
				extractor.CheckedHeadersRepository = mockCheckedHeadersRepository

				err := extractor.ExtractLogs(constants.HeaderUnchecked)

				Expect(err).To(HaveOccurred())
				Expect(err).To(MatchError(fakes.FakeError))
			})

			It("returns nil for error if everything succeeds", func() {
				addUncheckedHeader(extractor)
				addTransformerConfig(extractor)

				err := extractor.ExtractLogs(constants.HeaderUnchecked)

				Expect(err).NotTo(HaveOccurred())
			})
		})
	})
})

func addTransformerConfig(extractor *logs.LogExtractor) {
	fakeConfig := transformer.EventTransformerConfig{
		ContractAddresses:   []string{fakes.FakeAddress.Hex()},
		Topic:               fakes.FakeHash.Hex(),
		StartingBlockNumber: rand.Int63(),
	}
	extractor.AddTransformerConfig(fakeConfig)
}

func addUncheckedHeader(extractor *logs.LogExtractor) {
	mockCheckedHeadersRepository := &fakes.MockCheckedHeadersRepository{}
	mockCheckedHeadersRepository.UncheckedHeadersReturnHeaders = []core.Header{{}}
	extractor.CheckedHeadersRepository = mockCheckedHeadersRepository
}

func addFetchedLog(extractor *logs.LogExtractor) {
	mockLogFetcher := &mocks.MockLogFetcher{}
	mockLogFetcher.ReturnLogs = []types.Log{{}}
	extractor.Fetcher = mockLogFetcher
}

func getTransformerConfig(startingBlockNumber int64) transformer.EventTransformerConfig {
	return transformer.EventTransformerConfig{
		ContractAddresses:   []string{fakes.FakeAddress.Hex()},
		Topic:               fakes.FakeHash.Hex(),
		StartingBlockNumber: startingBlockNumber,
	}
}
35  libraries/shared/logs/logs_suite_test.go  Normal file
@ -0,0 +1,35 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package logs_test

import (
	"github.com/sirupsen/logrus"
	"io/ioutil"
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

func TestLogs(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Logs Suite")
}

var _ = BeforeSuite(func() {
	logrus.SetOutput(ioutil.Discard)
})
@ -16,9 +16,7 @@

package mocks

-import (
-	"github.com/ethereum/go-ethereum/core/types"
-)
+import "github.com/vulcanize/vulcanizedb/pkg/core"

type MockConverter struct {
	ToEntitiesError error

@ -27,7 +25,7 @@ type MockConverter struct {

	entityConverterError error
	modelConverterError  error
	ContractAbi          string
-	LogsToConvert        []types.Log
+	LogsToConvert        []core.HeaderSyncLog
	EntitiesToConvert    []interface{}
	EntitiesToReturn     []interface{}
	ModelsToReturn       []interface{}

@ -35,9 +33,9 @@ type MockConverter struct {

	ToModelsCalledCounter int
}

-func (converter *MockConverter) ToEntities(contractAbi string, ethLogs []types.Log) ([]interface{}, error) {
+func (converter *MockConverter) ToEntities(contractAbi string, ethLogs []core.HeaderSyncLog) ([]interface{}, error) {
	for _, log := range ethLogs {
-		converter.PassedContractAddresses = append(converter.PassedContractAddresses, log.Address.Hex())
+		converter.PassedContractAddresses = append(converter.PassedContractAddresses, log.Log.Address.Hex())
	}
	converter.ContractAbi = contractAbi
	converter.LogsToConvert = ethLogs
43  libraries/shared/mocks/event_repository.go  Normal file
@ -0,0 +1,43 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package mocks

import (
	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
)

type MockEventRepository struct {
	createError         error
	PassedModels        []interface{}
	SetDbCalled         bool
	CreateCalledCounter int
}

func (repository *MockEventRepository) Create(models []interface{}) error {
	repository.PassedModels = models
	repository.CreateCalledCounter++

	return repository.createError
}

func (repository *MockEventRepository) SetDB(db *postgres.DB) {
	repository.SetDbCalled = true
}

func (repository *MockEventRepository) SetCreateError(e error) {
	repository.createError = e
}
@ -17,45 +17,42 @@
 package mocks

 import (
-	"github.com/ethereum/go-ethereum/core/types"
-
-	shared_t "github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
+	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
 	"github.com/vulcanize/vulcanizedb/pkg/core"
 	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+	"github.com/vulcanize/vulcanizedb/pkg/fakes"
 )

-type MockTransformer struct {
+type MockEventTransformer struct {
 	ExecuteWasCalled bool
 	ExecuteError     error
-	PassedLogs       []types.Log
-	PassedHeader     core.Header
-	config           shared_t.EventTransformerConfig
+	PassedLogs       []core.HeaderSyncLog
+	config           transformer.EventTransformerConfig
 }

-func (mh *MockTransformer) Execute(logs []types.Log, header core.Header) error {
-	if mh.ExecuteError != nil {
-		return mh.ExecuteError
+func (t *MockEventTransformer) Execute(logs []core.HeaderSyncLog) error {
+	if t.ExecuteError != nil {
+		return t.ExecuteError
 	}
-	mh.ExecuteWasCalled = true
-	mh.PassedLogs = logs
-	mh.PassedHeader = header
+	t.ExecuteWasCalled = true
+	t.PassedLogs = logs
 	return nil
 }

-func (mh *MockTransformer) GetConfig() shared_t.EventTransformerConfig {
-	return mh.config
+func (t *MockEventTransformer) GetConfig() transformer.EventTransformerConfig {
+	return t.config
 }

-func (mh *MockTransformer) SetTransformerConfig(config shared_t.EventTransformerConfig) {
-	mh.config = config
+func (t *MockEventTransformer) SetTransformerConfig(config transformer.EventTransformerConfig) {
+	t.config = config
 }

-func (mh *MockTransformer) FakeTransformerInitializer(db *postgres.DB) shared_t.EventTransformer {
-	return mh
+func (t *MockEventTransformer) FakeTransformerInitializer(db *postgres.DB) transformer.EventTransformer {
+	return t
 }

-var FakeTransformerConfig = shared_t.EventTransformerConfig{
+var FakeTransformerConfig = transformer.EventTransformerConfig{
 	TransformerName:   "FakeTransformer",
-	ContractAddresses: []string{"FakeAddress"},
-	Topic:             "FakeTopic",
+	ContractAddresses: []string{fakes.FakeAddress.Hex()},
+	Topic:             fakes.FakeHash.Hex(),
 }
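A minimal sketch of exercising the renamed mock under its new log-centric signature; the empty core.HeaderSyncLog literal is a placeholder, since that struct's fields are not shown in this diff.

package mocks_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
	"github.com/vulcanize/vulcanizedb/pkg/core"
)

var _ = Describe("MockEventTransformer", func() {
	It("records the logs passed to Execute", func() {
		t := &mocks.MockEventTransformer{}
		logs := []core.HeaderSyncLog{{}}

		err := t.Execute(logs)

		Expect(err).NotTo(HaveOccurred())
		Expect(t.ExecuteWasCalled).To(BeTrue())
		Expect(t.PassedLogs).To(Equal(logs))
	})
})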
45
libraries/shared/mocks/log_delegator.go
Normal file
@ -0,0 +1,45 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package mocks

import (
	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
)

type MockLogDelegator struct {
	AddedTransformers []transformer.EventTransformer
	DelegateCallCount int
	DelegateErrors    []error
}

func (delegator *MockLogDelegator) AddTransformer(t transformer.EventTransformer) {
	delegator.AddedTransformers = append(delegator.AddedTransformers, t)
}

func (delegator *MockLogDelegator) DelegateLogs() error {
	delegator.DelegateCallCount++
	if len(delegator.DelegateErrors) > 1 {
		var delegateErrorThisRun error
		delegateErrorThisRun, delegator.DelegateErrors = delegator.DelegateErrors[0], delegator.DelegateErrors[1:]
		return delegateErrorThisRun
	} else if len(delegator.DelegateErrors) == 1 {
		thisErr := delegator.DelegateErrors[0]
		delegator.DelegateErrors = []error{}
		return thisErr
	}
	return nil
}
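The DelegateErrors slice acts as a FIFO script: each DelegateLogs call pops and returns the next queued error, then yields nil once the queue is drained. A minimal sketch of driving that contract (the sentinel errors are invented for illustration):

package mocks_test

import (
	"errors"
	"fmt"

	"github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
)

func ExampleMockLogDelegator() {
	// Script two failures; the mock pops one queued error per call.
	delegator := &mocks.MockLogDelegator{
		DelegateErrors: []error{errors.New("first"), errors.New("second")},
	}

	fmt.Println(delegator.DelegateLogs())    // first
	fmt.Println(delegator.DelegateLogs())    // second
	fmt.Println(delegator.DelegateLogs())    // <nil> once the queue is drained
	fmt.Println(delegator.DelegateCallCount) // 3
}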
48
libraries/shared/mocks/log_extractor.go
Normal file
@ -0,0 +1,48 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package mocks

import (
	"github.com/vulcanize/vulcanizedb/libraries/shared/constants"
	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
)

type MockLogExtractor struct {
	AddedConfigs              []transformer.EventTransformerConfig
	AddTransformerConfigError error
	ExtractLogsCount          int
	ExtractLogsErrors         []error
}

func (extractor *MockLogExtractor) AddTransformerConfig(config transformer.EventTransformerConfig) error {
	extractor.AddedConfigs = append(extractor.AddedConfigs, config)
	return extractor.AddTransformerConfigError
}

func (extractor *MockLogExtractor) ExtractLogs(recheckHeaders constants.TransformerExecution) error {
	extractor.ExtractLogsCount++
	if len(extractor.ExtractLogsErrors) > 1 {
		var errorThisRun error
		errorThisRun, extractor.ExtractLogsErrors = extractor.ExtractLogsErrors[0], extractor.ExtractLogsErrors[1:]
		return errorThisRun
	} else if len(extractor.ExtractLogsErrors) == 1 {
		thisErr := extractor.ExtractLogsErrors[0]
		extractor.ExtractLogsErrors = []error{}
		return thisErr
	}
	return nil
}
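The extractor mock scripts per-call results the same way: queue errors, and each ExtractLogs call pops the next one. A sketch with an invented sentinel error; the watcher tests later in this diff use a similar errExecuteClosed sentinel to stop the Execute loop.

package mocks_test

import (
	"errors"
	"fmt"

	"github.com/vulcanize/vulcanizedb/libraries/shared/constants"
	"github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
)

func ExampleMockLogExtractor() {
	// Queue a single sentinel error; once popped, further calls succeed.
	extractor := &mocks.MockLogExtractor{
		ExtractLogsErrors: []error{errors.New("stop")},
	}

	fmt.Println(extractor.ExtractLogs(constants.HeaderRecheck)) // stop
	fmt.Println(extractor.ExtractLogs(constants.HeaderRecheck)) // <nil>
	fmt.Println(extractor.ExtractLogsCount)                     // 2
}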
@ -17,26 +17,24 @@
 package mocks

 import (
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
+
+	"github.com/vulcanize/vulcanizedb/pkg/core"
 )

-type MockLogNoteConverter struct {
-	err                   error
-	returnModels          []interface{}
-	PassedLogs            []types.Log
-	ToModelsCalledCounter int
+type MockLogFetcher struct {
+	ContractAddresses []common.Address
+	FetchCalled       bool
+	MissingHeader     core.Header
+	ReturnError       error
+	ReturnLogs        []types.Log
+	Topics            []common.Hash
 }

-func (converter *MockLogNoteConverter) ToModels(ethLogs []types.Log) ([]interface{}, error) {
-	converter.PassedLogs = ethLogs
-	converter.ToModelsCalledCounter++
-	return converter.returnModels, converter.err
-}
-
-func (converter *MockLogNoteConverter) SetConverterError(e error) {
-	converter.err = e
-}
-
-func (converter *MockLogNoteConverter) SetReturnModels(models []interface{}) {
-	converter.returnModels = models
+func (fetcher *MockLogFetcher) FetchLogs(contractAddresses []common.Address, topics []common.Hash, missingHeader core.Header) ([]types.Log, error) {
+	fetcher.FetchCalled = true
+	fetcher.ContractAddresses = contractAddresses
+	fetcher.Topics = topics
+	fetcher.MissingHeader = missingHeader
+	return fetcher.ReturnLogs, fetcher.ReturnError
 }
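As a usage note, the fetcher mock follows the stub-and-record pattern: canned ReturnLogs/ReturnError go out, and the call's arguments are captured for later assertions. A minimal sketch, with the addresses and hashes invented for illustration:

package mocks_test

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"

	"github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
	"github.com/vulcanize/vulcanizedb/pkg/core"
)

// stubOneLog shows the stub-and-record contract in one call.
func stubOneLog() ([]types.Log, error) {
	fetcher := &mocks.MockLogFetcher{
		ReturnLogs: []types.Log{{Address: common.HexToAddress("0x1")}},
	}
	logs, err := fetcher.FetchLogs(
		[]common.Address{common.HexToAddress("0x1")},
		[]common.Hash{common.HexToHash("0x2")},
		core.Header{BlockNumber: 1},
	)
	// fetcher.FetchCalled is now true; ContractAddresses, Topics, and
	// MissingHeader hold the values passed above.
	return logs, err
}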
@ -1,98 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package mocks
-
-import (
-	. "github.com/onsi/gomega"
-
-	"github.com/vulcanize/vulcanizedb/pkg/core"
-	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
-)
-
-type MockRepository struct {
-	createError                      error
-	markHeaderCheckedError           error
-	MarkHeaderCheckedPassedHeaderIDs []int64
-	CreatedHeaderIds                 []int64
-	missingHeaders                   []core.Header
-	allHeaders                       []core.Header
-	missingHeadersError              error
-	PassedStartingBlockNumber        int64
-	PassedEndingBlockNumber          int64
-	PassedHeaderID                   int64
-	PassedModels                     []interface{}
-	SetDbCalled                      bool
-	CreateCalledCounter              int
-}
-
-func (repository *MockRepository) Create(headerID int64, models []interface{}) error {
-	repository.PassedHeaderID = headerID
-	repository.PassedModels = models
-	repository.CreatedHeaderIds = append(repository.CreatedHeaderIds, headerID)
-	repository.CreateCalledCounter++
-
-	return repository.createError
-}
-
-func (repository *MockRepository) MarkHeaderChecked(headerID int64) error {
-	repository.MarkHeaderCheckedPassedHeaderIDs = append(repository.MarkHeaderCheckedPassedHeaderIDs, headerID)
-	return repository.markHeaderCheckedError
-}
-
-func (repository *MockRepository) MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
-	repository.PassedStartingBlockNumber = startingBlockNumber
-	repository.PassedEndingBlockNumber = endingBlockNumber
-	return repository.missingHeaders, repository.missingHeadersError
-}
-
-func (repository *MockRepository) RecheckHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
-	repository.PassedStartingBlockNumber = startingBlockNumber
-	repository.PassedEndingBlockNumber = endingBlockNumber
-	return repository.allHeaders, nil
-}
-
-func (repository *MockRepository) SetDB(db *postgres.DB) {
-	repository.SetDbCalled = true
-}
-
-func (repository *MockRepository) SetMissingHeadersError(e error) {
-	repository.missingHeadersError = e
-}
-
-func (repository *MockRepository) SetAllHeaders(headers []core.Header) {
-	repository.allHeaders = headers
-}
-
-func (repository *MockRepository) SetMissingHeaders(headers []core.Header) {
-	repository.missingHeaders = headers
-}
-
-func (repository *MockRepository) SetMarkHeaderCheckedError(e error) {
-	repository.markHeaderCheckedError = e
-}
-
-func (repository *MockRepository) SetCreateError(e error) {
-	repository.createError = e
-}
-
-func (repository *MockRepository) AssertMarkHeaderCheckedCalledWith(i int64) {
-	Expect(repository.MarkHeaderCheckedPassedHeaderIDs).To(ContainElement(i))
-}
-
-func (repository *MockRepository) AssertMarkHeaderCheckedNotCalled() {
-	Expect(len(repository.MarkHeaderCheckedPassedHeaderIDs)).To(Equal(0))
-}
@ -1,69 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package mocks
-
-import (
-	"github.com/vulcanize/vulcanizedb/pkg/core"
-	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
-)
-
-type MockWatcherRepository struct {
-	ReturnCheckedColumnNames    []string
-	GetCheckedColumnNamesError  error
-	GetCheckedColumnNamesCalled bool
-
-	ReturnNotCheckedSQL       string
-	CreateNotCheckedSQLCalled bool
-
-	ReturnMissingHeaders []core.Header
-	MissingHeadersError  error
-	MissingHeadersCalled bool
-}
-
-func (repository *MockWatcherRepository) GetCheckedColumnNames(db *postgres.DB) ([]string, error) {
-	repository.GetCheckedColumnNamesCalled = true
-	if repository.GetCheckedColumnNamesError != nil {
-		return []string{}, repository.GetCheckedColumnNamesError
-	}
-
-	return repository.ReturnCheckedColumnNames, nil
-}
-
-func (repository *MockWatcherRepository) SetCheckedColumnNames(checkedColumnNames []string) {
-	repository.ReturnCheckedColumnNames = checkedColumnNames
-}
-
-func (repository *MockWatcherRepository) CreateNotCheckedSQL(boolColumns []string) string {
-	repository.CreateNotCheckedSQLCalled = true
-	return repository.ReturnNotCheckedSQL
-}
-
-func (repository *MockWatcherRepository) SetNotCheckedSQL(notCheckedSql string) {
-	repository.ReturnNotCheckedSQL = notCheckedSql
-}
-
-func (repository *MockWatcherRepository) MissingHeaders(startingBlockNumber int64, endingBlockNumber int64, db *postgres.DB, notCheckedSQL string) ([]core.Header, error) {
-	if repository.MissingHeadersError != nil {
-		return []core.Header{}, repository.MissingHeadersError
-	}
-	repository.MissingHeadersCalled = true
-	return repository.ReturnMissingHeaders, nil
-}
-
-func (repository *MockWatcherRepository) SetMissingHeaders(headers []core.Header) {
-	repository.ReturnMissingHeaders = headers
-}
@ -16,128 +16,12 @@

 package repository

-import (
-	"bytes"
-	"database/sql/driver"
-	"fmt"
-
-	"github.com/jmoiron/sqlx"
-
-	"github.com/vulcanize/vulcanizedb/libraries/shared/constants"
-	"github.com/vulcanize/vulcanizedb/pkg/core"
-	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
-)
-
-func MarkHeaderChecked(headerID int64, db *postgres.DB, checkedHeadersColumn string) error {
-	_, err := db.Exec(`INSERT INTO public.checked_headers (header_id, `+checkedHeadersColumn+`)
-		VALUES ($1, $2)
-		ON CONFLICT (header_id) DO
-			UPDATE SET `+checkedHeadersColumn+` = checked_headers.`+checkedHeadersColumn+` + 1`, headerID, 1)
-	return err
-}
+import "github.com/jmoiron/sqlx"

-func MarkHeaderCheckedInTransaction(headerID int64, tx *sqlx.Tx, checkedHeadersColumn string) error {
+func MarkContractWatcherHeaderCheckedInTransaction(headerID int64, tx *sqlx.Tx, checkedHeadersColumn string) error {
 	_, err := tx.Exec(`INSERT INTO public.checked_headers (header_id, `+checkedHeadersColumn+`)
 		VALUES ($1, $2)
 		ON CONFLICT (header_id) DO
 			UPDATE SET `+checkedHeadersColumn+` = checked_headers.`+checkedHeadersColumn+` + 1`, headerID, 1)
 	return err
 }

-// Treats a header as missing if it's not in the headers table, or not checked for some log type
-func MissingHeaders(startingBlockNumber, endingBlockNumber int64, db *postgres.DB, notCheckedSQL string) ([]core.Header, error) {
-	var result []core.Header
-	var query string
-	var err error
-
-	if endingBlockNumber == -1 {
-		query = `SELECT headers.id, headers.block_number, headers.hash FROM headers
-			LEFT JOIN checked_headers on headers.id = header_id
-			WHERE (header_id ISNULL OR ` + notCheckedSQL + `)
-			AND headers.block_number >= $1
-			AND headers.eth_node_fingerprint = $2`
-		err = db.Select(&result, query, startingBlockNumber, db.Node.ID)
-	} else {
-		query = `SELECT headers.id, headers.block_number, headers.hash FROM headers
-			LEFT JOIN checked_headers on headers.id = header_id
-			WHERE (header_id ISNULL OR ` + notCheckedSQL + `)
-			AND headers.block_number >= $1
-			AND headers.block_number <= $2
-			AND headers.eth_node_fingerprint = $3`
-		err = db.Select(&result, query, startingBlockNumber, endingBlockNumber, db.Node.ID)
-	}
-
-	return result, err
-}
-
-func GetCheckedColumnNames(db *postgres.DB) ([]string, error) {
-	// Query returns `[]driver.Value`, nullable polymorphic interface
-	var queryResult []driver.Value
-	columnNamesQuery :=
-		`SELECT column_name FROM information_schema.columns
-		WHERE table_schema = 'public'
-			AND table_name = 'checked_headers'
-			AND column_name <> 'id'
-			AND column_name <> 'header_id';`
-
-	err := db.Select(&queryResult, columnNamesQuery)
-	if err != nil {
-		return []string{}, err
-	}
-
-	// Transform column names from `driver.Value` to strings
-	var columnNames []string
-	for _, result := range queryResult {
-		if columnName, ok := result.(string); ok {
-			columnNames = append(columnNames, columnName)
-		} else {
-			return []string{}, fmt.Errorf("incorrect value for checked_headers column name")
-		}
-	}
-
-	return columnNames, nil
-}
-
-// Builds a SQL string that checks if any column should be checked/rechecked.
-// Defaults to FALSE when no columns are provided.
-// Ex: ["columnA", "columnB"] => "NOT (columnA!=0 AND columnB!=0)"
-//     [] => "FALSE"
-func CreateHeaderCheckedPredicateSQL(boolColumns []string, recheckHeaders constants.TransformerExecution) string {
-	if len(boolColumns) == 0 {
-		return "FALSE"
-	}
-
-	if recheckHeaders {
-		return createHeaderCheckedPredicateSQLForRecheckedHeaders(boolColumns)
-	} else {
-		return createHeaderCheckedPredicateSQLForMissingHeaders(boolColumns)
-	}
-}
-
-func createHeaderCheckedPredicateSQLForMissingHeaders(boolColumns []string) string {
-	var result bytes.Buffer
-	result.WriteString(" (")
-
-	// Loop excluding last column name
-	for _, column := range boolColumns[:len(boolColumns)-1] {
-		result.WriteString(fmt.Sprintf("%v=0 OR ", column))
-	}
-
-	result.WriteString(fmt.Sprintf("%v=0)", boolColumns[len(boolColumns)-1]))
-
-	return result.String()
-}
-
-func createHeaderCheckedPredicateSQLForRecheckedHeaders(boolColumns []string) string {
-	var result bytes.Buffer
-	result.WriteString(" (")
-
-	// Loop excluding last column name
-	for _, column := range boolColumns[:len(boolColumns)-1] {
-		result.WriteString(fmt.Sprintf("%v<%s OR ", column, constants.RecheckHeaderCap))
-	}
-
-	// No trailing "OR" for the last column name
-	result.WriteString(fmt.Sprintf("%v<%s)", boolColumns[len(boolColumns)-1], constants.RecheckHeaderCap))
-
-	return result.String()
-}
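Since only the transaction-scoped helper survives, callers now own the transaction boundaries. A minimal sketch of the intended call pattern, assuming a caller-managed sqlx transaction; the helper name markChecked and the column name are hypothetical:

package example

import (
	"github.com/jmoiron/sqlx"

	"github.com/vulcanize/vulcanizedb/libraries/shared/repository"
)

// markChecked shows the intended pattern: the upsert runs inside a
// caller-managed transaction and bumps the per-transformer check count
// on conflict.
func markChecked(db *sqlx.DB, headerID int64, column string) error {
	tx, err := db.Beginx()
	if err != nil {
		return err
	}
	if markErr := repository.MarkContractWatcherHeaderCheckedInTransaction(headerID, tx, column); markErr != nil {
		_ = tx.Rollback() // discard the partial transaction on failure
		return markErr
	}
	return tx.Commit()
}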
@ -17,35 +17,25 @@
 package repository_test

 import (
-	"fmt"
-	"math/rand"
-	"strconv"
-
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"

-	"github.com/vulcanize/vulcanizedb/libraries/shared/constants"
-	shared "github.com/vulcanize/vulcanizedb/libraries/shared/repository"
-	r2 "github.com/vulcanize/vulcanizedb/pkg/contract_watcher/header/repository"
-	"github.com/vulcanize/vulcanizedb/pkg/core"
-	"github.com/vulcanize/vulcanizedb/pkg/datastore"
+	"github.com/vulcanize/vulcanizedb/libraries/shared/repository"
 	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
 	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
 	"github.com/vulcanize/vulcanizedb/pkg/fakes"
 	"github.com/vulcanize/vulcanizedb/test_config"
 )

-var _ = Describe("Repository", func() {
+var _ = Describe("", func() {
+	Describe("MarkContractWatcherHeaderCheckedInTransaction", func() {
 	var (
 		checkedHeadersColumn string
 		db                   *postgres.DB
 	)

-	Describe("MarkHeaderChecked", func() {
 		BeforeEach(func() {
 			db = test_config.NewTestDB(test_config.NewTestNode())
 			test_config.CleanTestDB(db)

 			checkedHeadersColumn = "test_column_checked"
 			_, migrateErr := db.Exec(`ALTER TABLE public.checked_headers
				ADD COLUMN ` + checkedHeadersColumn + ` integer`)
@ -57,247 +47,21 @@ var _ = Describe("Repository", func() {
 			Expect(cleanupMigrateErr).NotTo(HaveOccurred())
 		})

-		It("marks passed column as checked for passed header", func() {
-			headerRepository := repositories.NewHeaderRepository(db)
-			headerID, headerErr := headerRepository.CreateOrUpdateHeader(fakes.FakeHeader)
-			Expect(headerErr).NotTo(HaveOccurred())
-
-			err := shared.MarkHeaderChecked(headerID, db, checkedHeadersColumn)
-
-			Expect(err).NotTo(HaveOccurred())
-			var checkedCount int
-			fetchErr := db.Get(&checkedCount, `SELECT `+checkedHeadersColumn+` FROM public.checked_headers LIMIT 1`)
-			Expect(fetchErr).NotTo(HaveOccurred())
-			Expect(checkedCount).To(Equal(1))
-		})
-	})
-
-	Describe("MarkHeaderCheckedInTransaction", func() {
-		BeforeEach(func() {
-			db = test_config.NewTestDB(test_config.NewTestNode())
-			test_config.CleanTestDB(db)
-
-			checkedHeadersColumn = "test_column_checked"
-			_, migrateErr := db.Exec(`ALTER TABLE public.checked_headers
-				ADD COLUMN ` + checkedHeadersColumn + ` integer`)
-			Expect(migrateErr).NotTo(HaveOccurred())
-		})
-
-		AfterEach(func() {
-			_, cleanupMigrateErr := db.Exec(`ALTER TABLE public.checked_headers DROP COLUMN ` + checkedHeadersColumn)
-			Expect(cleanupMigrateErr).NotTo(HaveOccurred())
-		})
-
-		It("marks passed column as checked for passed header within a passed transaction", func() {
+		It("marks passed header as checked within a passed transaction", func() {
 			headerRepository := repositories.NewHeaderRepository(db)
 			headerID, headerErr := headerRepository.CreateOrUpdateHeader(fakes.FakeHeader)
 			Expect(headerErr).NotTo(HaveOccurred())
 			tx, txErr := db.Beginx()
 			Expect(txErr).NotTo(HaveOccurred())

-			err := shared.MarkHeaderCheckedInTransaction(headerID, tx, checkedHeadersColumn)
+			err := repository.MarkContractWatcherHeaderCheckedInTransaction(headerID, tx, checkedHeadersColumn)

 			Expect(err).NotTo(HaveOccurred())
 			commitErr := tx.Commit()
 			Expect(commitErr).NotTo(HaveOccurred())
 			var checkedCount int
-			fetchErr := db.Get(&checkedCount, `SELECT `+checkedHeadersColumn+` FROM public.checked_headers LIMIT 1`)
+			fetchErr := db.Get(&checkedCount, `SELECT COUNT(*) FROM public.checked_headers WHERE header_id = $1`, headerID)
 			Expect(fetchErr).NotTo(HaveOccurred())
 			Expect(checkedCount).To(Equal(1))
 		})
 	})
-
-	Describe("MissingHeaders", func() {
-		var (
-			headerRepository         datastore.HeaderRepository
-			startingBlockNumber      int64
-			endingBlockNumber        int64
-			eventSpecificBlockNumber int64
-			outOfRangeBlockNumber    int64
-			blockNumbers             []int64
-			headerIDs                []int64
-			notCheckedSQL            string
-			err                      error
-			hr                       r2.HeaderRepository
-			columnNames              []string
-		)
-
-		BeforeEach(func() {
-			db = test_config.NewTestDB(test_config.NewTestNode())
-			test_config.CleanTestDB(db)
-			headerRepository = repositories.NewHeaderRepository(db)
-			hr = r2.NewHeaderRepository(db)
-			hr.AddCheckColumns(getExpectedColumnNames())
-
-			columnNames, err = shared.GetCheckedColumnNames(db)
-			Expect(err).NotTo(HaveOccurred())
-			notCheckedSQL = shared.CreateHeaderCheckedPredicateSQL(columnNames, constants.HeaderMissing)
-
-			startingBlockNumber = rand.Int63()
-			eventSpecificBlockNumber = startingBlockNumber + 1
-			endingBlockNumber = startingBlockNumber + 2
-			outOfRangeBlockNumber = endingBlockNumber + 1
-
-			blockNumbers = []int64{startingBlockNumber, eventSpecificBlockNumber, endingBlockNumber, outOfRangeBlockNumber}
-
-			headerIDs = []int64{}
-			for _, n := range blockNumbers {
-				headerID, err := headerRepository.CreateOrUpdateHeader(fakes.GetFakeHeader(n))
-				headerIDs = append(headerIDs, headerID)
-				Expect(err).NotTo(HaveOccurred())
-			}
-		})
-
-		AfterEach(func() {
-			test_config.CleanCheckedHeadersTable(db, getExpectedColumnNames())
-		})
-
-		It("only treats headers as checked if the event specific logs have been checked", func() {
-			// add a checked_headers record, but don't mark it checked for any of the columns
-			_, err = db.Exec(`INSERT INTO public.checked_headers (header_id) VALUES ($1)`, headerIDs[1])
-			Expect(err).NotTo(HaveOccurred())
-
-			headers, err := shared.MissingHeaders(startingBlockNumber, endingBlockNumber, db, notCheckedSQL)
-
-			Expect(err).NotTo(HaveOccurred())
-			Expect(len(headers)).To(Equal(3))
-			Expect(headers[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(eventSpecificBlockNumber)))
-			Expect(headers[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(eventSpecificBlockNumber)))
-			Expect(headers[2].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(eventSpecificBlockNumber)))
-		})
-
-		It("only returns headers associated with the current node", func() {
-			dbTwo := test_config.NewTestDB(core.Node{ID: "second"})
-			headerRepositoryTwo := repositories.NewHeaderRepository(dbTwo)
-			for _, n := range blockNumbers {
-				_, err = headerRepositoryTwo.CreateOrUpdateHeader(fakes.GetFakeHeader(n + 10))
-				Expect(err).NotTo(HaveOccurred())
-			}
-
-			nodeOneMissingHeaders, err := shared.MissingHeaders(startingBlockNumber, endingBlockNumber, db, notCheckedSQL)
-			Expect(err).NotTo(HaveOccurred())
-			Expect(len(nodeOneMissingHeaders)).To(Equal(3))
-			Expect(nodeOneMissingHeaders[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(eventSpecificBlockNumber), Equal(endingBlockNumber)))
-			Expect(nodeOneMissingHeaders[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(eventSpecificBlockNumber), Equal(endingBlockNumber)))
-			Expect(nodeOneMissingHeaders[2].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(eventSpecificBlockNumber), Equal(endingBlockNumber)))
-
-			nodeTwoMissingHeaders, err := shared.MissingHeaders(startingBlockNumber, endingBlockNumber+10, dbTwo, notCheckedSQL)
-			Expect(err).NotTo(HaveOccurred())
-			Expect(len(nodeTwoMissingHeaders)).To(Equal(3))
-			Expect(nodeTwoMissingHeaders[0].BlockNumber).To(Or(Equal(startingBlockNumber+10), Equal(eventSpecificBlockNumber+10), Equal(endingBlockNumber+10)))
-			Expect(nodeTwoMissingHeaders[1].BlockNumber).To(Or(Equal(startingBlockNumber+10), Equal(eventSpecificBlockNumber+10), Equal(endingBlockNumber+10)))
-		})
-
-		It("handles an ending block of -1", func() {
-			endingBlock := int64(-1)
-			headers, err := shared.MissingHeaders(startingBlockNumber, endingBlock, db, notCheckedSQL)
-
-			Expect(err).NotTo(HaveOccurred())
-			Expect(len(headers)).To(Equal(4))
-			Expect(headers[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(eventSpecificBlockNumber), Equal(outOfRangeBlockNumber)))
-			Expect(headers[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(eventSpecificBlockNumber), Equal(outOfRangeBlockNumber)))
-			Expect(headers[2].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(eventSpecificBlockNumber), Equal(outOfRangeBlockNumber)))
-			Expect(headers[3].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber), Equal(eventSpecificBlockNumber), Equal(outOfRangeBlockNumber)))
-		})
-
-		It("when the `notCheckedSQL` argument allows for rechecks it returns headers where the checked count is less than the maximum", func() {
-			columnName := columnNames[0]
-			recheckedSQL := shared.CreateHeaderCheckedPredicateSQL([]string{columnName}, constants.HeaderRecheck)
-			// mark every header checked at least once;
-			// header 4 is marked the maximum number of times, so it is no longer checked
-
-			maxCheckCount, intConversionErr := strconv.Atoi(constants.RecheckHeaderCap)
-			Expect(intConversionErr).NotTo(HaveOccurred())
-
-			markHeaderOneErr := shared.MarkHeaderChecked(headerIDs[0], db, columnName)
-			Expect(markHeaderOneErr).NotTo(HaveOccurred())
-			markHeaderTwoErr := shared.MarkHeaderChecked(headerIDs[1], db, columnName)
-			Expect(markHeaderTwoErr).NotTo(HaveOccurred())
-			markHeaderThreeErr := shared.MarkHeaderChecked(headerIDs[2], db, columnName)
-			Expect(markHeaderThreeErr).NotTo(HaveOccurred())
-			for i := 0; i <= maxCheckCount; i++ {
-				markHeaderFourErr := shared.MarkHeaderChecked(headerIDs[3], db, columnName)
-				Expect(markHeaderFourErr).NotTo(HaveOccurred())
-			}
-
-			headers, err := shared.MissingHeaders(1, -1, db, recheckedSQL)
-
-			Expect(err).NotTo(HaveOccurred())
-			Expect(len(headers)).To(Equal(3))
-			Expect(headers[0].Id).To(Or(Equal(headerIDs[0]), Equal(headerIDs[1]), Equal(headerIDs[2])))
-			Expect(headers[1].Id).To(Or(Equal(headerIDs[0]), Equal(headerIDs[1]), Equal(headerIDs[2])))
-			Expect(headers[2].Id).To(Or(Equal(headerIDs[0]), Equal(headerIDs[1]), Equal(headerIDs[2])))
-		})
-	})
-
-	Describe("GetCheckedColumnNames", func() {
-		It("gets the column names from checked_headers", func() {
-			db := test_config.NewTestDB(test_config.NewTestNode())
-			hr := r2.NewHeaderRepository(db)
-			hr.AddCheckColumns(getExpectedColumnNames())
-			test_config.CleanTestDB(db)
-			expectedColumnNames := getExpectedColumnNames()
-			actualColumnNames, err := shared.GetCheckedColumnNames(db)
-			Expect(err).NotTo(HaveOccurred())
-			Expect(actualColumnNames).To(Equal(expectedColumnNames))
-			test_config.CleanCheckedHeadersTable(db, getExpectedColumnNames())
-		})
-	})
-
-	Describe("CreateHeaderCheckedPredicateSQL", func() {
-		Describe("for headers that haven't been checked for logs", func() {
-			It("generates a correct SQL string for one column", func() {
-				columns := []string{"columnA"}
-				expected := " (columnA=0)"
-				actual := shared.CreateHeaderCheckedPredicateSQL(columns, constants.HeaderMissing)
-				Expect(actual).To(Equal(expected))
-			})
-
-			It("generates a correct SQL string for several columns", func() {
-				columns := []string{"columnA", "columnB"}
-				expected := " (columnA=0 OR columnB=0)"
-				actual := shared.CreateHeaderCheckedPredicateSQL(columns, constants.HeaderMissing)
-				Expect(actual).To(Equal(expected))
-			})
-
-			It("defaults to FALSE when there are no columns", func() {
-				expected := "FALSE"
-				actual := shared.CreateHeaderCheckedPredicateSQL([]string{}, constants.HeaderMissing)
-				Expect(actual).To(Equal(expected))
-			})
-		})
-
-		Describe("for headers that are being rechecked for logs", func() {
-			It("generates a correct SQL string for rechecking headers for one column", func() {
-				columns := []string{"columnA"}
-				expected := fmt.Sprintf(" (columnA<%s)", constants.RecheckHeaderCap)
-				actual := shared.CreateHeaderCheckedPredicateSQL(columns, constants.HeaderRecheck)
-				Expect(actual).To(Equal(expected))
-			})
-
-			It("generates a correct SQL string for rechecking headers for several columns", func() {
-				columns := []string{"columnA", "columnB"}
-				expected := fmt.Sprintf(" (columnA<%s OR columnB<%s)", constants.RecheckHeaderCap, constants.RecheckHeaderCap)
-				actual := shared.CreateHeaderCheckedPredicateSQL(columns, constants.HeaderRecheck)
-				Expect(actual).To(Equal(expected))
-			})
-
-			It("defaults to FALSE when there are no columns", func() {
-				expected := "FALSE"
-				actual := shared.CreateHeaderCheckedPredicateSQL([]string{}, constants.HeaderRecheck)
-				Expect(actual).To(Equal(expected))
-			})
-		})
-	})
 })
-
-func getExpectedColumnNames() []string {
-	return []string{
-		"column_1",
-		"column_2",
-		"column_3",
-		"column_4",
-	}
-}
@ -17,41 +17,54 @@
 package test_data

 import (
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
 	"math/rand"
 	"time"
-
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/types"
-
-	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
 )

 type GenericModel struct{}
 type GenericEntity struct{}

 var startingBlockNumber = rand.Int63()
-var topic = "0x" + randomString(64)
-var address = "0x" + randomString(38)
+var topic0 = "0x" + randomString(64)

-var GenericTestLogs = []types.Log{{
-	Address:     common.HexToAddress(address),
-	Topics:      []common.Hash{common.HexToHash(topic)},
-	BlockNumber: uint64(startingBlockNumber),
-}}
+var GenericTestLog = func() types.Log {
+	return types.Log{
+		Address:     fakeAddress(),
+		Topics:      []common.Hash{common.HexToHash(topic0), fakeHash()},
+		Data:        hexutil.MustDecode(fakeHash().Hex()),
+		BlockNumber: uint64(startingBlockNumber),
+		TxHash:      fakeHash(),
+		TxIndex:     uint(rand.Int31()),
+		BlockHash:   fakeHash(),
+		Index:       uint(rand.Int31()),
+	}
+}

 var GenericTestConfig = transformer.EventTransformerConfig{
 	TransformerName:   "generic-test-transformer",
-	ContractAddresses: []string{address},
+	ContractAddresses: []string{fakeAddress().Hex()},
 	ContractAbi:       randomString(100),
-	Topic:             topic,
+	Topic:             topic0,
 	StartingBlockNumber: startingBlockNumber,
 	EndingBlockNumber:   startingBlockNumber + 1,
 }

+func fakeAddress() common.Address {
+	return common.HexToAddress("0x" + randomString(40))
+}
+
+func fakeHash() common.Hash {
+	return common.HexToHash("0x" + randomString(64))
+}
+
 func randomString(length int) string {
 	var seededRand = rand.New(
 		rand.NewSource(time.Now().UnixNano()))
-	charset := "abcdefghijklmnopqrstuvwxyz1234567890"
+	charset := "abcdef1234567890"
 	b := make([]byte, length)
 	for i := range b {
 		b[i] = charset[seededRand.Intn(len(charset))]
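Turning GenericTestLog into a function means each call mints a fresh, randomized log instead of fixtures sharing one package-level value, and the narrowed hex-only charset keeps the generated strings decodable by hexutil. A minimal sketch, assuming the package keeps its libraries/shared/test_data import path:

package test_data_test

import (
	"fmt"

	"github.com/vulcanize/vulcanizedb/libraries/shared/test_data"
)

func ExampleGenericTestLog() {
	// Each call produces a distinct log, so tests cannot accidentally
	// mutate shared fixture state.
	logOne := test_data.GenericTestLog()
	logTwo := test_data.GenericTestLog()

	fmt.Println(logOne.TxHash == logTwo.TxHash) // almost surely false
}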
@ -18,14 +18,12 @@ package transformer

 import (
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/types"

 	"github.com/vulcanize/vulcanizedb/pkg/core"
 	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
 )

 type EventTransformer interface {
-	Execute(logs []types.Log, header core.Header) error
+	Execute(logs []core.HeaderSyncLog) error
 	GetConfig() EventTransformerConfig
 }
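With the header dropped from the signature, a transformer now receives logs already joined to their header rows. A minimal sketch of a hypothetical implementation of the slimmed interface (noopTransformer is invented for illustration):

package example

import (
	"github.com/sirupsen/logrus"

	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
	"github.com/vulcanize/vulcanizedb/pkg/core"
)

// noopTransformer satisfies the new interface: headers no longer arrive
// as a separate argument, since each HeaderSyncLog is already tied to its
// header in the database.
type noopTransformer struct {
	config transformer.EventTransformerConfig
}

var _ transformer.EventTransformer = noopTransformer{}

func (t noopTransformer) Execute(logs []core.HeaderSyncLog) error {
	logrus.Debugf("received %d logs", len(logs))
	return nil
}

func (t noopTransformer) GetConfig() transformer.EventTransformerConfig {
	return t.config
}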
@ -17,137 +17,110 @@
 package watcher

 import (
-	"fmt"
-	"github.com/vulcanize/vulcanizedb/libraries/shared/transactions"
-
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/sirupsen/logrus"

 	"github.com/vulcanize/vulcanizedb/libraries/shared/chunker"
 	"github.com/vulcanize/vulcanizedb/libraries/shared/constants"
 	"github.com/vulcanize/vulcanizedb/libraries/shared/fetcher"
-	"github.com/vulcanize/vulcanizedb/libraries/shared/repository"
+	"github.com/vulcanize/vulcanizedb/libraries/shared/logs"
+	"github.com/vulcanize/vulcanizedb/libraries/shared/transactions"
 	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
 	"github.com/vulcanize/vulcanizedb/pkg/core"
 	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
+	"time"
 )

+const NoNewDataPause = time.Second * 7
+
 type EventWatcher struct {
-	Transformers  []transformer.EventTransformer
-	BlockChain    core.BlockChain
-	DB            *postgres.DB
-	Fetcher       fetcher.ILogFetcher
-	Chunker       chunker.Chunker
-	Addresses     []common.Address
-	Topics        []common.Hash
-	StartingBlock *int64
-	Syncer        transactions.ITransactionsSyncer
+	blockChain   core.BlockChain
+	db           *postgres.DB
+	LogDelegator logs.ILogDelegator
+	LogExtractor logs.ILogExtractor
 }

 func NewEventWatcher(db *postgres.DB, bc core.BlockChain) EventWatcher {
-	logChunker := chunker.NewLogChunker()
-	logFetcher := fetcher.NewLogFetcher(bc)
-	transactionSyncer := transactions.NewTransactionsSyncer(db, bc)
+	extractor := &logs.LogExtractor{
+		CheckedHeadersRepository: repositories.NewCheckedHeadersRepository(db),
+		CheckedLogsRepository:    repositories.NewCheckedLogsRepository(db),
+		Fetcher:                  fetcher.NewLogFetcher(bc),
+		LogRepository:            repositories.NewHeaderSyncLogRepository(db),
+		Syncer:                   transactions.NewTransactionsSyncer(db, bc),
+	}
+	logTransformer := &logs.LogDelegator{
+		Chunker:       chunker.NewLogChunker(),
+		LogRepository: repositories.NewHeaderSyncLogRepository(db),
+	}
 	return EventWatcher{
-		BlockChain: bc,
-		DB:         db,
-		Fetcher:    logFetcher,
-		Chunker:    logChunker,
-		Syncer:     transactionSyncer,
+		blockChain:   bc,
+		db:           db,
+		LogExtractor: extractor,
+		LogDelegator: logTransformer,
 	}
 }

-// Adds transformers to the watcher and updates the chunker, so that it will consider the new transformers.
-func (watcher *EventWatcher) AddTransformers(initializers []transformer.EventTransformerInitializer) {
-	var contractAddresses []common.Address
-	var topic0s []common.Hash
-	var configs []transformer.EventTransformerConfig
-
+// Adds transformers to the watcher so that their logs will be extracted and delegated.
+func (watcher *EventWatcher) AddTransformers(initializers []transformer.EventTransformerInitializer) error {
 	for _, initializer := range initializers {
-		t := initializer(watcher.DB)
-		watcher.Transformers = append(watcher.Transformers, t)
-
-		config := t.GetConfig()
-		configs = append(configs, config)
-
-		if watcher.StartingBlock == nil {
-			watcher.StartingBlock = &config.StartingBlockNumber
-		} else if earlierStartingBlockNumber(config.StartingBlockNumber, *watcher.StartingBlock) {
-			watcher.StartingBlock = &config.StartingBlockNumber
-		}
-
-		addresses := transformer.HexStringsToAddresses(config.ContractAddresses)
-		contractAddresses = append(contractAddresses, addresses...)
-		topic0s = append(topic0s, common.HexToHash(config.Topic))
-	}
-
-	watcher.Addresses = append(watcher.Addresses, contractAddresses...)
-	watcher.Topics = append(watcher.Topics, topic0s...)
-	watcher.Chunker.AddConfigs(configs)
-}
-
-func (watcher *EventWatcher) Execute(recheckHeaders constants.TransformerExecution) error {
-	if watcher.Transformers == nil {
-		return fmt.Errorf("No transformers added to watcher")
-	}
-
-	checkedColumnNames, err := repository.GetCheckedColumnNames(watcher.DB)
-	if err != nil {
-		return err
-	}
-	notCheckedSQL := repository.CreateHeaderCheckedPredicateSQL(checkedColumnNames, recheckHeaders)
-
-	missingHeaders, err := repository.MissingHeaders(*watcher.StartingBlock, -1, watcher.DB, notCheckedSQL)
-	if err != nil {
-		logrus.Error("Couldn't fetch missing headers in watcher: ", err)
-		return err
-	}
-
-	for _, header := range missingHeaders {
-		// TODO Extend FetchLogs for doing several blocks at a time
-		logs, err := watcher.Fetcher.FetchLogs(watcher.Addresses, watcher.Topics, header)
-		if err != nil {
-			logrus.WithFields(logrus.Fields{
-				"headerId":    header.Id,
-				"headerHash":  header.Hash,
-				"blockNumber": header.BlockNumber,
-			}).Errorf("Couldn't fetch logs for header: %v", err)
-			return err
-		}
-
-		transactionsSyncErr := watcher.Syncer.SyncTransactions(header.Id, logs)
-		if transactionsSyncErr != nil {
-			logrus.Errorf("error syncing transactions: %s", transactionsSyncErr.Error())
-			return transactionsSyncErr
-		}
-
-		transformErr := watcher.transformLogs(logs, header)
-		if transformErr != nil {
-			logrus.Error("Could not transform logs: ", transformErr)
-			return transformErr
-		}
-	}
-	return err
-}
-
-func (watcher *EventWatcher) transformLogs(logs []types.Log, header core.Header) error {
-	chunkedLogs := watcher.Chunker.ChunkLogs(logs)
-
-	// Can't quit early and mark as checked if there are no logs. If we are running continuousLogSync,
-	// not all logs we're interested in might have been fetched.
-	for _, t := range watcher.Transformers {
-		transformerName := t.GetConfig().TransformerName
-		logChunk := chunkedLogs[transformerName]
-		err := t.Execute(logChunk, header)
-		if err != nil {
-			logrus.Errorf("%v transformer failed to execute in watcher: %v", transformerName, err)
+		t := initializer(watcher.db)
+		watcher.LogDelegator.AddTransformer(t)
+		err := watcher.LogExtractor.AddTransformerConfig(t.GetConfig())
+		if err != nil {
 			return err
 		}
 	}
 	return nil
 }

-func earlierStartingBlockNumber(transformerBlock, watcherBlock int64) bool {
-	return transformerBlock < watcherBlock
-}
+// Extracts and delegates watched log events.
+func (watcher *EventWatcher) Execute(recheckHeaders constants.TransformerExecution) error {
+	delegateErrsChan := make(chan error)
+	extractErrsChan := make(chan error)
+	defer close(delegateErrsChan)
+	defer close(extractErrsChan)
+
+	go watcher.extractLogs(recheckHeaders, extractErrsChan)
+	go watcher.delegateLogs(delegateErrsChan)
+
+	for {
+		select {
+		case delegateErr := <-delegateErrsChan:
+			logrus.Errorf("error delegating logs in event watcher: %s", delegateErr.Error())
+			return delegateErr
+		case extractErr := <-extractErrsChan:
+			logrus.Errorf("error extracting logs in event watcher: %s", extractErr.Error())
+			return extractErr
+		}
+	}
+}
+
+func (watcher *EventWatcher) extractLogs(recheckHeaders constants.TransformerExecution, errs chan error) {
+	err := watcher.LogExtractor.ExtractLogs(recheckHeaders)
+	if err != nil && err != logs.ErrNoUncheckedHeaders {
+		errs <- err
+		return
+	}
+
+	if err == logs.ErrNoUncheckedHeaders {
+		time.Sleep(NoNewDataPause)
+		watcher.extractLogs(recheckHeaders, errs)
+	} else {
+		watcher.extractLogs(recheckHeaders, errs)
+	}
+}
+
+func (watcher *EventWatcher) delegateLogs(errs chan error) {
+	err := watcher.LogDelegator.DelegateLogs()
+	if err != nil && err != logs.ErrNoLogs {
+		errs <- err
+		return
+	}
+
+	if err == logs.ErrNoLogs {
+		time.Sleep(NoNewDataPause)
+		watcher.delegateLogs(errs)
+	} else {
+		watcher.delegateLogs(errs)
+	}
+}
@ -18,220 +18,161 @@ package watcher_test

 import (
 	"errors"

-	"github.com/ethereum/go-ethereum"
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/types"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"

 	"github.com/vulcanize/vulcanizedb/libraries/shared/constants"
+	"github.com/vulcanize/vulcanizedb/libraries/shared/logs"
 	"github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
 	"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
 	"github.com/vulcanize/vulcanizedb/libraries/shared/watcher"
-	"github.com/vulcanize/vulcanizedb/pkg/core"
-	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
-	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
 	"github.com/vulcanize/vulcanizedb/pkg/fakes"
-	"github.com/vulcanize/vulcanizedb/test_config"
 )

+var errExecuteClosed = errors.New("this error means the mocks were finished executing")
+
-var _ = Describe("Watcher", func() {
-	It("initialises correctly", func() {
-		db := test_config.NewTestDB(core.Node{ID: "testNode"})
-		bc := fakes.NewMockBlockChain()
-
-		w := watcher.NewEventWatcher(db, bc)
-
-		Expect(w.DB).To(Equal(db))
-		Expect(w.Fetcher).NotTo(BeNil())
-		Expect(w.Chunker).NotTo(BeNil())
-	})
-
-	It("adds transformers", func() {
-		w := watcher.NewEventWatcher(nil, nil)
-		fakeTransformer := &mocks.MockTransformer{}
-		fakeTransformer.SetTransformerConfig(mocks.FakeTransformerConfig)
-		w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer.FakeTransformerInitializer})
-
-		Expect(len(w.Transformers)).To(Equal(1))
-		Expect(w.Transformers).To(ConsistOf(fakeTransformer))
-		Expect(w.Topics).To(Equal([]common.Hash{common.HexToHash("FakeTopic")}))
-		Expect(w.Addresses).To(Equal([]common.Address{common.HexToAddress("FakeAddress")}))
-	})
-
-	It("adds transformers from multiple sources", func() {
-		w := watcher.NewEventWatcher(nil, nil)
-		fakeTransformer1 := &mocks.MockTransformer{}
-		fakeTransformer1.SetTransformerConfig(mocks.FakeTransformerConfig)
-
-		fakeTransformer2 := &mocks.MockTransformer{}
-		fakeTransformer2.SetTransformerConfig(mocks.FakeTransformerConfig)
-
-		w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer1.FakeTransformerInitializer})
-		w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer2.FakeTransformerInitializer})
-
-		Expect(len(w.Transformers)).To(Equal(2))
-		Expect(w.Topics).To(Equal([]common.Hash{common.HexToHash("FakeTopic"),
-			common.HexToHash("FakeTopic")}))
-		Expect(w.Addresses).To(Equal([]common.Address{common.HexToAddress("FakeAddress"),
-			common.HexToAddress("FakeAddress")}))
-	})
-
-	It("calculates earliest starting block number", func() {
-		fakeTransformer1 := &mocks.MockTransformer{}
-		fakeTransformer1.SetTransformerConfig(transformer.EventTransformerConfig{StartingBlockNumber: 5})
-
-		fakeTransformer2 := &mocks.MockTransformer{}
-		fakeTransformer2.SetTransformerConfig(transformer.EventTransformerConfig{StartingBlockNumber: 3})
-
-		w := watcher.NewEventWatcher(nil, nil)
-		w.AddTransformers([]transformer.EventTransformerInitializer{
-			fakeTransformer1.FakeTransformerInitializer,
-			fakeTransformer2.FakeTransformerInitializer,
-		})
-
-		Expect(*w.StartingBlock).To(Equal(int64(3)))
-	})
-
-	It("returns an error when run without transformers", func() {
-		w := watcher.NewEventWatcher(nil, nil)
-		err := w.Execute(constants.HeaderMissing)
-		Expect(err).To(MatchError("No transformers added to watcher"))
-	})
-
-	Describe("with missing headers", func() {
-		var (
-			db               *postgres.DB
-			w                watcher.EventWatcher
-			mockBlockChain   fakes.MockBlockChain
-			headerRepository repositories.HeaderRepository
-			repository       mocks.MockWatcherRepository
-		)
-
-		BeforeEach(func() {
-			db = test_config.NewTestDB(test_config.NewTestNode())
-			test_config.CleanTestDB(db)
-			mockBlockChain = fakes.MockBlockChain{}
-			headerRepository = repositories.NewHeaderRepository(db)
-			_, err := headerRepository.CreateOrUpdateHeader(fakes.FakeHeader)
-			Expect(err).NotTo(HaveOccurred())
-
-			repository = mocks.MockWatcherRepository{}
-			w = watcher.NewEventWatcher(db, &mockBlockChain)
-		})
-
-		It("syncs transactions for fetched logs", func() {
-			fakeTransformer := &mocks.MockTransformer{}
-			w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer.FakeTransformerInitializer})
-			repository.SetMissingHeaders([]core.Header{fakes.FakeHeader})
-			mockTransactionSyncer := &fakes.MockTransactionSyncer{}
-			w.Syncer = mockTransactionSyncer
-
-			err := w.Execute(constants.HeaderMissing)
-
-			Expect(err).NotTo(HaveOccurred())
-			Expect(mockTransactionSyncer.SyncTransactionsCalled).To(BeTrue())
-		})
-
-		It("returns error if syncing transactions fails", func() {
-			fakeTransformer := &mocks.MockTransformer{}
-			w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer.FakeTransformerInitializer})
-			repository.SetMissingHeaders([]core.Header{fakes.FakeHeader})
-			mockTransactionSyncer := &fakes.MockTransactionSyncer{}
-			mockTransactionSyncer.SyncTransactionsError = fakes.FakeError
-			w.Syncer = mockTransactionSyncer
-
-			err := w.Execute(constants.HeaderMissing)
-
-			Expect(err).To(HaveOccurred())
-			Expect(err).To(MatchError(fakes.FakeError))
-		})
-
-		It("executes each transformer", func() {
-			fakeTransformer := &mocks.MockTransformer{}
-			w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer.FakeTransformerInitializer})
-			repository.SetMissingHeaders([]core.Header{fakes.FakeHeader})
-
-			err := w.Execute(constants.HeaderMissing)
-			Expect(err).NotTo(HaveOccurred())
-			Expect(fakeTransformer.ExecuteWasCalled).To(BeTrue())
-		})
-
-		It("returns an error if transformer returns an error", func() {
-			fakeTransformer := &mocks.MockTransformer{ExecuteError: errors.New("Something bad happened")}
-			w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer.FakeTransformerInitializer})
-			repository.SetMissingHeaders([]core.Header{fakes.FakeHeader})
-
-			err := w.Execute(constants.HeaderMissing)
-			Expect(err).To(HaveOccurred())
-			Expect(fakeTransformer.ExecuteWasCalled).To(BeFalse())
-		})
-
-		It("passes only relevant logs to each transformer", func() {
-			transformerA := &mocks.MockTransformer{}
-			transformerB := &mocks.MockTransformer{}
-
-			configA := transformer.EventTransformerConfig{TransformerName: "transformerA",
-				ContractAddresses: []string{"0x000000000000000000000000000000000000000A"},
-				Topic:             "0xA"}
-			configB := transformer.EventTransformerConfig{TransformerName: "transformerB",
-				ContractAddresses: []string{"0x000000000000000000000000000000000000000b"},
-				Topic:             "0xB"}
-
-			transformerA.SetTransformerConfig(configA)
-			transformerB.SetTransformerConfig(configB)
-
-			logA := types.Log{Address: common.HexToAddress("0xA"),
-				Topics: []common.Hash{common.HexToHash("0xA")}}
-			logB := types.Log{Address: common.HexToAddress("0xB"),
-				Topics: []common.Hash{common.HexToHash("0xB")}}
-			mockBlockChain.SetGetEthLogsWithCustomQueryReturnLogs([]types.Log{logA, logB})
-
-			repository.SetMissingHeaders([]core.Header{fakes.FakeHeader})
-			w = watcher.NewEventWatcher(db, &mockBlockChain)
-			w.AddTransformers([]transformer.EventTransformerInitializer{
-				transformerA.FakeTransformerInitializer, transformerB.FakeTransformerInitializer})
-
-			err := w.Execute(constants.HeaderMissing)
-			Expect(err).NotTo(HaveOccurred())
-			Expect(transformerA.PassedLogs).To(Equal([]types.Log{logA}))
-			Expect(transformerB.PassedLogs).To(Equal([]types.Log{logB}))
-		})
-
-		Describe("uses the LogFetcher correctly:", func() {
-			var fakeTransformer mocks.MockTransformer
+var _ = Describe("Event Watcher", func() {
+	var (
+		delegator    *mocks.MockLogDelegator
+		extractor    *mocks.MockLogExtractor
+		eventWatcher *watcher.EventWatcher
+	)
+
+	BeforeEach(func() {
+		delegator = &mocks.MockLogDelegator{}
+		extractor = &mocks.MockLogExtractor{}
+		eventWatcher = &watcher.EventWatcher{
+			LogDelegator: delegator,
+			LogExtractor: extractor,
+		}
+	})
+
+	Describe("AddTransformers", func() {
+		var (
+			fakeTransformerOne, fakeTransformerTwo *mocks.MockEventTransformer
+		)
+
 		BeforeEach(func() {
-			repository.SetMissingHeaders([]core.Header{fakes.FakeHeader})
-			fakeTransformer = mocks.MockTransformer{}
-		})
+			fakeTransformerOne = &mocks.MockEventTransformer{}
+			fakeTransformerOne.SetTransformerConfig(mocks.FakeTransformerConfig)
+			fakeTransformerTwo = &mocks.MockEventTransformer{}
+			fakeTransformerTwo.SetTransformerConfig(mocks.FakeTransformerConfig)
+			initializers := []transformer.EventTransformerInitializer{
+				fakeTransformerOne.FakeTransformerInitializer,
+				fakeTransformerTwo.FakeTransformerInitializer,
+			}
-			It("fetches logs for added transformers", func() {
|
err := eventWatcher.AddTransformers(initializers)
|
||||||
addresses := []string{"0xA", "0xB"}
|
|
||||||
topic := "0x1"
|
|
||||||
fakeTransformer.SetTransformerConfig(transformer.EventTransformerConfig{
|
|
||||||
Topic: topic, ContractAddresses: addresses})
|
|
||||||
w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer.FakeTransformerInitializer})
|
|
||||||
|
|
||||||
err := w.Execute(constants.HeaderMissing)
|
|
||||||
Expect(err).NotTo(HaveOccurred())
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
})
|
||||||
|
|
||||||
fakeHash := common.HexToHash(fakes.FakeHeader.Hash)
|
It("adds initialized transformer to log delegator", func() {
|
||||||
mockBlockChain.AssertGetEthLogsWithCustomQueryCalledWith(ethereum.FilterQuery{
|
expectedTransformers := []transformer.EventTransformer{
|
||||||
BlockHash: &fakeHash,
|
fakeTransformerOne,
|
||||||
Addresses: transformer.HexStringsToAddresses(addresses),
|
fakeTransformerTwo,
|
||||||
Topics: [][]common.Hash{{common.HexToHash(topic)}},
|
}
|
||||||
|
Expect(delegator.AddedTransformers).To(Equal(expectedTransformers))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("adds transformer config to log extractor", func() {
|
||||||
|
expectedConfigs := []transformer.EventTransformerConfig{
|
||||||
|
mocks.FakeTransformerConfig,
|
||||||
|
mocks.FakeTransformerConfig,
|
||||||
|
}
|
||||||
|
Expect(extractor.AddedConfigs).To(Equal(expectedConfigs))
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
It("propagates log fetcher errors", func() {
|
Describe("Execute", func() {
|
||||||
fetcherError := errors.New("FetcherError")
|
|
||||||
mockBlockChain.SetGetEthLogsWithCustomQueryErr(fetcherError)
|
|
||||||
|
|
||||||
w.AddTransformers([]transformer.EventTransformerInitializer{fakeTransformer.FakeTransformerInitializer})
|
It("extracts watched logs", func(done Done) {
|
||||||
err := w.Execute(constants.HeaderMissing)
|
delegator.DelegateErrors = []error{logs.ErrNoLogs}
|
||||||
Expect(err).To(MatchError(fetcherError))
|
extractor.ExtractLogsErrors = []error{nil, errExecuteClosed}
|
||||||
|
|
||||||
|
err := eventWatcher.Execute(constants.HeaderUnchecked)
|
||||||
|
|
||||||
|
Expect(err).To(MatchError(errExecuteClosed))
|
||||||
|
Eventually(func() bool {
|
||||||
|
return extractor.ExtractLogsCount > 0
|
||||||
|
}).Should(BeTrue())
|
||||||
|
close(done)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
It("returns error if extracting logs fails", func(done Done) {
|
||||||
|
delegator.DelegateErrors = []error{logs.ErrNoLogs}
|
||||||
|
extractor.ExtractLogsErrors = []error{fakes.FakeError}
|
||||||
|
|
||||||
|
err := eventWatcher.Execute(constants.HeaderUnchecked)
|
||||||
|
|
||||||
|
Expect(err).To(MatchError(fakes.FakeError))
|
||||||
|
close(done)
|
||||||
|
})
|
||||||
|
|
||||||
|
It("extracts watched logs again if missing headers found", func(done Done) {
|
||||||
|
delegator.DelegateErrors = []error{logs.ErrNoLogs}
|
||||||
|
extractor.ExtractLogsErrors = []error{nil, errExecuteClosed}
|
||||||
|
|
||||||
|
err := eventWatcher.Execute(constants.HeaderUnchecked)
|
||||||
|
|
||||||
|
Expect(err).To(MatchError(errExecuteClosed))
|
||||||
|
Eventually(func() bool {
|
||||||
|
return extractor.ExtractLogsCount > 1
|
||||||
|
}).Should(BeTrue())
|
||||||
|
close(done)
|
||||||
|
})
|
||||||
|
|
||||||
|
It("returns error if extracting logs fails on subsequent run", func(done Done) {
|
||||||
|
delegator.DelegateErrors = []error{logs.ErrNoLogs}
|
||||||
|
extractor.ExtractLogsErrors = []error{nil, fakes.FakeError}
|
||||||
|
|
||||||
|
err := eventWatcher.Execute(constants.HeaderUnchecked)
|
||||||
|
|
||||||
|
Expect(err).To(MatchError(fakes.FakeError))
|
||||||
|
close(done)
|
||||||
|
})
|
||||||
|
|
||||||
|
It("delegates untransformed logs", func() {
|
||||||
|
delegator.DelegateErrors = []error{nil, errExecuteClosed}
|
||||||
|
extractor.ExtractLogsErrors = []error{logs.ErrNoUncheckedHeaders}
|
||||||
|
|
||||||
|
err := eventWatcher.Execute(constants.HeaderUnchecked)
|
||||||
|
|
||||||
|
Expect(err).To(MatchError(errExecuteClosed))
|
||||||
|
Eventually(func() bool {
|
||||||
|
return delegator.DelegateCallCount > 0
|
||||||
|
}).Should(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("returns error if delegating logs fails", func(done Done) {
|
||||||
|
delegator.DelegateErrors = []error{fakes.FakeError}
|
||||||
|
extractor.ExtractLogsErrors = []error{logs.ErrNoUncheckedHeaders}
|
||||||
|
|
||||||
|
err := eventWatcher.Execute(constants.HeaderUnchecked)
|
||||||
|
|
||||||
|
Expect(err).To(MatchError(fakes.FakeError))
|
||||||
|
close(done)
|
||||||
|
})
|
||||||
|
|
||||||
|
It("delegates logs again if untransformed logs found", func(done Done) {
|
||||||
|
delegator.DelegateErrors = []error{nil, nil, nil, errExecuteClosed}
|
||||||
|
extractor.ExtractLogsErrors = []error{logs.ErrNoUncheckedHeaders}
|
||||||
|
|
||||||
|
err := eventWatcher.Execute(constants.HeaderUnchecked)
|
||||||
|
|
||||||
|
Expect(err).To(MatchError(errExecuteClosed))
|
||||||
|
Eventually(func() bool {
|
||||||
|
return delegator.DelegateCallCount > 1
|
||||||
|
}).Should(BeTrue())
|
||||||
|
close(done)
|
||||||
|
})
|
||||||
|
|
||||||
|
It("returns error if delegating logs fails on subsequent run", func(done Done) {
|
||||||
|
delegator.DelegateErrors = []error{nil, fakes.FakeError}
|
||||||
|
extractor.ExtractLogsErrors = []error{logs.ErrNoUncheckedHeaders}
|
||||||
|
|
||||||
|
err := eventWatcher.Execute(constants.HeaderUnchecked)
|
||||||
|
|
||||||
|
Expect(err).To(MatchError(fakes.FakeError))
|
||||||
|
close(done)
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
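Note on the rewritten spec above: the watcher is now composed of a LogExtractor (persists raw logs for unchecked headers) and a LogDelegator (fans persisted logs out to transformers), and Execute drives both until the first hard error surfaces. The mocks' logs.ErrNoLogs / logs.ErrNoUncheckedHeaders sentinels mean "nothing to do yet, go around again", while errExecuteClosed is simply the spec's way of breaking out of the loop. A minimal, self-contained Go sketch of the loop shape the tests imply (the names, sentinels, and structure here are assumptions, not the actual implementation):

    package watchersketch

    import "errors"

    // Assumed stand-ins for the sentinels the spec uses (logs.ErrNoLogs,
    // logs.ErrNoUncheckedHeaders); the real definitions may differ.
    var (
        errNoLogs             = errors.New("no logs available for transformation")
        errNoUncheckedHeaders = errors.New("no unchecked headers available for log fetching")
    )

    type logExtractor interface{ ExtractLogs() error }
    type logDelegator interface{ DelegateLogs() error }

    // execute runs extraction and delegation concurrently; "no work yet"
    // sentinels trigger another pass, and the first hard error (like the
    // spec's errExecuteClosed) is returned to the caller.
    func execute(extractor logExtractor, delegator logDelegator) error {
        errs := make(chan error)
        go func() {
            for {
                if err := extractor.ExtractLogs(); err != nil && err != errNoUncheckedHeaders {
                    errs <- err
                    return
                }
            }
        }()
        go func() {
            for {
                if err := delegator.DelegateLogs(); err != nil && err != errNoLogs {
                    errs <- err
                    return
                }
            }
        }()
        return <-errs
    }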
@ -81,7 +81,7 @@ func (r *blockRetriever) retrieveFirstBlockFromLogs(contractAddr string) (int64,
     var firstBlock int
     err := r.db.Get(
         &firstBlock,
-        "SELECT block_number FROM logs WHERE lower(address) = $1 ORDER BY block_number ASC LIMIT 1",
+        "SELECT block_number FROM full_sync_logs WHERE lower(address) = $1 ORDER BY block_number ASC LIMIT 1",
         contractAddr,
     )

@ -72,7 +72,7 @@ var _ = Describe("Block Retriever", func() {
                 Receipt: core.Receipt{
                     TxHash:          "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad6546ae",
                     ContractAddress: constants.TusdContractAddress,
-                    Logs:            []core.Log{},
+                    Logs:            []core.FullSyncLog{},
                 },
                 TxIndex: 0,
                 Value:   "0",
@ -92,7 +92,7 @@ var _ = Describe("Block Retriever", func() {
                 Receipt: core.Receipt{
                     TxHash:          "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad234hfs",
                     ContractAddress: constants.TusdContractAddress,
-                    Logs: []core.Log{{
+                    Logs: []core.FullSyncLog{{
                         BlockNumber: 3,
                         TxHash:      "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad234hfs",
                         Address:     constants.TusdContractAddress,
@ -136,7 +136,7 @@ var _ = Describe("Block Retriever", func() {
                 Receipt: core.Receipt{
                     TxHash:          "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad6546ae",
                     ContractAddress: "",
-                    Logs: []core.Log{{
+                    Logs: []core.FullSyncLog{{
                         BlockNumber: 2,
                         TxHash:      "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad6546ae",
                         Address:     constants.DaiContractAddress,
@ -167,7 +167,7 @@ var _ = Describe("Block Retriever", func() {
                 Receipt: core.Receipt{
                     TxHash:          "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad234hfs",
                     ContractAddress: "",
-                    Logs: []core.Log{{
+                    Logs: []core.FullSyncLog{{
                         BlockNumber: 3,
                         TxHash:      "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad234hfs",
                         Address:     constants.DaiContractAddress,
@ -39,7 +39,7 @@ import (

 type TransferLog struct {
     Id             int64  `db:"id"`
-    VulvanizeLogId int64  `db:"vulcanize_log_id"`
+    VulcanizeLogId int64  `db:"vulcanize_log_id"`
     TokenName      string `db:"token_name"`
     Block          int64  `db:"block"`
     Tx             string `db:"tx"`
@ -50,7 +50,7 @@ type TransferLog struct {

 type NewOwnerLog struct {
     Id             int64  `db:"id"`
-    VulvanizeLogId int64  `db:"vulcanize_log_id"`
+    VulcanizeLogId int64  `db:"vulcanize_log_id"`
     TokenName      string `db:"token_name"`
     Block          int64  `db:"block"`
     Tx             string `db:"tx"`
@ -138,18 +138,18 @@ func SetupTusdRepo(vulcanizeLogId *int64, wantedEvents, wantedMethods []string)
     Expect(err).NotTo(HaveOccurred())

     receiptRepository := repositories.FullSyncReceiptRepository{DB: db}
-    logRepository := repositories.LogRepository{DB: db}
+    logRepository := repositories.FullSyncLogRepository{DB: db}
     blockRepository := *repositories.NewBlockRepository(db)

     blockNumber := rand.Int63()
     blockId := CreateBlock(blockNumber, blockRepository)

-    receipts := []core.Receipt{{Logs: []core.Log{{}}}}
+    receipts := []core.Receipt{{Logs: []core.FullSyncLog{{}}}}

     err = receiptRepository.CreateReceiptsAndLogs(blockId, receipts)
     Expect(err).ToNot(HaveOccurred())

-    err = logRepository.Get(vulcanizeLogId, `SELECT id FROM logs`)
+    err = logRepository.Get(vulcanizeLogId, `SELECT id FROM full_sync_logs`)
     Expect(err).ToNot(HaveOccurred())

     info := SetupTusdContract(wantedEvents, wantedMethods)
@ -184,18 +184,18 @@ func SetupENSRepo(vulcanizeLogId *int64, wantedEvents, wantedMethods []string) (
     Expect(err).NotTo(HaveOccurred())

     receiptRepository := repositories.FullSyncReceiptRepository{DB: db}
-    logRepository := repositories.LogRepository{DB: db}
+    logRepository := repositories.FullSyncLogRepository{DB: db}
     blockRepository := *repositories.NewBlockRepository(db)

     blockNumber := rand.Int63()
     blockId := CreateBlock(blockNumber, blockRepository)

-    receipts := []core.Receipt{{Logs: []core.Log{{}}}}
+    receipts := []core.Receipt{{Logs: []core.FullSyncLog{{}}}}

     err = receiptRepository.CreateReceiptsAndLogs(blockId, receipts)
     Expect(err).ToNot(HaveOccurred())

-    err = logRepository.Get(vulcanizeLogId, `SELECT id FROM logs`)
+    err = logRepository.Get(vulcanizeLogId, `SELECT id FROM full_sync_logs`)
     Expect(err).ToNot(HaveOccurred())

     info := SetupENSContract(wantedEvents, wantedMethods)
@ -221,6 +221,7 @@ func SetupENSContract(wantedEvents, wantedMethods []string) *contract.Contract {
     }.Init()
 }

+// TODO: tear down/setup DB from migrations so this doesn't alter the schema between tests
 func TearDown(db *postgres.DB) {
     tx, err := db.Beginx()
     Expect(err).NotTo(HaveOccurred())
@ -234,7 +235,7 @@ func TearDown(db *postgres.DB) {
     _, err = tx.Exec(`DELETE FROM headers`)
     Expect(err).NotTo(HaveOccurred())

-    _, err = tx.Exec(`DELETE FROM logs`)
+    _, err = tx.Exec(`DELETE FROM full_sync_logs`)
     Expect(err).NotTo(HaveOccurred())

     _, err = tx.Exec(`DELETE FROM log_filters`)
@ -255,7 +256,10 @@ func TearDown(db *postgres.DB) {
     _, err = tx.Exec(`DROP TABLE checked_headers`)
     Expect(err).NotTo(HaveOccurred())

-    _, err = tx.Exec(`CREATE TABLE checked_headers (id SERIAL PRIMARY KEY, header_id INTEGER UNIQUE NOT NULL REFERENCES headers (id) ON DELETE CASCADE);`)
+    _, err = tx.Exec(`CREATE TABLE checked_headers (
+        id SERIAL PRIMARY KEY,
+        header_id INTEGER UNIQUE NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
+        check_count INTEGER NOT NULL DEFAULT 1);`)
     Expect(err).NotTo(HaveOccurred())

     _, err = tx.Exec(`DROP SCHEMA IF EXISTS full_0x8dd5fbce2f6a956c3022ba3663759011dd51e73e CASCADE`)
@ -41,7 +41,7 @@ var TransferBlock1 = core.Block{
         Receipt: core.Receipt{
             TxHash:          "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad654aaa",
             ContractAddress: "",
-            Logs: []core.Log{{
+            Logs: []core.FullSyncLog{{
                 BlockNumber: 6194633,
                 TxHash:      "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad654aaa",
                 Address:     constants.TusdContractAddress,
@ -71,7 +71,7 @@ var TransferBlock2 = core.Block{
         Receipt: core.Receipt{
             TxHash:          "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad654eee",
             ContractAddress: "",
-            Logs: []core.Log{{
+            Logs: []core.FullSyncLog{{
                 BlockNumber: 6194634,
                 TxHash:      "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad654eee",
                 Address:     constants.TusdContractAddress,
@ -101,7 +101,7 @@ var NewOwnerBlock1 = core.Block{
         Receipt: core.Receipt{
             TxHash:          "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad654bbb",
             ContractAddress: "",
-            Logs: []core.Log{{
+            Logs: []core.FullSyncLog{{
                 BlockNumber: 6194635,
                 TxHash:      "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad654bbb",
                 Address:     constants.EnsContractAddress,
@ -131,7 +131,7 @@ var NewOwnerBlock2 = core.Block{
         Receipt: core.Receipt{
             TxHash:          "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad654lll",
             ContractAddress: "",
-            Logs: []core.Log{{
+            Logs: []core.FullSyncLog{{
                 BlockNumber: 6194636,
                 TxHash:      "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad654lll",
                 Address:     constants.EnsContractAddress,
@ -140,7 +140,7 @@ func (r *eventRepository) persistHeaderSyncLogs(logs []types.Log, eventInfo type

     // Mark header as checked for this eventId
     eventId := strings.ToLower(eventInfo.Name + "_" + contractAddr)
-    err = repository.MarkHeaderCheckedInTransaction(logs[0].Id, tx, eventId) // This assumes all logs are from same block
+    err = repository.MarkContractWatcherHeaderCheckedInTransaction(logs[0].Id, tx, eventId) // This assumes all logs are from same block
     if err != nil {
         tx.Rollback()
         return err
@ -231,7 +231,7 @@ func (r *eventRepository) newEventTable(tableID string, event types.Event) error
         for _, field := range event.Fields {
             pgStr = pgStr + fmt.Sprintf(" %s_ %s NOT NULL,", strings.ToLower(field.Name), field.PgType)
         }
-        pgStr = pgStr + " CONSTRAINT log_index_fk FOREIGN KEY (vulcanize_log_id) REFERENCES logs (id) ON DELETE CASCADE)"
+        pgStr = pgStr + " CONSTRAINT log_index_fk FOREIGN KEY (vulcanize_log_id) REFERENCES full_sync_logs (id) ON DELETE CASCADE)"
     case types.HeaderSync:
         pgStr = pgStr + "(id SERIAL, header_id INTEGER NOT NULL REFERENCES headers (id) ON DELETE CASCADE, token_name CHARACTER VARYING(66) NOT NULL, raw_log JSONB, log_idx INTEGER NOT NULL, tx_idx INTEGER NOT NULL,"

@ -158,7 +158,7 @@ var _ = Describe("Repository", func() {
             Expect(err).ToNot(HaveOccurred())
             expectedLog := test_helpers.TransferLog{
                 Id:             1,
-                VulvanizeLogId: vulcanizeLogId,
+                VulcanizeLogId: vulcanizeLogId,
                 TokenName:      "TrueUSD",
                 Block:          5488076,
                 Tx:             "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad6546ae",
@ -180,7 +180,7 @@ var _ = Describe("Repository", func() {
             Expect(err).ToNot(HaveOccurred())
             expectedLog := test_helpers.TransferLog{
                 Id:             1,
-                VulvanizeLogId: vulcanizeLogId,
+                VulcanizeLogId: vulcanizeLogId,
                 TokenName:      "TrueUSD",
                 Block:          5488076,
                 Tx:             "0x135391a0962a63944e5908e6fedfff90fb4be3e3290a21017861099bad6546ae",
@ -31,7 +31,7 @@ type BlockChain interface {
     GetEthLogsWithCustomQuery(query ethereum.FilterQuery) ([]types.Log, error)
     GetHeaderByNumber(blockNumber int64) (Header, error)
     GetHeadersByNumbers(blockNumbers []int64) ([]Header, error)
-    GetLogs(contract Contract, startingBlockNumber *big.Int, endingBlockNumber *big.Int) ([]Log, error)
+    GetFullSyncLogs(contract Contract, startingBlockNumber *big.Int, endingBlockNumber *big.Int) ([]FullSyncLog, error)
     GetTransactions(transactionHashes []common.Hash) ([]TransactionModel, error)
     LastBlock() (*big.Int, error)
     Node() Node
@ -16,7 +16,9 @@

 package core

-type Log struct {
+import "github.com/ethereum/go-ethereum/core/types"
+
+type FullSyncLog struct {
     BlockNumber int64
     TxHash      string
     Address     string
@ -24,3 +26,10 @@ type Log struct {
     Index       int64
     Data        string
 }
+
+type HeaderSyncLog struct {
+    ID          int64
+    HeaderID    int64 `db:"header_id"`
+    Log         types.Log
+    Transformed bool
+}
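The core package now carries both log shapes side by side: FullSyncLog keeps the flattened columns persisted to full_sync_logs during full sync, while HeaderSyncLog wraps an unmodified go-ethereum types.Log keyed to a header row, plus a Transformed flag. A hypothetical construction for comparison (field values invented, shown only to contrast the two shapes):

    // Flattened, string-typed row destined for full_sync_logs.
    fullSyncLog := core.FullSyncLog{
        BlockNumber: 6194633,
        TxHash:      "0xabc",
        Address:     "0x8dd5fbce2f6a956c3022ba3663759011dd51e73e",
    }

    // Raw geth log tied to a previously synced header.
    headerSyncLog := core.HeaderSyncLog{
        HeaderID:    42,          // FK into public.headers(id)
        Log:         types.Log{}, // stored as-is, no flattening
        Transformed: false,       // flipped once a transformer consumes the row
    }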
@ -21,7 +21,7 @@ type Receipt struct {
     ContractAddress   string `db:"contract_address"`
     CumulativeGasUsed uint64 `db:"cumulative_gas_used"`
     GasUsed           uint64 `db:"gas_used"`
-    Logs              []Log
+    Logs              []FullSyncLog
     StateRoot         string `db:"state_root"`
     Status            int
     TxHash            string `db:"tx_hash"`
@ -126,16 +126,16 @@ var _ = Describe("Postgres DB", func() {
     It("does not commit log if log is invalid", func() {
         //badTxHash violates db tx_hash field length
         badTxHash := fmt.Sprintf("x %s", strings.Repeat("1", 100))
-        badLog := core.Log{
+        badLog := core.FullSyncLog{
             Address:     "x123",
             BlockNumber: 1,
             TxHash:      badTxHash,
         }
         node := core.Node{GenesisBlock: "GENESIS", NetworkID: 1, ID: "x123", ClientName: "geth"}
         db, _ := postgres.NewDB(test_config.DBConfig, node)
-        logRepository := repositories.LogRepository{DB: db}
+        logRepository := repositories.FullSyncLogRepository{DB: db}

-        err := logRepository.CreateLogs([]core.Log{badLog}, 123)
+        err := logRepository.CreateLogs([]core.FullSyncLog{badLog}, 123)

         Expect(err).ToNot(BeNil())
         savedBlock, err := logRepository.GetLogs("x123", 1)
@ -19,10 +19,8 @@ package repositories
 import (
     "database/sql"
     "errors"

     "github.com/jmoiron/sqlx"
-    log "github.com/sirupsen/logrus"
+    "github.com/sirupsen/logrus"

     "github.com/vulcanize/vulcanizedb/pkg/core"
     "github.com/vulcanize/vulcanizedb/pkg/datastore"
     "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
@ -81,7 +79,7 @@ func (blockRepository BlockRepository) MissingBlockNumbers(startingBlockNumber i
         startingBlockNumber,
         highestBlockNumber, nodeId)
     if err != nil {
-        log.Error("MissingBlockNumbers: error getting blocks: ", err)
+        logrus.Error("MissingBlockNumbers: error getting blocks: ", err)
     }
     return numbers
 }
@ -112,7 +110,7 @@ func (blockRepository BlockRepository) GetBlock(blockNumber int64) (core.Block,
     case sql.ErrNoRows:
         return core.Block{}, datastore.ErrBlockDoesNotExist(blockNumber)
     default:
-        log.Error("GetBlock: error loading blocks: ", err)
+        logrus.Error("GetBlock: error loading blocks: ", err)
         return savedBlock, err
     }
 }
@ -151,7 +149,7 @@ func (blockRepository BlockRepository) insertBlock(block core.Block) (int64, err
     if insertBlockErr != nil {
         rollbackErr := tx.Rollback()
         if rollbackErr != nil {
-            log.Error("failed to rollback transaction: ", rollbackErr)
+            logrus.Error("failed to rollback transaction: ", rollbackErr)
         }
         return 0, postgres.ErrDBInsertFailed(insertBlockErr)
     }
@ -167,7 +165,7 @@ func (blockRepository BlockRepository) insertBlock(block core.Block) (int64, err
     if insertTxErr != nil {
         rollbackErr := tx.Rollback()
         if rollbackErr != nil {
-            log.Warn("failed to rollback transaction: ", rollbackErr)
+            logrus.Warn("failed to rollback transaction: ", rollbackErr)
         }
         return 0, postgres.ErrDBInsertFailed(insertTxErr)
     }
@ -176,7 +174,7 @@ func (blockRepository BlockRepository) insertBlock(block core.Block) (int64, err
     if commitErr != nil {
         rollbackErr := tx.Rollback()
         if rollbackErr != nil {
-            log.Warn("failed to rollback transaction: ", rollbackErr)
+            logrus.Warn("failed to rollback transaction: ", rollbackErr)
         }
         return 0, commitErr
     }
@ -268,10 +266,10 @@ func (blockRepository BlockRepository) getBlockHash(block core.Block) (string, b
     return retrievedBlockHash, blockExists(retrievedBlockHash)
 }

-func (blockRepository BlockRepository) createLogs(tx *sqlx.Tx, logs []core.Log, receiptId int64) error {
+func (blockRepository BlockRepository) createLogs(tx *sqlx.Tx, logs []core.FullSyncLog, receiptId int64) error {
     for _, tlog := range logs {
         _, err := tx.Exec(
-            `INSERT INTO logs (block_number, address, tx_hash, index, topic0, topic1, topic2, topic3, data, receipt_id)
+            `INSERT INTO full_sync_logs (block_number, address, tx_hash, index, topic0, topic1, topic2, topic3, data, receipt_id)
             VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
             `,
             tlog.BlockNumber, tlog.Address, tlog.TxHash, tlog.Index, tlog.Topics[0], tlog.Topics[1], tlog.Topics[2], tlog.Topics[3], tlog.Data, receiptId,
@ -305,7 +303,7 @@ func (blockRepository BlockRepository) loadBlock(blockRows *sqlx.Row) (core.Bloc
     var block b
     err := blockRows.StructScan(&block)
     if err != nil {
-        log.Error("loadBlock: error loading block: ", err)
+        logrus.Error("loadBlock: error loading block: ", err)
         return core.Block{}, err
     }
     transactionRows, err := blockRepository.database.Queryx(`
@ -323,7 +321,7 @@ func (blockRepository BlockRepository) loadBlock(blockRows *sqlx.Row) (core.Bloc
         WHERE block_id = $1
         ORDER BY hash`, block.ID)
     if err != nil {
-        log.Error("loadBlock: error fetting transactions: ", err)
+        logrus.Error("loadBlock: error fetting transactions: ", err)
         return core.Block{}, err
     }
     block.Transactions = blockRepository.LoadTransactions(transactionRows)
@ -336,7 +334,7 @@ func (blockRepository BlockRepository) LoadTransactions(transactionRows *sqlx.Ro
         var transaction core.TransactionModel
         err := transactionRows.StructScan(&transaction)
         if err != nil {
-            log.Fatal(err)
+            logrus.Fatal(err)
         }
         transactions = append(transactions, transaction)
     }
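For reference, the renamed createLogs above still flattens each receipt log into fixed topic0 through topic3 columns, binding tlog.Topics[0] through tlog.Topics[3] as $5 to $8. A rough sketch of the row shape being written (the Topics field itself is not shown in this diff; a fixed four-element string array is assumed here, and all values are hypothetical):

    // Hypothetical example row for the full_sync_logs insert above.
    tlog := core.FullSyncLog{
        BlockNumber: 6194633,
        Address:     "0x8dd5fbce2f6a956c3022ba3663759011dd51e73e",
        TxHash:      "0xabc",
        Index:       0,
        Topics:      [4]string{"0xddf252ad", "", "", ""}, // assumed shape: unused slots stay empty
        Data:        "0x",
    }
    // createLogs then writes one row per tlog, with topics in fixed columns
    // rather than a Postgres array.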
@ -159,11 +159,12 @@ var _ = Describe("Saving blocks", func() {
     })

     It("saves one uncle associated to the block", func() {
+        fakeUncle := fakes.GetFakeUncle(common.BytesToHash([]byte{1, 2, 3}).String(), "100000")
         block := core.Block{
             Hash:         fakes.FakeHash.String(),
             Number:       123,
             Transactions: []core.TransactionModel{fakes.FakeTransaction},
-            Uncles:       []core.Uncle{fakes.GetFakeUncle(common.BytesToHash([]byte{1, 2, 3}).String(), "100000")},
+            Uncles:       []core.Uncle{fakeUncle},
             UnclesReward: "156250000000000000",
         }

@ -179,20 +180,20 @@ var _ = Describe("Saving blocks", func() {
         err := db.Get(&uncleModel, `SELECT hash, reward, miner, raw, block_timestamp FROM uncles
             WHERE block_id = $1 AND hash = $2`, id, common.BytesToHash([]byte{1, 2, 3}).Hex())
         Expect(err).ToNot(HaveOccurred())
-        Expect(uncleModel.Hash).To(Equal(common.BytesToHash([]byte{1, 2, 3}).Hex()))
-        Expect(uncleModel.Reward).To(Equal("100000"))
-        Expect(uncleModel.Miner).To(Equal(fakes.FakeAddress.Hex()))
-        Expect(uncleModel.Timestamp).To(Equal("111111111"))
+        Expect(uncleModel.Hash).To(Equal(fakeUncle.Hash))
+        Expect(uncleModel.Reward).To(Equal(fakeUncle.Reward))
+        Expect(uncleModel.Miner).To(Equal(fakeUncle.Miner))
+        Expect(uncleModel.Timestamp).To(Equal(fakeUncle.Timestamp))
     })

     It("saves two uncles associated to the block", func() {
+        fakeUncleOne := fakes.GetFakeUncle(common.BytesToHash([]byte{1, 2, 3}).String(), "100000")
+        fakeUncleTwo := fakes.GetFakeUncle(common.BytesToHash([]byte{3, 2, 1}).String(), "90000")
         block := core.Block{
             Hash:         fakes.FakeHash.String(),
             Number:       123,
             Transactions: []core.TransactionModel{fakes.FakeTransaction},
-            Uncles: []core.Uncle{
-                fakes.GetFakeUncle(common.BytesToHash([]byte{1, 2, 3}).String(), "100000"),
-                fakes.GetFakeUncle(common.BytesToHash([]byte{3, 2, 1}).String(), "90000")},
+            Uncles:       []core.Uncle{fakeUncleOne, fakeUncleTwo},
             UnclesReward: "312500000000000000",
         }

@ -210,18 +211,18 @@ var _ = Describe("Saving blocks", func() {
         err := db.Get(&uncleModel, `SELECT hash, reward, miner, raw, block_timestamp FROM uncles
             WHERE block_id = $1 AND hash = $2`, id, common.BytesToHash([]byte{1, 2, 3}).Hex())
         Expect(err).ToNot(HaveOccurred())
-        Expect(uncleModel.Hash).To(Equal(common.BytesToHash([]byte{1, 2, 3}).Hex()))
-        Expect(uncleModel.Reward).To(Equal("100000"))
-        Expect(uncleModel.Miner).To(Equal(fakes.FakeAddress.Hex()))
-        Expect(uncleModel.Timestamp).To(Equal("111111111"))
+        Expect(uncleModel.Hash).To(Equal(fakeUncleOne.Hash))
+        Expect(uncleModel.Reward).To(Equal(fakeUncleOne.Reward))
+        Expect(uncleModel.Miner).To(Equal(fakeUncleOne.Miner))
+        Expect(uncleModel.Timestamp).To(Equal(fakeUncleOne.Timestamp))

         err = db.Get(&uncleModel, `SELECT hash, reward, miner, raw, block_timestamp FROM uncles
             WHERE block_id = $1 AND hash = $2`, id, common.BytesToHash([]byte{3, 2, 1}).Hex())
         Expect(err).ToNot(HaveOccurred())
-        Expect(uncleModel.Hash).To(Equal(common.BytesToHash([]byte{3, 2, 1}).Hex()))
-        Expect(uncleModel.Reward).To(Equal("90000"))
-        Expect(uncleModel.Miner).To(Equal(fakes.FakeAddress.Hex()))
-        Expect(uncleModel.Timestamp).To(Equal("111111111"))
+        Expect(uncleModel.Hash).To(Equal(fakeUncleTwo.Hash))
+        Expect(uncleModel.Reward).To(Equal(fakeUncleTwo.Reward))
+        Expect(uncleModel.Miner).To(Equal(fakeUncleTwo.Miner))
+        Expect(uncleModel.Timestamp).To(Equal(fakeUncleTwo.Timestamp))
     })

     It(`replaces blocks and transactions associated to the block
@ -0,0 +1,72 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package repositories
+
+import (
+    "github.com/vulcanize/vulcanizedb/pkg/core"
+    "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+)
+
+const (
+    insertCheckedHeaderQuery = `UPDATE public.headers SET check_count = (SELECT check_count WHERE id = $1) + 1 WHERE id = $1`
+)
+
+type CheckedHeadersRepository struct {
+    db *postgres.DB
+}
+
+func NewCheckedHeadersRepository(db *postgres.DB) CheckedHeadersRepository {
+    return CheckedHeadersRepository{db: db}
+}
+
+// Increment check_count for header
+func (repo CheckedHeadersRepository) MarkHeaderChecked(headerID int64) error {
+    _, err := repo.db.Exec(insertCheckedHeaderQuery, headerID)
+    return err
+}
+
+// Zero out check count for headers with block number >= startingBlockNumber
+func (repo CheckedHeadersRepository) MarkHeadersUnchecked(startingBlockNumber int64) error {
+    _, err := repo.db.Exec(`UPDATE public.headers SET check_count = 0 WHERE block_number >= $1`, startingBlockNumber)
+    return err
+}
+
+// Return header if check_count < passed checkCount
+func (repo CheckedHeadersRepository) UncheckedHeaders(startingBlockNumber, endingBlockNumber, checkCount int64) ([]core.Header, error) {
+    var result []core.Header
+    var query string
+    var err error
+
+    if endingBlockNumber == -1 {
+        query = `SELECT id, block_number, hash
+            FROM headers
+            WHERE check_count < $2
+            AND block_number >= $1
+            AND eth_node_fingerprint = $3`
+        err = repo.db.Select(&result, query, startingBlockNumber, checkCount, repo.db.Node.ID)
+    } else {
+        query = `SELECT id, block_number, hash
+            FROM headers
+            WHERE check_count < $3
+            AND block_number >= $1
+            AND block_number <= $2
+            AND eth_node_fingerprint = $4`
+        err = repo.db.Select(&result, query, startingBlockNumber, endingBlockNumber, checkCount, repo.db.Node.ID)
+    }
+
+    return result, err
+}
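A hedged sketch of how the new repository might be driven by a recheck loop. The three repository methods are the ones defined in the file above; the surrounding wiring, block heights, and the header field name (header.Id) are assumptions:

    repo := repositories.NewCheckedHeadersRepository(db)

    // Pull headers checked fewer than checkCount times from block 5000000
    // onward; an ending block of -1 means "no upper bound".
    headers, err := repo.UncheckedHeaders(5000000, -1, 1)
    if err != nil {
        logrus.Fatalf("fetching unchecked headers: %s", err.Error())
    }
    for _, header := range headers {
        // ... fetch and persist logs for this header, then bump its count:
        if markErr := repo.MarkHeaderChecked(header.Id); markErr != nil {
            logrus.Errorf("marking header checked: %s", markErr.Error())
        }
    }

    // When a newly watched event needs a backfill, zero the counts at and
    // above its deployment block so those headers are revisited.
    if uncheckErr := repo.MarkHeadersUnchecked(5000000); uncheckErr != nil {
        logrus.Errorf("marking headers unchecked: %s", uncheckErr.Error())
    }

Note that passing a higher checkCount (as the tests below do with recheckCheckCount) is what turns this into a bounded recheck rather than a one-shot scan.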
@ -0,0 +1,271 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package repositories_test
+
+import (
+    . "github.com/onsi/ginkgo"
+    . "github.com/onsi/gomega"
+    "github.com/vulcanize/vulcanizedb/pkg/core"
+    "github.com/vulcanize/vulcanizedb/pkg/datastore"
+    "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+    "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
+    "github.com/vulcanize/vulcanizedb/pkg/fakes"
+    "github.com/vulcanize/vulcanizedb/test_config"
+    "math/rand"
+)
+
+var _ = Describe("Checked Headers repository", func() {
+    var (
+        db   *postgres.DB
+        repo datastore.CheckedHeadersRepository
+    )
+
+    BeforeEach(func() {
+        db = test_config.NewTestDB(test_config.NewTestNode())
+        test_config.CleanTestDB(db)
+        repo = repositories.NewCheckedHeadersRepository(db)
+    })
+
+    AfterEach(func() {
+        closeErr := db.Close()
+        Expect(closeErr).NotTo(HaveOccurred())
+    })
+
+    Describe("MarkHeaderChecked", func() {
+        It("marks passed header as checked on insert", func() {
+            headerRepository := repositories.NewHeaderRepository(db)
+            headerID, headerErr := headerRepository.CreateOrUpdateHeader(fakes.FakeHeader)
+            Expect(headerErr).NotTo(HaveOccurred())
+
+            err := repo.MarkHeaderChecked(headerID)
+
+            Expect(err).NotTo(HaveOccurred())
+            var checkedCount int
+            fetchErr := db.Get(&checkedCount, `SELECT check_count FROM public.headers WHERE id = $1`, headerID)
+            Expect(fetchErr).NotTo(HaveOccurred())
+            Expect(checkedCount).To(Equal(1))
+        })
+
+        It("increments check count on update", func() {
+            headerRepository := repositories.NewHeaderRepository(db)
+            headerID, headerErr := headerRepository.CreateOrUpdateHeader(fakes.FakeHeader)
+            Expect(headerErr).NotTo(HaveOccurred())
+
+            insertErr := repo.MarkHeaderChecked(headerID)
+            Expect(insertErr).NotTo(HaveOccurred())
+
+            updateErr := repo.MarkHeaderChecked(headerID)
+            Expect(updateErr).NotTo(HaveOccurred())
+
+            var checkedCount int
+            fetchErr := db.Get(&checkedCount, `SELECT check_count FROM public.headers WHERE id = $1`, headerID)
+            Expect(fetchErr).NotTo(HaveOccurred())
+            Expect(checkedCount).To(Equal(2))
+        })
+    })
+
+    Describe("MarkHeadersUnchecked", func() {
+        It("removes rows for headers <= starting block number", func() {
+            blockNumberOne := rand.Int63()
+            blockNumberTwo := blockNumberOne + 1
+            blockNumberThree := blockNumberOne + 2
+            fakeHeaderOne := fakes.GetFakeHeader(blockNumberOne)
+            fakeHeaderTwo := fakes.GetFakeHeader(blockNumberTwo)
+            fakeHeaderThree := fakes.GetFakeHeader(blockNumberThree)
+            headerRepository := repositories.NewHeaderRepository(db)
+            // insert three headers with incrementing block number
+            headerIdOne, insertHeaderOneErr := headerRepository.CreateOrUpdateHeader(fakeHeaderOne)
+            Expect(insertHeaderOneErr).NotTo(HaveOccurred())
+            headerIdTwo, insertHeaderTwoErr := headerRepository.CreateOrUpdateHeader(fakeHeaderTwo)
+            Expect(insertHeaderTwoErr).NotTo(HaveOccurred())
+            headerIdThree, insertHeaderThreeErr := headerRepository.CreateOrUpdateHeader(fakeHeaderThree)
+            Expect(insertHeaderThreeErr).NotTo(HaveOccurred())
+            // mark all headers checked
+            markHeaderOneCheckedErr := repo.MarkHeaderChecked(headerIdOne)
+            Expect(markHeaderOneCheckedErr).NotTo(HaveOccurred())
+            markHeaderTwoCheckedErr := repo.MarkHeaderChecked(headerIdTwo)
+            Expect(markHeaderTwoCheckedErr).NotTo(HaveOccurred())
+            markHeaderThreeCheckedErr := repo.MarkHeaderChecked(headerIdThree)
+            Expect(markHeaderThreeCheckedErr).NotTo(HaveOccurred())
+
+            // mark headers unchecked since blockNumberTwo
+            err := repo.MarkHeadersUnchecked(blockNumberTwo)
+
+            Expect(err).NotTo(HaveOccurred())
+            var headerOneCheckCount, headerTwoCheckCount, headerThreeCheckCount int
+            getHeaderOneErr := db.Get(&headerOneCheckCount, `SELECT check_count FROM public.headers WHERE id = $1`, headerIdOne)
+            Expect(getHeaderOneErr).NotTo(HaveOccurred())
+            Expect(headerOneCheckCount).To(Equal(1))
+            getHeaderTwoErr := db.Get(&headerTwoCheckCount, `SELECT check_count FROM public.headers WHERE id = $1`, headerIdTwo)
+            Expect(getHeaderTwoErr).NotTo(HaveOccurred())
+            Expect(headerTwoCheckCount).To(BeZero())
+            getHeaderThreeErr := db.Get(&headerThreeCheckCount, `SELECT check_count FROM public.headers WHERE id = $1`, headerIdThree)
+            Expect(getHeaderThreeErr).NotTo(HaveOccurred())
+            Expect(headerThreeCheckCount).To(BeZero())
+        })
+    })
+
+    Describe("UncheckedHeaders", func() {
+        var (
+            headerRepository      datastore.HeaderRepository
+            startingBlockNumber   int64
+            endingBlockNumber     int64
+            middleBlockNumber     int64
+            outOfRangeBlockNumber int64
+            blockNumbers          []int64
+            headerIDs             []int64
+            err                   error
+            uncheckedCheckCount   = int64(1)
+            recheckCheckCount     = int64(2)
+        )
+
+        BeforeEach(func() {
+            headerRepository = repositories.NewHeaderRepository(db)
+
+            startingBlockNumber = rand.Int63()
+            middleBlockNumber = startingBlockNumber + 1
+            endingBlockNumber = startingBlockNumber + 2
+            outOfRangeBlockNumber = endingBlockNumber + 1
+
+            blockNumbers = []int64{startingBlockNumber, middleBlockNumber, endingBlockNumber, outOfRangeBlockNumber}
+
+            headerIDs = []int64{}
+            for _, n := range blockNumbers {
+                headerID, err := headerRepository.CreateOrUpdateHeader(fakes.GetFakeHeader(n))
+                headerIDs = append(headerIDs, headerID)
+                Expect(err).NotTo(HaveOccurred())
+            }
+        })
+
+        Describe("when ending block is specified", func() {
+            It("excludes headers that are out of range", func() {
+                headers, err := repo.UncheckedHeaders(startingBlockNumber, endingBlockNumber, uncheckedCheckCount)
+                Expect(err).NotTo(HaveOccurred())
+
+                headerBlockNumbers := getBlockNumbers(headers)
+                Expect(headerBlockNumbers).To(ConsistOf(startingBlockNumber, middleBlockNumber, endingBlockNumber))
+                Expect(headerBlockNumbers).NotTo(ContainElement(outOfRangeBlockNumber))
+            })
+
+            It("excludes headers that have been checked more than the check count", func() {
+                _, err = db.Exec(`UPDATE public.headers SET check_count = 1 WHERE id = $1`, headerIDs[1])
+                Expect(err).NotTo(HaveOccurred())
+
+                headers, err := repo.UncheckedHeaders(startingBlockNumber, endingBlockNumber, uncheckedCheckCount)
+                Expect(err).NotTo(HaveOccurred())
+
+                headerBlockNumbers := getBlockNumbers(headers)
+                Expect(headerBlockNumbers).To(ConsistOf(startingBlockNumber, endingBlockNumber))
+                Expect(headerBlockNumbers).NotTo(ContainElement(middleBlockNumber))
+            })
+
+            It("does not exclude headers that have been checked less than the check count", func() {
+                _, err = db.Exec(`UPDATE public.headers SET check_count = 1 WHERE id = $1`, headerIDs[1])
+                Expect(err).NotTo(HaveOccurred())
+
+                headers, err := repo.UncheckedHeaders(startingBlockNumber, endingBlockNumber, recheckCheckCount)
+                Expect(err).NotTo(HaveOccurred())
+
+                headerBlockNumbers := getBlockNumbers(headers)
+                Expect(headerBlockNumbers).To(ConsistOf(startingBlockNumber, middleBlockNumber, endingBlockNumber))
+            })
+
+            It("only returns headers associated with the current node", func() {
+                dbTwo := test_config.NewTestDB(core.Node{ID: "second"})
+                headerRepositoryTwo := repositories.NewHeaderRepository(dbTwo)
+                repoTwo := repositories.NewCheckedHeadersRepository(dbTwo)
+                for _, n := range blockNumbers {
+                    _, err = headerRepositoryTwo.CreateOrUpdateHeader(fakes.GetFakeHeader(n + 10))
+                    Expect(err).NotTo(HaveOccurred())
+                }
+
+                nodeOneMissingHeaders, err := repo.UncheckedHeaders(startingBlockNumber, endingBlockNumber, uncheckedCheckCount)
+                Expect(err).NotTo(HaveOccurred())
+                nodeOneHeaderBlockNumbers := getBlockNumbers(nodeOneMissingHeaders)
+                Expect(nodeOneHeaderBlockNumbers).To(ConsistOf(startingBlockNumber, middleBlockNumber, endingBlockNumber))
+
+                nodeTwoMissingHeaders, err := repoTwo.UncheckedHeaders(startingBlockNumber, endingBlockNumber+10, uncheckedCheckCount)
+                Expect(err).NotTo(HaveOccurred())
+                nodeTwoHeaderBlockNumbers := getBlockNumbers(nodeTwoMissingHeaders)
+                Expect(nodeTwoHeaderBlockNumbers).To(ConsistOf(startingBlockNumber+10, middleBlockNumber+10, endingBlockNumber+10))
+            })
+        })
+
+        Describe("when ending block is -1", func() {
+            var endingBlock = int64(-1)
+
+            It("includes all non-checked headers when ending block is -1 ", func() {
+                headers, err := repo.UncheckedHeaders(startingBlockNumber, endingBlock, uncheckedCheckCount)
+                Expect(err).NotTo(HaveOccurred())
+
+                headerBlockNumbers := getBlockNumbers(headers)
+                Expect(headerBlockNumbers).To(ConsistOf(startingBlockNumber, middleBlockNumber, endingBlockNumber, outOfRangeBlockNumber))
+            })
+
+            It("excludes headers that have been checked more than the check count", func() {
+                _, err = db.Exec(`UPDATE public.headers SET check_count = 1 WHERE id = $1`, headerIDs[1])
+                Expect(err).NotTo(HaveOccurred())
+
+                headers, err := repo.UncheckedHeaders(startingBlockNumber, endingBlock, uncheckedCheckCount)
+                Expect(err).NotTo(HaveOccurred())
+
+                headerBlockNumbers := getBlockNumbers(headers)
+                Expect(headerBlockNumbers).To(ConsistOf(startingBlockNumber, endingBlockNumber, outOfRangeBlockNumber))
+                Expect(headerBlockNumbers).NotTo(ContainElement(middleBlockNumber))
+            })
+
+            It("does not exclude headers that have been checked less than the check count", func() {
+                _, err = db.Exec(`UPDATE public.headers SET check_count = 1 WHERE id = $1`, headerIDs[1])
+                Expect(err).NotTo(HaveOccurred())
+
+                headers, err := repo.UncheckedHeaders(startingBlockNumber, endingBlock, recheckCheckCount)
+                Expect(err).NotTo(HaveOccurred())
+
+                headerBlockNumbers := getBlockNumbers(headers)
+                Expect(headerBlockNumbers).To(ConsistOf(startingBlockNumber, middleBlockNumber, endingBlockNumber, outOfRangeBlockNumber))
+            })
+
+            It("only returns headers associated with the current node", func() {
+                dbTwo := test_config.NewTestDB(core.Node{ID: "second"})
+                headerRepositoryTwo := repositories.NewHeaderRepository(dbTwo)
+                repoTwo := repositories.NewCheckedHeadersRepository(dbTwo)
+                for _, n := range blockNumbers {
+                    _, err = headerRepositoryTwo.CreateOrUpdateHeader(fakes.GetFakeHeader(n + 10))
+                    Expect(err).NotTo(HaveOccurred())
+                }
+
+                nodeOneMissingHeaders, err := repo.UncheckedHeaders(startingBlockNumber, endingBlock, uncheckedCheckCount)
+                Expect(err).NotTo(HaveOccurred())
+                nodeOneBlockNumbers := getBlockNumbers(nodeOneMissingHeaders)
+                Expect(nodeOneBlockNumbers).To(ConsistOf(startingBlockNumber, middleBlockNumber, endingBlockNumber, outOfRangeBlockNumber))
+
+                nodeTwoMissingHeaders, err := repoTwo.UncheckedHeaders(startingBlockNumber, endingBlock, uncheckedCheckCount)
+                Expect(err).NotTo(HaveOccurred())
+                nodeTwoBlockNumbers := getBlockNumbers(nodeTwoMissingHeaders)
+                Expect(nodeTwoBlockNumbers).To(ConsistOf(startingBlockNumber+10, middleBlockNumber+10, endingBlockNumber+10, outOfRangeBlockNumber+10))
+            })
+        })
+    })
+})
+
+func getBlockNumbers(headers []core.Header) []int64 {
+    var headerBlockNumbers []int64
+    for _, header := range headers {
+        headerBlockNumbers = append(headerBlockNumbers, header.BlockNumber)
+    }
+    return headerBlockNumbers
+}
@@ -0,0 +1,69 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package repositories

import (
	"github.com/sirupsen/logrus"

	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
)

type CheckedLogsRepository struct {
	db *postgres.DB
}

func NewCheckedLogsRepository(db *postgres.DB) CheckedLogsRepository {
	return CheckedLogsRepository{db: db}
}

// AlreadyWatchingLog returns whether a given address + topic0 combination has been fetched on a previous run of vDB
func (repository CheckedLogsRepository) AlreadyWatchingLog(addresses []string, topic0 string) (bool, error) {
	for _, address := range addresses {
		var addressExists bool
		getAddressExistsErr := repository.db.Get(&addressExists, `SELECT EXISTS(SELECT 1 FROM public.watched_logs WHERE contract_address = $1)`, address)
		if getAddressExistsErr != nil {
			return false, getAddressExistsErr
		}
		if !addressExists {
			return false, nil
		}
	}
	var topicZeroExists bool
	getTopicZeroExistsErr := repository.db.Get(&topicZeroExists, `SELECT EXISTS(SELECT 1 FROM public.watched_logs WHERE topic_zero = $1)`, topic0)
	if getTopicZeroExistsErr != nil {
		return false, getTopicZeroExistsErr
	}
	return topicZeroExists, nil
}

// MarkLogWatched persists that a given address + topic0 combination is being fetched on this run of vDB
func (repository CheckedLogsRepository) MarkLogWatched(addresses []string, topic0 string) error {
	tx, txErr := repository.db.Beginx()
	if txErr != nil {
		return txErr
	}
	for _, address := range addresses {
		_, insertErr := tx.Exec(`INSERT INTO public.watched_logs (contract_address, topic_zero) VALUES ($1, $2)`, address, topic0)
		if insertErr != nil {
			rollbackErr := tx.Rollback()
			if rollbackErr != nil {
				logrus.Errorf("error rolling back transaction inserting checked logs: %s", rollbackErr.Error())
			}
			return insertErr
		}
	}
	return tx.Commit()
}
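
Reviewer note: a minimal usage sketch (illustrative only, not part of this changeset) of how a caller with a configured *postgres.DB might consult the new repository before deciding whether to backfill logs; the helper name and variables are hypothetical:

    // Hypothetical helper, assuming a configured *postgres.DB.
    func ensureLogsWatched(db *postgres.DB, addresses []string, topic0 string) error {
    	repo := repositories.NewCheckedLogsRepository(db)
    	watched, err := repo.AlreadyWatchingLog(addresses, topic0)
    	if err != nil {
    		return err
    	}
    	if watched {
    		return nil // this combination was already fetched on a previous run
    	}
    	// First run for this combination: record it so future runs skip backfill.
    	return repo.MarkLogWatched(addresses, topic0)
    }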
@@ -0,0 +1,115 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package repositories_test

import (
	"github.com/ethereum/go-ethereum/common"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/vulcanize/vulcanizedb/pkg/datastore"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
	"github.com/vulcanize/vulcanizedb/pkg/fakes"
	"github.com/vulcanize/vulcanizedb/test_config"
)

var _ = Describe("Checked logs repository", func() {
	var (
		db            *postgres.DB
		fakeAddress   = fakes.FakeAddress.Hex()
		fakeAddresses = []string{fakeAddress}
		fakeTopicZero = fakes.FakeHash.Hex()
		repository    datastore.CheckedLogsRepository
	)

	BeforeEach(func() {
		db = test_config.NewTestDB(test_config.NewTestNode())
		test_config.CleanTestDB(db)
		repository = repositories.NewCheckedLogsRepository(db)
	})

	AfterEach(func() {
		closeErr := db.Close()
		Expect(closeErr).NotTo(HaveOccurred())
	})

	Describe("AlreadyWatchingLog", func() {
		It("returns true if all addresses and the topic0 are already present in the db", func() {
			_, insertErr := db.Exec(`INSERT INTO public.watched_logs (contract_address, topic_zero) VALUES ($1, $2)`, fakeAddress, fakeTopicZero)
			Expect(insertErr).NotTo(HaveOccurred())

			hasBeenChecked, err := repository.AlreadyWatchingLog(fakeAddresses, fakeTopicZero)

			Expect(err).NotTo(HaveOccurred())
			Expect(hasBeenChecked).To(BeTrue())
		})

		It("returns true if addresses and topic0 were fetched because of a combination of other transformers", func() {
			anotherFakeAddress := common.HexToAddress("0x" + fakes.RandomString(40)).Hex()
			anotherFakeTopicZero := common.HexToHash("0x" + fakes.RandomString(64)).Hex()
			// insert row with matching address but different topic0
			_, insertOneErr := db.Exec(`INSERT INTO public.watched_logs (contract_address, topic_zero) VALUES ($1, $2)`, fakeAddress, anotherFakeTopicZero)
			Expect(insertOneErr).NotTo(HaveOccurred())
			// insert row with matching topic0 but different address
			_, insertTwoErr := db.Exec(`INSERT INTO public.watched_logs (contract_address, topic_zero) VALUES ($1, $2)`, anotherFakeAddress, fakeTopicZero)
			Expect(insertTwoErr).NotTo(HaveOccurred())

			hasBeenChecked, err := repository.AlreadyWatchingLog(fakeAddresses, fakeTopicZero)

			Expect(err).NotTo(HaveOccurred())
			Expect(hasBeenChecked).To(BeTrue())
		})

		It("returns false if any address has not been checked", func() {
			anotherFakeAddress := common.HexToAddress("0x" + fakes.RandomString(40)).Hex()
			_, insertErr := db.Exec(`INSERT INTO public.watched_logs (contract_address, topic_zero) VALUES ($1, $2)`, fakeAddress, fakeTopicZero)
			Expect(insertErr).NotTo(HaveOccurred())

			hasBeenChecked, err := repository.AlreadyWatchingLog(append(fakeAddresses, anotherFakeAddress), fakeTopicZero)

			Expect(err).NotTo(HaveOccurred())
			Expect(hasBeenChecked).To(BeFalse())
		})

		It("returns false if topic0 has not been checked", func() {
			anotherFakeTopicZero := common.HexToHash("0x" + fakes.RandomString(64)).Hex()
			_, insertErr := db.Exec(`INSERT INTO public.watched_logs (contract_address, topic_zero) VALUES ($1, $2)`, fakeAddress, anotherFakeTopicZero)
			Expect(insertErr).NotTo(HaveOccurred())

			hasBeenChecked, err := repository.AlreadyWatchingLog(fakeAddresses, fakeTopicZero)

			Expect(err).NotTo(HaveOccurred())
			Expect(hasBeenChecked).To(BeFalse())
		})
	})

	Describe("MarkLogWatched", func() {
		It("adds a row for all of transformer's addresses + topic0", func() {
			anotherFakeAddress := common.HexToAddress("0x" + fakes.RandomString(40)).Hex()
			err := repository.MarkLogWatched(append(fakeAddresses, anotherFakeAddress), fakeTopicZero)

			Expect(err).NotTo(HaveOccurred())
			var comboOneExists, comboTwoExists bool
			getComboOneErr := db.Get(&comboOneExists, `SELECT EXISTS(SELECT 1 FROM public.watched_logs WHERE contract_address = $1 AND topic_zero = $2)`, fakeAddress, fakeTopicZero)
			Expect(getComboOneErr).NotTo(HaveOccurred())
			Expect(comboOneExists).To(BeTrue())
			getComboTwoErr := db.Get(&comboTwoExists, `SELECT EXISTS(SELECT 1 FROM public.watched_logs WHERE contract_address = $1 AND topic_zero = $2)`, anotherFakeAddress, fakeTopicZero)
			Expect(getComboTwoErr).NotTo(HaveOccurred())
			Expect(comboTwoExists).To(BeTrue())
		})
	})
})
@@ -17,23 +17,21 @@
 package repositories
 
 import (
-	"github.com/sirupsen/logrus"
-
 	"database/sql"
+
+	"github.com/sirupsen/logrus"
 	"github.com/vulcanize/vulcanizedb/pkg/core"
 	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
 )
 
-type LogRepository struct {
+type FullSyncLogRepository struct {
 	*postgres.DB
 }
 
-func (logRepository LogRepository) CreateLogs(lgs []core.Log, receiptId int64) error {
-	tx, _ := logRepository.DB.Beginx()
+func (repository FullSyncLogRepository) CreateLogs(lgs []core.FullSyncLog, receiptId int64) error {
+	tx, _ := repository.DB.Beginx()
 	for _, tlog := range lgs {
 		_, insertLogErr := tx.Exec(
-			`INSERT INTO logs (block_number, address, tx_hash, index, topic0, topic1, topic2, topic3, data, receipt_id)
+			`INSERT INTO full_sync_logs (block_number, address, tx_hash, index, topic0, topic1, topic2, topic3, data, receipt_id)
 			VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
 			`,
 			tlog.BlockNumber, tlog.Address, tlog.TxHash, tlog.Index, tlog.Topics[0], tlog.Topics[1], tlog.Topics[2], tlog.Topics[3], tlog.Data, receiptId,
@@ -57,8 +55,8 @@ func (logRepository LogRepository) CreateLogs(lgs []core.Log, receiptId int64) e
 	return nil
 }
 
-func (logRepository LogRepository) GetLogs(address string, blockNumber int64) ([]core.Log, error) {
-	logRows, err := logRepository.DB.Query(
+func (repository FullSyncLogRepository) GetLogs(address string, blockNumber int64) ([]core.FullSyncLog, error) {
+	logRows, err := repository.DB.Query(
 		`SELECT block_number,
 			address,
 			tx_hash,
@@ -68,17 +66,17 @@ func (logRepository LogRepository) GetLogs(address string, blockNumber int64) ([
 			topic2,
 			topic3,
 			data
-		FROM logs
+		FROM full_sync_logs
 		WHERE address = $1 AND block_number = $2
 		ORDER BY block_number DESC`, address, blockNumber)
 	if err != nil {
-		return []core.Log{}, err
+		return []core.FullSyncLog{}, err
 	}
-	return logRepository.loadLogs(logRows)
+	return repository.loadLogs(logRows)
 }
 
-func (logRepository LogRepository) loadLogs(logsRows *sql.Rows) ([]core.Log, error) {
-	var lgs []core.Log
+func (repository FullSyncLogRepository) loadLogs(logsRows *sql.Rows) ([]core.FullSyncLog, error) {
+	var lgs []core.FullSyncLog
 	for logsRows.Next() {
 		var blockNumber int64
 		var address string
@@ -89,9 +87,9 @@ func (logRepository LogRepository) loadLogs(logsRows *sql.Rows) ([]core.Log, err
 		err := logsRows.Scan(&blockNumber, &address, &txHash, &index, &topics[0], &topics[1], &topics[2], &topics[3], &data)
 		if err != nil {
 			logrus.Error("loadLogs: Error scanning a row in logRows: ", err)
-			return []core.Log{}, err
+			return []core.FullSyncLog{}, err
 		}
-		lg := core.Log{
+		lg := core.FullSyncLog{
 			BlockNumber: blockNumber,
 			TxHash:      txHash,
 			Address:     address,
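
Reviewer note: a minimal sketch (illustrative only, not part of this changeset) of round-tripping a log through the renamed repository; `db`, `blockNumber`, and `receiptId` are assumed to exist, and the field values are placeholders:

    // Hypothetical usage of the renamed FullSyncLogRepository.
    logsRepository := repositories.FullSyncLogRepository{DB: db}
    createErr := logsRepository.CreateLogs([]core.FullSyncLog{{
    	BlockNumber: blockNumber,
    	Index:       0,
    	Address:     "0x123", // placeholder
    	TxHash:      "0xabc", // placeholder
    	Data:        "0x",
    }}, receiptId)
    if createErr != nil {
    	logrus.Error("failed to persist full sync logs: ", createErr)
    }
    retrievedLogs, getErr := logsRepository.GetLogs("0x123", blockNumber)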
@@ -29,11 +29,11 @@ import (
 	"github.com/vulcanize/vulcanizedb/test_config"
 )
 
-var _ = Describe("Logs Repository", func() {
+var _ = Describe("Full sync log Repository", func() {
 	Describe("Saving logs", func() {
 		var db *postgres.DB
 		var blockRepository datastore.BlockRepository
-		var logsRepository datastore.LogRepository
+		var logsRepository datastore.FullSyncLogRepository
 		var receiptRepository datastore.FullSyncReceiptRepository
 		var node core.Node
 
@@ -47,7 +47,7 @@ var _ = Describe("Logs Repository", func() {
 			db = test_config.NewTestDB(node)
 			test_config.CleanTestDB(db)
 			blockRepository = repositories.NewBlockRepository(db)
-			logsRepository = repositories.LogRepository{DB: db}
+			logsRepository = repositories.FullSyncLogRepository{DB: db}
 			receiptRepository = repositories.FullSyncReceiptRepository{DB: db}
 		})
 
@@ -59,7 +59,7 @@ var _ = Describe("Logs Repository", func() {
 			receiptId, err := receiptRepository.CreateFullSyncReceiptInTx(blockId, core.Receipt{}, tx)
 			tx.Commit()
 			Expect(err).NotTo(HaveOccurred())
-			err = logsRepository.CreateLogs([]core.Log{{
+			err = logsRepository.CreateLogs([]core.FullSyncLog{{
 				BlockNumber: blockNumber,
 				Index:       0,
 				Address:     "x123",
@@ -98,7 +98,7 @@ var _ = Describe("Logs Repository", func() {
 			tx.Commit()
 			Expect(err).NotTo(HaveOccurred())
 
-			err = logsRepository.CreateLogs([]core.Log{{
+			err = logsRepository.CreateLogs([]core.FullSyncLog{{
 				BlockNumber: blockNumber,
 				Index:       0,
 				Address:     "x123",
@@ -108,7 +108,7 @@ var _ = Describe("Logs Repository", func() {
 			}}, receiptId)
 			Expect(err).NotTo(HaveOccurred())
 
-			err = logsRepository.CreateLogs([]core.Log{{
+			err = logsRepository.CreateLogs([]core.FullSyncLog{{
 				BlockNumber: blockNumber,
 				Index:       1,
 				Address:     "x123",
@@ -118,7 +118,7 @@ var _ = Describe("Logs Repository", func() {
 			}}, receiptId)
 			Expect(err).NotTo(HaveOccurred())
 
-			err = logsRepository.CreateLogs([]core.Log{{
+			err = logsRepository.CreateLogs([]core.FullSyncLog{{
 				BlockNumber: 2,
 				Index:       0,
 				Address:     "x123",
@@ -161,8 +161,7 @@ var _ = Describe("Logs Repository", func() {
 		})
 
 		It("saves the logs attached to a receipt", func() {
-			logs := []core.Log{{
+			logs := []core.FullSyncLog{{
 				Address:     "0x8a4774fe82c63484afef97ca8d89a6ea5e21f973",
 				BlockNumber: 4745407,
 				Data:        "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000645a68669900000000000000000000000000000000000000000000003397684ab5869b0000000000000000000000000000000000000000000000000000000000005a36053200000000000000000000000099041f808d598b782d5a3e498681c2452a31da08",
@@ -215,7 +214,7 @@ var _ = Describe("Logs Repository", func() {
 
 			Expect(err).NotTo(HaveOccurred())
 			expected := logs[1:]
-			Expect(retrievedLogs).To(Equal(expected))
+			Expect(retrievedLogs).To(ConsistOf(expected))
 		})
 	})
 })
@@ -20,7 +20,6 @@ import (
 	"database/sql"
 	"github.com/jmoiron/sqlx"
 	"github.com/sirupsen/logrus"
-
 	"github.com/vulcanize/vulcanizedb/pkg/core"
 	"github.com/vulcanize/vulcanizedb/pkg/datastore"
 	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
@@ -53,10 +52,25 @@ func (receiptRepository FullSyncReceiptRepository) CreateReceiptsAndLogs(blockId
 	return nil
 }
 
-func createLogs(logs []core.Log, receiptId int64, tx *sqlx.Tx) error {
+func createReceipt(receipt core.Receipt, blockId int64, tx *sqlx.Tx) (int64, error) {
+	var receiptId int64
+	err := tx.QueryRow(
+		`INSERT INTO full_sync_receipts
+		(contract_address, tx_hash, cumulative_gas_used, gas_used, state_root, status, block_id)
+		VALUES ($1, $2, $3, $4, $5, $6, $7)
+		RETURNING id`,
+		receipt.ContractAddress, receipt.TxHash, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.StateRoot, receipt.Status, blockId,
+	).Scan(&receiptId)
+	if err != nil {
+		logrus.Error("createReceipt: Error inserting: ", err)
+	}
+	return receiptId, err
+}
+
+func createLogs(logs []core.FullSyncLog, receiptId int64, tx *sqlx.Tx) error {
 	for _, log := range logs {
 		_, err := tx.Exec(
-			`INSERT INTO logs (block_number, address, tx_hash, index, topic0, topic1, topic2, topic3, data, receipt_id)
+			`INSERT INTO full_sync_logs (block_number, address, tx_hash, index, topic0, topic1, topic2, topic3, data, receipt_id)
 			VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
 			`,
 			log.BlockNumber, log.Address, log.TxHash, log.Index, log.Topics[0], log.Topics[1], log.Topics[2], log.Topics[3], log.Data, receiptId,
@@ -29,7 +29,7 @@ import (
 
 var _ = Describe("Receipt Repository", func() {
 	var blockRepository datastore.BlockRepository
-	var logRepository datastore.LogRepository
+	var logRepository datastore.FullSyncLogRepository
 	var receiptRepository datastore.FullSyncReceiptRepository
 	var db *postgres.DB
 	var node core.Node
@@ -43,7 +43,7 @@ var _ = Describe("Receipt Repository", func() {
 		db = test_config.NewTestDB(node)
 		test_config.CleanTestDB(db)
 		blockRepository = repositories.NewBlockRepository(db)
-		logRepository = repositories.LogRepository{DB: db}
+		logRepository = repositories.FullSyncLogRepository{DB: db}
 		receiptRepository = repositories.FullSyncReceiptRepository{DB: db}
 	})
 
@@ -56,7 +56,7 @@ var _ = Describe("Receipt Repository", func() {
 		txHashTwo := "0xTxHashTwo"
 		addressOne := "0xAddressOne"
 		addressTwo := "0xAddressTwo"
-		logsOne := []core.Log{{
+		logsOne := []core.FullSyncLog{{
 			Address:     addressOne,
 			BlockNumber: blockNumber,
 			TxHash:      txHashOne,
@@ -65,7 +65,7 @@ var _ = Describe("Receipt Repository", func() {
 			BlockNumber: blockNumber,
 			TxHash:      txHashOne,
 		}}
-		logsTwo := []core.Log{{
+		logsTwo := []core.FullSyncLog{{
 			BlockNumber: blockNumber,
 			TxHash:      txHashTwo,
 			Address:     addressTwo,
@@ -112,7 +112,7 @@ var _ = Describe("Receipt Repository", func() {
 			ContractAddress:   "0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae",
 			CumulativeGasUsed: 7996119,
 			GasUsed:           21000,
-			Logs:              []core.Log{},
+			Logs:              []core.FullSyncLog{},
 			StateRoot:         "0x88abf7e73128227370aa7baa3dd4e18d0af70e92ef1f9ef426942fbe2dddb733",
 			Status:            1,
 			TxHash:            "0xe340558980f89d5f86045ac11e5cc34e4bcec20f9f1e2a427aa39d87114e8223",
@@ -0,0 +1,146 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package repositories

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/jmoiron/sqlx"
	"github.com/lib/pq"
	"github.com/sirupsen/logrus"
	"github.com/vulcanize/vulcanizedb/pkg/core"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
)

const insertHeaderSyncLogQuery = `INSERT INTO header_sync_logs
	(header_id, address, topics, data, block_number, block_hash, tx_index, tx_hash, log_index, raw)
	VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) ON CONFLICT DO NOTHING`

type HeaderSyncLogRepository struct {
	db                *postgres.DB
	addressRepository AddressRepository
}

func NewHeaderSyncLogRepository(db *postgres.DB) HeaderSyncLogRepository {
	return HeaderSyncLogRepository{
		db:                db,
		addressRepository: AddressRepository{},
	}
}

type headerSyncLog struct {
	ID          int64
	HeaderID    int64 `db:"header_id"`
	Address     int64
	Topics      pq.ByteaArray
	Data        []byte
	BlockNumber uint64 `db:"block_number"`
	BlockHash   string `db:"block_hash"`
	TxHash      string `db:"tx_hash"`
	TxIndex     uint   `db:"tx_index"`
	LogIndex    uint   `db:"log_index"`
	Transformed bool
	Raw         []byte
}

func (repository HeaderSyncLogRepository) GetUntransformedHeaderSyncLogs() ([]core.HeaderSyncLog, error) {
	rows, queryErr := repository.db.Queryx(`SELECT * FROM public.header_sync_logs WHERE transformed = false`)
	if queryErr != nil {
		return nil, queryErr
	}

	var results []core.HeaderSyncLog
	for rows.Next() {
		var rawLog headerSyncLog
		scanErr := rows.StructScan(&rawLog)
		if scanErr != nil {
			return nil, scanErr
		}
		var logTopics []common.Hash
		for _, topic := range rawLog.Topics {
			logTopics = append(logTopics, common.BytesToHash(topic))
		}
		address, addrErr := repository.addressRepository.GetAddressById(repository.db, rawLog.Address)
		if addrErr != nil {
			return nil, addrErr
		}
		reconstructedLog := types.Log{
			Address:     common.HexToAddress(address),
			Topics:      logTopics,
			Data:        rawLog.Data,
			BlockNumber: rawLog.BlockNumber,
			TxHash:      common.HexToHash(rawLog.TxHash),
			TxIndex:     rawLog.TxIndex,
			BlockHash:   common.HexToHash(rawLog.BlockHash),
			Index:       rawLog.LogIndex,
			// TODO: revisit if not cascade deleting logs when header removed
			// currently, fetched logs are cascade deleted if removed
			Removed: false,
		}
		result := core.HeaderSyncLog{
			ID:          rawLog.ID,
			HeaderID:    rawLog.HeaderID,
			Log:         reconstructedLog,
			Transformed: rawLog.Transformed,
		}
		// TODO: Consider returning each result async to avoid keeping large result sets in memory
		results = append(results, result)
	}

	return results, nil
}

func (repository HeaderSyncLogRepository) CreateHeaderSyncLogs(headerID int64, logs []types.Log) error {
	tx, txErr := repository.db.Beginx()
	if txErr != nil {
		return txErr
	}
	for _, log := range logs {
		err := repository.insertLog(headerID, log, tx)
		if err != nil {
			rollbackErr := tx.Rollback()
			if rollbackErr != nil {
				logrus.Errorf("failed to rollback header sync log insert: %s", rollbackErr.Error())
			}
			return err
		}
	}
	return tx.Commit()
}

func (repository HeaderSyncLogRepository) insertLog(headerID int64, log types.Log, tx *sqlx.Tx) error {
	topics := buildTopics(log)
	raw, jsonErr := log.MarshalJSON()
	if jsonErr != nil {
		return jsonErr
	}
	addressID, addrErr := repository.addressRepository.GetOrCreateAddressInTransaction(tx, log.Address.Hex())
	if addrErr != nil {
		return addrErr
	}
	_, insertErr := tx.Exec(insertHeaderSyncLogQuery, headerID, addressID, topics, log.Data, log.BlockNumber,
		log.BlockHash.Hex(), log.TxIndex, log.TxHash.Hex(), log.Index, raw)
	return insertErr
}

func buildTopics(log types.Log) pq.ByteaArray {
	var topics pq.ByteaArray
	for _, topic := range log.Topics {
		topics = append(topics, topic.Bytes())
	}
	return topics
}
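
Reviewer note: a minimal sketch (illustrative only, not part of this changeset) of the intended round trip behind the extraction/delegation split: persist raw geth logs against a header, then pull back only what has not yet been transformed. `db`, `headerID`, and `fetchedLogs` are assumed to exist:

    // Hypothetical usage of the new HeaderSyncLogRepository.
    repo := repositories.NewHeaderSyncLogRepository(db)
    if err := repo.CreateHeaderSyncLogs(headerID, fetchedLogs); err != nil {
    	logrus.Errorf("failed to persist header sync logs: %s", err.Error())
    }
    // Delegation side: transformers consume only logs not yet marked transformed.
    untransformed, getErr := repo.GetUntransformedHeaderSyncLogs()
    if getErr != nil {
    	logrus.Errorf("failed to get untransformed logs: %s", getErr.Error())
    }
    for _, l := range untransformed {
    	_ = l.Log // a full types.Log, so tools like abi.Unpack still apply
    }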
@@ -0,0 +1,214 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package repositories_test

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/lib/pq"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/vulcanize/vulcanizedb/libraries/shared/test_data"
	"github.com/vulcanize/vulcanizedb/pkg/datastore"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
	"github.com/vulcanize/vulcanizedb/pkg/fakes"
	"github.com/vulcanize/vulcanizedb/test_config"
)

var _ = Describe("Header sync log repository", func() {
	var (
		db         *postgres.DB
		headerID   int64
		repository datastore.HeaderSyncLogRepository
	)

	BeforeEach(func() {
		db = test_config.NewTestDB(test_config.NewTestNode())
		test_config.CleanTestDB(db)
		headerRepository := repositories.NewHeaderRepository(db)
		var headerErr error
		headerID, headerErr = headerRepository.CreateOrUpdateHeader(fakes.FakeHeader)
		Expect(headerErr).NotTo(HaveOccurred())
		repository = repositories.NewHeaderSyncLogRepository(db)
	})

	AfterEach(func() {
		closeErr := db.Close()
		Expect(closeErr).NotTo(HaveOccurred())
	})

	Describe("CreateHeaderSyncLogs", func() {
		type headerSyncLog struct {
			ID          int64
			HeaderID    int64 `db:"header_id"`
			Address     int64
			Topics      pq.ByteaArray
			Data        []byte
			BlockNumber uint64 `db:"block_number"`
			BlockHash   string `db:"block_hash"`
			TxHash      string `db:"tx_hash"`
			TxIndex     uint   `db:"tx_index"`
			LogIndex    uint   `db:"log_index"`
			Transformed bool
			Raw         []byte
		}

		It("writes a log to the db", func() {
			log := test_data.GenericTestLog()

			err := repository.CreateHeaderSyncLogs(headerID, []types.Log{log})

			Expect(err).NotTo(HaveOccurred())
			var dbLog headerSyncLog
			lookupErr := db.Get(&dbLog, `SELECT * FROM header_sync_logs`)
			Expect(lookupErr).NotTo(HaveOccurred())
			Expect(dbLog.ID).NotTo(BeZero())
			Expect(dbLog.HeaderID).To(Equal(headerID))
			addressRepository := repositories.AddressRepository{}
			actualAddress, addressErr := addressRepository.GetAddressById(db, dbLog.Address)
			Expect(addressErr).NotTo(HaveOccurred())
			Expect(actualAddress).To(Equal(log.Address.Hex()))
			Expect(dbLog.Topics[0]).To(Equal(log.Topics[0].Bytes()))
			Expect(dbLog.Topics[1]).To(Equal(log.Topics[1].Bytes()))
			Expect(dbLog.Data).To(Equal(log.Data))
			Expect(dbLog.BlockNumber).To(Equal(log.BlockNumber))
			Expect(dbLog.BlockHash).To(Equal(log.BlockHash.Hex()))
			Expect(dbLog.TxIndex).To(Equal(log.TxIndex))
			Expect(dbLog.TxHash).To(Equal(log.TxHash.Hex()))
			Expect(dbLog.LogIndex).To(Equal(log.Index))
			expectedRaw, jsonErr := log.MarshalJSON()
			Expect(jsonErr).NotTo(HaveOccurred())
			Expect(dbLog.Raw).To(MatchJSON(expectedRaw))
			Expect(dbLog.Transformed).To(BeFalse())
		})

		It("writes several logs to the db", func() {
			log1 := test_data.GenericTestLog()
			log2 := test_data.GenericTestLog()
			logs := []types.Log{log1, log2}

			err := repository.CreateHeaderSyncLogs(headerID, logs)

			Expect(err).NotTo(HaveOccurred())
			var count int
			lookupErr := db.Get(&count, `SELECT COUNT(*) FROM header_sync_logs`)
			Expect(lookupErr).NotTo(HaveOccurred())
			Expect(count).To(Equal(len(logs)))
		})

		It("persists record that can be unpacked into types.Log", func() {
			// important if we want to decouple log persistence from transforming and still make use of
			// tools on types.Log like abi.Unpack
			log := test_data.GenericTestLog()

			err := repository.CreateHeaderSyncLogs(headerID, []types.Log{log})

			Expect(err).NotTo(HaveOccurred())
			var dbLog headerSyncLog
			lookupErr := db.Get(&dbLog, `SELECT * FROM header_sync_logs`)
			Expect(lookupErr).NotTo(HaveOccurred())

			var logTopics []common.Hash
			for _, topic := range dbLog.Topics {
				logTopics = append(logTopics, common.BytesToHash(topic))
			}

			addressRepository := repositories.AddressRepository{}
			actualAddress, addressErr := addressRepository.GetAddressById(db, dbLog.Address)
			Expect(addressErr).NotTo(HaveOccurred())
			reconstructedLog := types.Log{
				Address:     common.HexToAddress(actualAddress),
				Topics:      logTopics,
				Data:        dbLog.Data,
				BlockNumber: dbLog.BlockNumber,
				TxHash:      common.HexToHash(dbLog.TxHash),
				TxIndex:     dbLog.TxIndex,
				BlockHash:   common.HexToHash(dbLog.BlockHash),
				Index:       dbLog.LogIndex,
				Removed:     false,
			}
			Expect(reconstructedLog).To(Equal(log))
		})

		It("does not duplicate logs", func() {
			log := test_data.GenericTestLog()

			err := repository.CreateHeaderSyncLogs(headerID, []types.Log{log, log})

			Expect(err).NotTo(HaveOccurred())
			var count int
			lookupErr := db.Get(&count, `SELECT COUNT(*) FROM header_sync_logs`)
			Expect(lookupErr).NotTo(HaveOccurred())
			Expect(count).To(Equal(1))
		})
	})

	Describe("GetUntransformedHeaderSyncLogs", func() {
		Describe("when there are no logs", func() {
			It("returns empty collection", func() {
				result, err := repository.GetUntransformedHeaderSyncLogs()

				Expect(err).NotTo(HaveOccurred())
				Expect(len(result)).To(BeZero())
			})
		})

		Describe("when there are logs", func() {
			var log1, log2 types.Log

			BeforeEach(func() {
				log1 = test_data.GenericTestLog()
				log2 = test_data.GenericTestLog()
				logs := []types.Log{log1, log2}
				logsErr := repository.CreateHeaderSyncLogs(headerID, logs)
				Expect(logsErr).NotTo(HaveOccurred())
			})

			It("returns persisted logs", func() {
				result, err := repository.GetUntransformedHeaderSyncLogs()

				Expect(err).NotTo(HaveOccurred())
				Expect(len(result)).To(Equal(2))
				Expect(result[0].Log).To(Or(Equal(log1), Equal(log2)))
				Expect(result[1].Log).To(Or(Equal(log1), Equal(log2)))
				Expect(result[0].Log).NotTo(Equal(result[1].Log))
			})

			It("excludes logs that have been transformed", func() {
				_, insertErr := db.Exec(`UPDATE public.header_sync_logs SET transformed = true WHERE tx_hash = $1`, log1.TxHash.Hex())
				Expect(insertErr).NotTo(HaveOccurred())

				result, err := repository.GetUntransformedHeaderSyncLogs()

				Expect(err).NotTo(HaveOccurred())
				Expect(len(result)).To(Equal(1))
				Expect(result[0].Log).To(Equal(log2))
			})

			It("returns empty collection if all logs transformed", func() {
				_, insertErr := db.Exec(`UPDATE public.header_sync_logs SET transformed = true WHERE header_id = $1`, headerID)
				Expect(insertErr).NotTo(HaveOccurred())

				result, err := repository.GetUntransformedHeaderSyncLogs()

				Expect(err).NotTo(HaveOccurred())
				Expect(len(result)).To(BeZero())
			})
		})
	})
})
@@ -32,7 +32,7 @@ var _ = Describe("Watched Events Repository", func() {
 	var db *postgres.DB
 	var blocksRepository datastore.BlockRepository
 	var filterRepository datastore.FilterRepository
-	var logRepository datastore.LogRepository
+	var logRepository datastore.FullSyncLogRepository
 	var receiptRepository datastore.FullSyncReceiptRepository
 	var watchedEventRepository datastore.WatchedEventRepository
 
@@ -41,7 +41,7 @@ var _ = Describe("Watched Events Repository", func() {
 		test_config.CleanTestDB(db)
 		blocksRepository = repositories.NewBlockRepository(db)
 		filterRepository = repositories.FilterRepository{DB: db}
-		logRepository = repositories.LogRepository{DB: db}
+		logRepository = repositories.FullSyncLogRepository{DB: db}
 		receiptRepository = repositories.FullSyncReceiptRepository{DB: db}
 		watchedEventRepository = repositories.WatchedEventRepository{DB: db}
 	})
@@ -54,7 +54,7 @@ var _ = Describe("Watched Events Repository", func() {
 			Address: "0x123",
 			Topics:  core.Topics{0: "event1=10", 2: "event3=hello"},
 		}
-		logs := []core.Log{
+		logs := []core.FullSyncLog{
 			{
 				BlockNumber: 0,
 				TxHash:      "0x1",
@@ -108,7 +108,7 @@ var _ = Describe("Watched Events Repository", func() {
 			Address: "0x123",
 			Topics:  core.Topics{0: "event1=10", 2: "event3=hello"},
 		}
-		logs := []core.Log{
+		logs := []core.FullSyncLog{
 			{
 				BlockNumber: 0,
 				TxHash:      "0x1",
@@ -17,6 +17,7 @@
 package datastore
 
 import (
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/jmoiron/sqlx"
 	"github.com/vulcanize/vulcanizedb/pkg/core"
 	"github.com/vulcanize/vulcanizedb/pkg/filters"
@@ -33,6 +34,17 @@ type BlockRepository interface {
 	SetBlocksStatus(chainHead int64) error
 }
 
+type CheckedHeadersRepository interface {
+	MarkHeaderChecked(headerID int64) error
+	MarkHeadersUnchecked(startingBlockNumber int64) error
+	UncheckedHeaders(startingBlockNumber, endingBlockNumber, checkCount int64) ([]core.Header, error)
+}
+
+type CheckedLogsRepository interface {
+	AlreadyWatchingLog(addresses []string, topic0 string) (bool, error)
+	MarkLogWatched(addresses []string, topic0 string) error
+}
+
 type ContractRepository interface {
 	CreateContract(contract core.Contract) error
 	GetContract(contractHash string) (core.Contract, error)
@@ -44,6 +56,11 @@ type FilterRepository interface {
 	GetFilter(name string) (filters.LogFilter, error)
 }
 
+type FullSyncLogRepository interface {
+	CreateLogs(logs []core.FullSyncLog, receiptId int64) error
+	GetLogs(address string, blockNumber int64) ([]core.FullSyncLog, error)
+}
+
 type HeaderRepository interface {
 	CreateOrUpdateHeader(header core.Header) (int64, error)
 	CreateTransactions(headerID int64, transactions []core.TransactionModel) error
@@ -51,9 +68,9 @@ type HeaderRepository interface {
 	MissingBlockNumbers(startingBlockNumber, endingBlockNumber int64, nodeID string) ([]int64, error)
 }
 
-type LogRepository interface {
-	CreateLogs(logs []core.Log, receiptId int64) error
-	GetLogs(address string, blockNumber int64) ([]core.Log, error)
+type HeaderSyncLogRepository interface {
+	GetUntransformedHeaderSyncLogs() ([]core.HeaderSyncLog, error)
+	CreateHeaderSyncLogs(headerID int64, logs []types.Log) error
}

 type FullSyncReceiptRepository interface {
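
Reviewer note: these interfaces are what let consumers be tested against the fakes below instead of a live database. A minimal sketch (illustrative; the `LogDelegator` type is hypothetical, not part of this changeset) of a consumer depending on the new interface:

    // Hypothetical consumer, testable with fakes.MockHeaderSyncLogRepository.
    type LogDelegator struct {
    	LogRepository datastore.HeaderSyncLogRepository
    }

    func (delegator LogDelegator) DelegateLogs() error {
    	logs, err := delegator.LogRepository.GetUntransformedHeaderSyncLogs()
    	if err != nil {
    		return err
    	}
    	for _, log := range logs {
    		_ = log // hand each log to its matching transformer here
    	}
    	return nil
    }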
pkg/fakes/checked_logs_repository.go (new file, 39 lines)
@@ -0,0 +1,39 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package fakes

type MockCheckedLogsRepository struct {
	AlreadyWatchingLogAddresses []string
	AlreadyWatchingLogError     error
	AlreadyWatchingLogReturn    bool
	AlreadyWatchingLogTopicZero string
	MarkLogWatchedAddresses     []string
	MarkLogWatchedError         error
	MarkLogWatchedTopicZero     string
}

func (repository *MockCheckedLogsRepository) AlreadyWatchingLog(addresses []string, topic0 string) (bool, error) {
	repository.AlreadyWatchingLogAddresses = addresses
	repository.AlreadyWatchingLogTopicZero = topic0
	return repository.AlreadyWatchingLogReturn, repository.AlreadyWatchingLogError
}

func (repository *MockCheckedLogsRepository) MarkLogWatched(addresses []string, topic0 string) error {
	repository.MarkLogWatchedAddresses = addresses
	repository.MarkLogWatchedTopicZero = topic0
	return repository.MarkLogWatchedError
}
@@ -20,19 +20,19 @@ import (
 	"bytes"
 	"encoding/json"
 	"errors"
-	"strconv"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 
 	"github.com/vulcanize/vulcanizedb/pkg/core"
+
+	"math/rand"
+	"strconv"
+	"time"
 )
 
 var (
-	FakeAddress   = common.HexToAddress("0x1234567890abcdef")
+	FakeAddress   = common.HexToAddress("0x" + RandomString(40))
 	FakeError     = errors.New("failed")
 	FakeHash      = common.BytesToHash([]byte{1, 2, 3, 4, 5})
-	fakeTimestamp = int64(111111111)
+	fakeTimestamp = rand.Int63n(1500000000)
 )
 
 var rawFakeHeader, _ = json.Marshal(types.Header{})
@@ -103,3 +103,15 @@ func GetFakeUncle(hash, reward string) core.Uncle {
 		Timestamp: strconv.FormatInt(fakeTimestamp, 10),
 	}
 }
+
+func RandomString(length int) string {
+	var seededRand = rand.New(
+		rand.NewSource(time.Now().UnixNano()))
+	charset := "abcdef1234567890"
+	b := make([]byte, length)
+	for i := range b {
+		b[i] = charset[seededRand.Intn(len(charset))]
+	}
+
+	return string(b)
+}
@@ -107,8 +107,8 @@ func (chain *MockBlockChain) GetHeadersByNumbers(blockNumbers []int64) ([]core.H
 	return headers, nil
 }
 
-func (chain *MockBlockChain) GetLogs(contract core.Contract, startingBlockNumber, endingBlockNumber *big.Int) ([]core.Log, error) {
-	return []core.Log{}, nil
+func (chain *MockBlockChain) GetFullSyncLogs(contract core.Contract, startingBlockNumber, endingBlockNumber *big.Int) ([]core.FullSyncLog, error) {
+	return []core.FullSyncLog{}, nil
 }
 
 func (chain *MockBlockChain) GetTransactions(transactionHashes []common.Hash) ([]core.TransactionModel, error) {
pkg/fakes/mock_checked_headers_repository.go (new file, 52 lines)
@@ -0,0 +1,52 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package fakes

import (
	"github.com/vulcanize/vulcanizedb/pkg/core"
)

type MockCheckedHeadersRepository struct {
	MarkHeaderCheckedHeaderID               int64
	MarkHeaderCheckedReturnError            error
	MarkHeadersUncheckedCalled              bool
	MarkHeadersUncheckedReturnError         error
	MarkHeadersUncheckedStartingBlockNumber int64
	UncheckedHeadersCheckCount              int64
	UncheckedHeadersEndingBlockNumber       int64
	UncheckedHeadersReturnError             error
	UncheckedHeadersReturnHeaders           []core.Header
	UncheckedHeadersStartingBlockNumber     int64
}

func (repository *MockCheckedHeadersRepository) MarkHeadersUnchecked(startingBlockNumber int64) error {
	repository.MarkHeadersUncheckedCalled = true
	repository.MarkHeadersUncheckedStartingBlockNumber = startingBlockNumber
	return repository.MarkHeadersUncheckedReturnError
}

func (repository *MockCheckedHeadersRepository) MarkHeaderChecked(headerID int64) error {
	repository.MarkHeaderCheckedHeaderID = headerID
	return repository.MarkHeaderCheckedReturnError
}

func (repository *MockCheckedHeadersRepository) UncheckedHeaders(startingBlockNumber, endingBlockNumber, checkCount int64) ([]core.Header, error) {
	repository.UncheckedHeadersStartingBlockNumber = startingBlockNumber
	repository.UncheckedHeadersEndingBlockNumber = endingBlockNumber
	repository.UncheckedHeadersCheckCount = checkCount
	return repository.UncheckedHeadersReturnHeaders, repository.UncheckedHeadersReturnError
}
pkg/fakes/mock_header_sync_log_repository.go (new file, 42 lines)
@@ -0,0 +1,42 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package fakes

import (
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/vulcanize/vulcanizedb/pkg/core"
)

type MockHeaderSyncLogRepository struct {
	CreateError    error
	GetCalled      bool
	GetError       error
	PassedHeaderID int64
	PassedLogs     []types.Log
	ReturnLogs     []core.HeaderSyncLog
}

func (repository *MockHeaderSyncLogRepository) GetUntransformedHeaderSyncLogs() ([]core.HeaderSyncLog, error) {
	repository.GetCalled = true
	return repository.ReturnLogs, repository.GetError
}

func (repository *MockHeaderSyncLogRepository) CreateHeaderSyncLogs(headerID int64, logs []types.Log) error {
	repository.PassedHeaderID = headerID
	repository.PassedLogs = logs
	return repository.CreateError
}
@@ -86,7 +86,7 @@ func (blockChain *BlockChain) GetHeadersByNumbers(blockNumbers []int64) (header
 	return blockChain.getPOWHeaders(blockNumbers)
 }
 
-func (blockChain *BlockChain) GetLogs(contract core.Contract, startingBlockNumber, endingBlockNumber *big.Int) ([]core.Log, error) {
+func (blockChain *BlockChain) GetFullSyncLogs(contract core.Contract, startingBlockNumber, endingBlockNumber *big.Int) ([]core.FullSyncLog, error) {
 	if endingBlockNumber == nil {
 		endingBlockNumber = startingBlockNumber
 	}
@@ -99,9 +99,9 @@ func (blockChain *BlockChain) GetLogs(contract core.Contract, startingBlockNumbe
 	}
 	gethLogs, err := blockChain.GetEthLogsWithCustomQuery(fc)
 	if err != nil {
-		return []core.Log{}, err
+		return []core.FullSyncLog{}, err
 	}
-	logs := vulcCommon.ToCoreLogs(gethLogs)
+	logs := vulcCommon.ToFullSyncLogs(gethLogs)
 	return logs, nil
 }
 
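
Reviewer note: a short sketch (illustrative only, not part of this changeset) of calling the renamed fetcher over a block range; `blockChain` and `contract` are assumed to exist, and `math/big` plus `logrus` imports are assumed:

    // Hypothetical call site for the renamed full-sync log fetcher.
    startingBlock := big.NewInt(4745407) // placeholder block numbers
    endingBlock := big.NewInt(4745417)
    fullSyncLogs, err := blockChain.GetFullSyncLogs(contract, startingBlock, endingBlock)
    if err != nil {
    	logrus.Error("failed to fetch full sync logs: ", err)
    }
    logrus.Infof("fetched %d full sync logs", len(fullSyncLogs))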
@@ -154,7 +154,7 @@ var _ = Describe("Geth blockchain", func() {
 			startingBlockNumber := big.NewInt(1)
 			endingBlockNumber := big.NewInt(2)
 
-			_, err := blockChain.GetLogs(contract, startingBlockNumber, endingBlockNumber)
+			_, err := blockChain.GetFullSyncLogs(contract, startingBlockNumber, endingBlockNumber)
 
 			Expect(err).NotTo(HaveOccurred())
 			expectedQuery := ethereum.FilterQuery{
@@ -171,7 +171,7 @@ var _ = Describe("Geth blockchain", func() {
 			startingBlockNumber := big.NewInt(1)
 			endingBlockNumber := big.NewInt(2)
 
-			_, err := blockChain.GetLogs(contract, startingBlockNumber, endingBlockNumber)
+			_, err := blockChain.GetFullSyncLogs(contract, startingBlockNumber, endingBlockNumber)
 
 			Expect(err).To(HaveOccurred())
 			Expect(err).To(MatchError(fakes.FakeError))
@@ -26,8 +26,8 @@ import (
 	"github.com/vulcanize/vulcanizedb/pkg/core"
 )
 
-func ToCoreLogs(gethLogs []types.Log) []core.Log {
-	var logs []core.Log
+func ToFullSyncLogs(gethLogs []types.Log) []core.FullSyncLog {
+	var logs []core.FullSyncLog
 	for _, log := range gethLogs {
 		log := ToCoreLog(log)
 		logs = append(logs, log)
@@ -43,10 +43,10 @@ func makeTopics(topics []common.Hash) core.Topics {
 	return hexTopics
 }
 
-func ToCoreLog(gethLog types.Log) core.Log {
+func ToCoreLog(gethLog types.Log) core.FullSyncLog {
 	topics := gethLog.Topics
 	hexTopics := makeTopics(topics)
-	return core.Log{
+	return core.FullSyncLog{
 		Address:     strings.ToLower(gethLog.Address.Hex()),
 		BlockNumber: int64(gethLog.BlockNumber),
 		Topics:      hexTopics,
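
Reviewer note: a minimal sketch (illustrative only, not part of this changeset) of the conversion path from a raw geth log to the renamed core type, using the `vulcCommon` alias seen in the tests; the field values are placeholders:

    // Hypothetical conversion of a fetched geth log.
    gethLog := types.Log{
    	Address:     common.HexToAddress("0x123"),               // placeholder
    	BlockNumber: 4745407,                                    // placeholder
    	Topics:      []common.Hash{common.HexToHash("0xabc")},   // placeholder
    }
    single := vulcCommon.ToCoreLog(gethLog) // now returns core.FullSyncLog
    many := vulcCommon.ToFullSyncLogs([]types.Log{gethLog})
    _, _ = single, many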
@@ -29,7 +29,7 @@ import (
 	vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
 )
 
-var _ = Describe("Conversion of GethLog to core.Log", func() {
+var _ = Describe("Conversion of GethLog to core.FullSyncLog", func() {
 
 	It("converts geth log to internal log format", func() {
 		gethLog := types.Log{
@@ -46,7 +46,7 @@ var _ = Describe("Conversion of GethLog to core.Log", func() {
 			},
 		}
 
-		expected := core.Log{
+		expected := core.FullSyncLog{
 			Address:     strings.ToLower(gethLog.Address.Hex()),
 			BlockNumber: int64(gethLog.BlockNumber),
 			Data:        hexutil.Encode(gethLog.Data),
@@ -101,7 +101,7 @@ var _ = Describe("Conversion of GethLog to core.Log", func() {
 		expectedOne := vulcCommon.ToCoreLog(gethLogOne)
 		expectedTwo := vulcCommon.ToCoreLog(gethLogTwo)
 
-		coreLogs := vulcCommon.ToCoreLogs([]types.Log{gethLogOne, gethLogTwo})
+		coreLogs := vulcCommon.ToFullSyncLogs([]types.Log{gethLogOne, gethLogTwo})
 
 		Expect(len(coreLogs)).To(Equal(2))
 		Expect(coreLogs[0]).To(Equal(expectedOne))
@@ -73,8 +73,8 @@ func setContractAddress(gethReceipt *types.Receipt) string {
 	return gethReceipt.ContractAddress.Hex()
 }
 
-func dereferenceLogs(gethReceipt *types.Receipt) []core.Log {
-	logs := []core.Log{}
+func dereferenceLogs(gethReceipt *types.Receipt) []core.FullSyncLog {
+	logs := []core.FullSyncLog{}
 	for _, log := range gethReceipt.Logs {
 		logs = append(logs, ToCoreLog(*log))
 	}
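The dereference in that loop exists because go-ethereum's types.Receipt stores its logs as a slice of pointers ([]*types.Log), while the converter consumes values. A minimal sketch of the same pattern in isolation:

package sketch

import "github.com/ethereum/go-ethereum/core/types"

// copyLogs shows why dereferenceLogs must dereference: Receipt.Logs holds
// *types.Log pointers, and each is copied to a value before conversion.
func copyLogs(receipt *types.Receipt) []types.Log {
	logs := make([]types.Log, 0, len(receipt.Logs))
	for _, log := range receipt.Logs {
		logs = append(logs, *log)
	}
	return logs
}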
@@ -51,7 +51,7 @@ var _ = Describe("Conversion of GethReceipt to core.Receipt", func() {
 			ContractAddress:   "",
 			CumulativeGasUsed: 25000,
 			GasUsed:           21000,
-			Logs:              []core.Log{},
+			Logs:              []core.FullSyncLog{},
 			StateRoot:         "0x88abf7e73128227370aa7baa3dd4e18d0af70e92ef1f9ef426942fbe2dddb733",
 			Status:            -99,
 			TxHash:            receipt.TxHash.Hex(),
@@ -92,7 +92,7 @@ var _ = Describe("Conversion of GethReceipt to core.Receipt", func() {
 			ContractAddress:   receipt.ContractAddress.Hex(),
 			CumulativeGasUsed: 7996119,
 			GasUsed:           21000,
-			Logs:              []core.Log{},
+			Logs:              []core.FullSyncLog{},
 			StateRoot:         "",
 			Status:            1,
 			TxHash:            receipt.TxHash.Hex(),
@@ -19,16 +19,14 @@ package test_config
 import (
 	"errors"
 	"fmt"
-	"os"
 
 	. "github.com/onsi/gomega"
-	log "github.com/sirupsen/logrus"
+	"github.com/sirupsen/logrus"
 	"github.com/spf13/viper"
 
 	"github.com/vulcanize/vulcanizedb/pkg/config"
 	"github.com/vulcanize/vulcanizedb/pkg/core"
 	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
 	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
+	"os"
 )
 
 var TestConfig *viper.Viper
@@ -50,7 +48,7 @@ func setTestConfig() {
 	TestConfig.AddConfigPath("$GOPATH/src/github.com/vulcanize/vulcanizedb/environments/")
 	err := TestConfig.ReadInConfig()
 	if err != nil {
-		log.Fatal(err)
+		logrus.Fatal(err)
 	}
 	ipc := TestConfig.GetString("client.ipcPath")
 	hn := TestConfig.GetString("database.hostname")
@@ -73,7 +71,7 @@ func setInfuraConfig() {
 	Infura.AddConfigPath("$GOPATH/src/github.com/vulcanize/vulcanizedb/environments/")
 	err := Infura.ReadInConfig()
 	if err != nil {
-		log.Fatal(err)
+		logrus.Fatal(err)
 	}
 	ipc := Infura.GetString("client.ipcpath")
 
@@ -83,7 +81,7 @@ func setInfuraConfig() {
 		ipc = Infura.GetString("url")
 	}
 	if ipc == "" {
-		log.Fatal(errors.New("infura.toml IPC path or $INFURA_URL env variable need to be set"))
+		logrus.Fatal(errors.New("infura.toml IPC path or $INFURA_URL env variable need to be set"))
 	}
 
 	InfuraClient = config.Client{
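Dropping the `log` alias on the sirupsen import is a small but deliberate cleanup: the alias shadowed the standard library's log package name, so a call site's `log.Fatal` was ambiguous to a reader. Unaliased, the call style reads as below; note that logrus.Fatal exits the process after logging, which suits fail-fast test configuration but skips deferred cleanup.

package sketch

import "github.com/sirupsen/logrus"

// failIf illustrates the unaliased call style used after this change:
// logrus.Fatal logs the error and exits with status 1.
func failIf(err error) {
	if err != nil {
		logrus.Fatal(err)
	}
}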
@@ -109,16 +107,18 @@ func CleanTestDB(db *postgres.DB) {
 	db.MustExec("DELETE FROM blocks")
 	db.MustExec("DELETE FROM checked_headers")
 	// can't delete from eth_nodes since this function is called after the required eth_node is persisted
+	db.MustExec("DELETE FROM full_sync_logs")
+	db.MustExec("DELETE FROM full_sync_receipts")
 	db.MustExec("DELETE FROM full_sync_transactions")
 	db.MustExec("DELETE FROM goose_db_version")
-	db.MustExec("DELETE FROM headers")
-	db.MustExec("DELETE FROM header_sync_transactions")
-	db.MustExec("DELETE FROM log_filters")
-	db.MustExec("DELETE FROM logs")
-	db.MustExec("DELETE FROM queued_storage")
-	db.MustExec("DELETE FROM full_sync_receipts")
+	db.MustExec("DELETE FROM header_sync_logs")
 	db.MustExec("DELETE FROM header_sync_receipts")
+	db.MustExec("DELETE FROM header_sync_transactions")
+	db.MustExec("DELETE FROM headers")
+	db.MustExec("DELETE FROM log_filters")
+	db.MustExec("DELETE FROM queued_storage")
 	db.MustExec("DELETE FROM watched_contracts")
+	db.MustExec("DELETE FROM watched_logs")
 }
 
 func CleanCheckedHeadersTable(db *postgres.DB, columnNames []string) {
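CleanTestDB now also clears the renamed and newly added tables (full_sync_logs, header_sync_logs, watched_logs), keeping the alphabetized delete list in step with the migrations. A typical call site, sketched under the assumption that this package also exposes a NewTestDB constructor taking a core.Node (both names assumed for illustration):

package repositories_test

import (
	. "github.com/onsi/ginkgo"

	"github.com/vulcanize/vulcanizedb/pkg/core"
	"github.com/vulcanize/vulcanizedb/test_config"
)

var _ = Describe("a repository backed by Postgres", func() {
	AfterEach(func() {
		// CleanTestDB deletes rows from every table except eth_nodes,
		// per the comment in the function above, so each spec starts clean.
		db := test_config.NewTestDB(core.Node{})
		test_config.CleanTestDB(db)
	})
})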