forked from cerc-io/ipld-eth-server
Rename the blocks table to eth_blocks so that we don't clash with the IPFS blocks table; add a Dockerfile and startup_script for the seed node
parent e7cdd6247e
commit 4c81ca4d54
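The migration edits below rewrite the original CREATE/ALTER statements in place, so only a freshly migrated database ends up with eth_blocks; a database that has already run the old migrations would need an explicit rename instead. A minimal goose migration sketch for that case (hypothetical, not part of this commit; PostgreSQL keeps indexes and incoming foreign keys attached across a table rename):

-- +goose Up
-- Hypothetical follow-up migration: rename the existing table instead of recreating it.
ALTER TABLE public.blocks RENAME TO eth_blocks;

-- +goose Down
ALTER TABLE public.eth_blocks RENAME TO blocks;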
@@ -1,5 +1,5 @@
 -- +goose Up
-CREATE TABLE public.blocks (
+CREATE TABLE public.eth_blocks (
   id SERIAL PRIMARY KEY,
   difficulty BIGINT,
   extra_data VARCHAR,
@@ -20,4 +20,4 @@ CREATE TABLE public.blocks (
 
 
 -- +goose Down
-DROP TABLE public.blocks;
+DROP TABLE public.eth_blocks;
@@ -1,7 +1,7 @@
 -- +goose Up
 CREATE TABLE full_sync_transactions (
   id SERIAL PRIMARY KEY,
-  block_id INTEGER NOT NULL REFERENCES blocks(id) ON DELETE CASCADE,
+  block_id INTEGER NOT NULL REFERENCES eth_blocks(id) ON DELETE CASCADE,
   gas_limit NUMERIC,
   gas_price NUMERIC,
   hash VARCHAR(66),
@@ -1,5 +1,5 @@
 -- +goose Up
-CREATE INDEX number_index ON blocks (number);
+CREATE INDEX number_index ON eth_blocks (number);
 
 
 -- +goose Down
@@ -1,5 +1,5 @@
 -- +goose Up
-ALTER TABLE blocks
+ALTER TABLE eth_blocks
   ADD COLUMN node_id INTEGER NOT NULL,
   ADD CONSTRAINT node_fk
 FOREIGN KEY (node_id)
@@ -7,5 +7,5 @@ REFERENCES nodes (id)
 ON DELETE CASCADE;
 
 -- +goose Down
-ALTER TABLE blocks
+ALTER TABLE eth_blocks
   DROP COLUMN node_id;
@@ -1,7 +1,7 @@
 -- +goose Up
-ALTER TABLE blocks
+ALTER TABLE eth_blocks
   ADD CONSTRAINT node_id_block_number_uc UNIQUE (number, node_id);
 
 -- +goose Down
-ALTER TABLE blocks
+ALTER TABLE eth_blocks
   DROP CONSTRAINT node_id_block_number_uc;
@@ -1,5 +1,5 @@
 -- +goose Up
-CREATE INDEX node_id_index ON blocks (node_id);
+CREATE INDEX node_id_index ON eth_blocks (node_id);
 
 -- +goose Down
 DROP INDEX node_id_index;
@@ -7,14 +7,14 @@ ALTER TABLE public.eth_nodes DROP CONSTRAINT node_uc;
 ALTER TABLE public.eth_nodes
   ADD CONSTRAINT eth_node_uc UNIQUE (genesis_block, network_id, eth_node_id);
 
-ALTER TABLE public.blocks RENAME COLUMN node_id TO eth_node_id;
+ALTER TABLE public.eth_blocks RENAME COLUMN node_id TO eth_node_id;
 
-ALTER TABLE public.blocks DROP CONSTRAINT node_id_block_number_uc;
-ALTER TABLE public.blocks
+ALTER TABLE public.eth_blocks DROP CONSTRAINT node_id_block_number_uc;
+ALTER TABLE public.eth_blocks
   ADD CONSTRAINT eth_node_id_block_number_uc UNIQUE (number, eth_node_id);
 
-ALTER TABLE public.blocks DROP CONSTRAINT node_fk;
-ALTER TABLE public.blocks
+ALTER TABLE public.eth_blocks DROP CONSTRAINT node_fk;
+ALTER TABLE public.eth_blocks
   ADD CONSTRAINT node_fk
 FOREIGN KEY (eth_node_id) REFERENCES eth_nodes (id) ON DELETE CASCADE;
 
@@ -31,13 +31,13 @@ ALTER TABLE public.nodes
 ALTER TABLE public.nodes
   ADD CONSTRAINT node_uc UNIQUE (genesis_block, network_id, node_id);
 
-ALTER TABLE public.blocks RENAME COLUMN eth_node_id TO node_id;
+ALTER TABLE public.eth_blocks RENAME COLUMN eth_node_id TO node_id;
 
-ALTER TABLE public.blocks DROP CONSTRAINT eth_node_id_block_number_uc;
-ALTER TABLE public.blocks
+ALTER TABLE public.eth_blocks DROP CONSTRAINT eth_node_id_block_number_uc;
+ALTER TABLE public.eth_blocks
   ADD CONSTRAINT node_id_block_number_uc UNIQUE (number, node_id);
 
-ALTER TABLE public.blocks DROP CONSTRAINT node_fk;
-ALTER TABLE public.blocks
+ALTER TABLE public.eth_blocks DROP CONSTRAINT node_fk;
+ALTER TABLE public.eth_blocks
   ADD CONSTRAINT node_fk
 FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE;
@@ -11,9 +11,9 @@ ALTER TABLE full_sync_receipts
   ALTER COLUMN block_id SET NOT NULL;
 
 ALTER TABLE full_sync_receipts
-  ADD CONSTRAINT blocks_fk
+  ADD CONSTRAINT eth_blocks_fk
 FOREIGN KEY (block_id)
-REFERENCES blocks (id)
+REFERENCES eth_blocks (id)
 ON DELETE CASCADE;
 
 ALTER TABLE full_sync_receipts
@@ -1,16 +1,16 @@
 -- +goose Up
-ALTER TABLE blocks
+ALTER TABLE eth_blocks
   ADD COLUMN eth_node_fingerprint VARCHAR(128);
 
-UPDATE blocks
+UPDATE eth_blocks
 SET eth_node_fingerprint = (
-  SELECT eth_node_id FROM eth_nodes WHERE eth_nodes.id = blocks.eth_node_id
+  SELECT eth_node_id FROM eth_nodes WHERE eth_nodes.id = eth_blocks.eth_node_id
 );
 
-ALTER TABLE blocks
+ALTER TABLE eth_blocks
   ALTER COLUMN eth_node_fingerprint SET NOT NULL;
 
 
 -- +goose Down
-ALTER TABLE blocks
+ALTER TABLE eth_blocks
   DROP COLUMN eth_node_fingerprint;
@@ -2,7 +2,7 @@
 CREATE TABLE public.uncles (
   id SERIAL PRIMARY KEY,
   hash VARCHAR(66) NOT NULL,
-  block_id INTEGER NOT NULL REFERENCES blocks (id) ON DELETE CASCADE,
+  block_id INTEGER NOT NULL REFERENCES eth_blocks (id) ON DELETE CASCADE,
   reward NUMERIC NOT NULL,
   miner VARCHAR(42) NOT NULL,
   raw JSONB,
@@ -1175,4 +1175,3 @@ ALTER TABLE ONLY public.uncles
 --
 -- PostgreSQL database dump complete
 --
-
dockerfiles/seed_node/Dockerfile (new file, 61 lines)
@@ -0,0 +1,61 @@
+FROM golang:alpine as builder
+
+RUN apk --update --no-cache add make git g++
+# DEBUG
+RUN apk add busybox-extras
+
+# Build statically linked vDB binary (wonky path because of Dep)
+WORKDIR /go/src/github.com/vulcanize/vulcanizedb
+ADD . .
+RUN GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' .
+
+# Build statically linked IPFS binary (statically linked and wonky path because we need to use custom fork)
+WORKDIR /go/src/github.com/ipfs/go-ipfs
+ADD . .
+RUN GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' .
+
+# Build statically linked Geth binary (statically linked and wonky path because we need to use custom fork)
+WORKDIR /go/src/github.com/ethereum/go-ethereum
+ADD . .
+RUN GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' .
+
+# Build migration tool
+RUN go get -u -d github.com/pressly/goose/cmd/goose
+WORKDIR /go/src/github.com/pressly/goose/cmd/goose
+RUN GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -tags='no_mysql no_sqlite' -o goose
+
+WORKDIR /go/src/github.com/vulcanize/vulcanizedb
+
+# app container
+FROM alpine
+WORKDIR /app
+
+ARG USER
+ARG config_file=environments/example.toml
+ARG vdb_command=syncPublishScreenAndServe
+ARG vdb_pg_connect="postgres://$USER@/vulcanize_public?sslmode=disable"
+ARG vdb_dbname="vulcanize_public"
+
+# setup environment
+ENV VDB_COMMAND="$vdb_command"
+ENV VDB_PG_CONNECT="$vdb_pg_connect"
+
+RUN adduser -Du 5000 $USER
+USER $USER
+
+# chown first so dir is writable
+# note: using $USER is merged, but not in the stable release yet
+COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/vulcanizedb/$config_file config.toml
+COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/vulcanizedb/dockerfiles/seed_node/startup_script.sh .
+
+# keep binaries immutable
+COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/vulcanizedb vulcanizedb
+COPY --from=builder /go/src/github.com/pressly/goose/cmd/goose/goose goose
+COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/db/migrations migrations/vulcanizedb
+COPY --from=builder /go/src/github.com/ipfs/go-ipfs ipfs
+COPY --from=builder /go/src/github.com/ethereum/go-ethereum geth
+
+# DEBUG
+COPY --from=builder /usr/bin/telnet /bin/telnet
+
+CMD ["./startup_script.sh"]
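The image is parameterized through the build args above (USER, config_file, vdb_command, vdb_pg_connect, vdb_dbname). A hypothetical build invocation, with the image tag and user name as placeholders rather than anything defined in this commit, run from what is presumably the vulcanizedb repository root since the builder stage does ADD . . there:

# Hypothetical build command; tag and USER value are placeholders.
docker build -f dockerfiles/seed_node/Dockerfile \
  --build-arg USER=vdbuser \
  --build-arg config_file=environments/seedNodeStaging.toml \
  -t vulcanize/seed_node .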
dockerfiles/seed_node/startup_script.sh (new executable file, 37 lines)
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+# Runs the migrations and starts the syncPublishScreenAndServe service
+
+# Exit if the variable tests fail
+set -e
+
+# Check the database variables are set
+test $DATABASE_NAME
+test $DATABASE_HOSTNAME
+test $DATABASE_PORT
+test $DATABASE_USER
+test $DATABASE_PASSWORD
+
+# Export our database variables so that the IPFS Postgres plugin can use them
+export IPFS_PGHOST=$DATABASE_HOSTNAME
+export IPFS_PGUSER=$DATABASE_USER
+export IPFS_PGDATABASE=$DATABASE_NAME
+export IPFS_PGPORT=$DATABASE_PORT
+export IPFS_PGPASSWORD=$DATABASE_PASSWORD
+
+# Construct the connection string for postgres
+CONNECT_STRING=postgresql://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOSTNAME:$DATABASE_PORT/$DATABASE_NAME?sslmode=disable
+echo "Connecting with: $CONNECT_STRING"
+
+set +e
+
+# Run the DB migrations
+./goose postgres "$CONNECT_STRING" up
+if [ $? -eq 0 ]; then
+    # Fire up the services
+    ipfs ipfs init --profile=postgresds
+    geth --statediff --statediff.streamblock --ws --syncmode=full
+    ./vulcanizedb syncPublishScreenAndServe --config environments/seedNodeStaging.toml &
+else
+    echo "Could not run migrations. Are the database details correct?"
+fi
+wait
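At runtime the script requires the five DATABASE_* variables it tests at the top; because of set -e, a bare test against an unset variable aborts the container before the migrations run. A hypothetical invocation against a reachable Postgres instance, with the image tag, hostname, and credentials as placeholder values:

# Hypothetical run command; all values shown are placeholders.
docker run --rm \
  -e DATABASE_NAME=vulcanize_public \
  -e DATABASE_HOSTNAME=localhost \
  -e DATABASE_PORT=5432 \
  -e DATABASE_USER=postgres \
  -e DATABASE_PASSWORD=postgres \
  vulcanize/seed_node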
@@ -64,7 +64,7 @@ func (r *blockRetriever) retrieveFirstBlockFromReceipts(contractAddr string) (in
     }
     err := r.db.Get(
         &firstBlock,
-        `SELECT number FROM blocks
+        `SELECT number FROM eth_blocks
         WHERE id = (SELECT block_id FROM full_sync_receipts
         WHERE contract_address_id = $1
         ORDER BY block_id ASC
@@ -92,7 +92,7 @@ func (r *blockRetriever) RetrieveMostRecentBlock() (int64, error) {
     var lastBlock int64
     err := r.db.Get(
         &lastBlock,
-        "SELECT number FROM blocks ORDER BY number DESC LIMIT 1",
+        "SELECT number FROM eth_blocks ORDER BY number DESC LIMIT 1",
     )
 
     return lastBlock, err
@@ -265,7 +265,7 @@ func TearDown(db *postgres.DB) {
     _, err = tx.Exec(`DELETE FROM addresses`)
     Expect(err).NotTo(HaveOccurred())
 
-    _, err = tx.Exec(`DELETE FROM blocks`)
+    _, err = tx.Exec(`DELETE FROM eth_blocks`)
     Expect(err).NotTo(HaveOccurred())
 
     _, err = tx.Exec(`DELETE FROM headers`)
@@ -19,8 +19,10 @@ package repositories
 import (
     "database/sql"
     "errors"
+
     "github.com/jmoiron/sqlx"
     "github.com/sirupsen/logrus"
+
     "github.com/vulcanize/vulcanizedb/pkg/core"
     "github.com/vulcanize/vulcanizedb/pkg/datastore"
     "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
@@ -43,7 +45,7 @@ func NewBlockRepository(database *postgres.DB) *BlockRepository {
 func (blockRepository BlockRepository) SetBlocksStatus(chainHead int64) error {
     cutoff := chainHead - blocksFromHeadBeforeFinal
     _, err := blockRepository.database.Exec(`
-        UPDATE blocks SET is_final = TRUE
+        UPDATE eth_blocks SET is_final = TRUE
         WHERE is_final = FALSE AND number < $1`,
         cutoff)
 
@@ -74,7 +76,7 @@ func (blockRepository BlockRepository) MissingBlockNumbers(startingBlockNumber i
         FROM (
             SELECT generate_series($1::INT, $2::INT) AS all_block_numbers) series
         WHERE all_block_numbers NOT IN (
-            SELECT number FROM blocks WHERE eth_node_fingerprint = $3
+            SELECT number FROM eth_blocks WHERE eth_node_fingerprint = $3
         ) `,
         startingBlockNumber,
         highestBlockNumber, nodeID)
@@ -102,7 +104,7 @@ func (blockRepository BlockRepository) GetBlock(blockNumber int64) (core.Block,
         extra_data,
         reward,
         uncles_reward
-        FROM blocks
+        FROM eth_blocks
         WHERE eth_node_id = $1 AND number = $2`, blockRepository.database.NodeID, blockNumber)
     savedBlock, err := blockRepository.loadBlock(blockRows)
     if err != nil {
@@ -124,7 +126,7 @@ func (blockRepository BlockRepository) insertBlock(block core.Block) (int64, err
         return 0, postgres.ErrBeginTransactionFailed(beginErr)
     }
     insertBlockErr := tx.QueryRow(
-        `INSERT INTO blocks
+        `INSERT INTO eth_blocks
         (eth_node_id, number, gas_limit, gas_used, time, difficulty, hash, nonce, parent_hash, size, uncle_hash, is_final, miner, extra_data, reward, uncles_reward, eth_node_fingerprint)
         VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)
         RETURNING id `,
@@ -260,7 +262,7 @@ func (blockRepository BlockRepository) getBlockHash(block core.Block) (string, b
     // TODO: handle possible error
     blockRepository.database.Get(&retrievedBlockHash,
         `SELECT hash
-        FROM blocks
+        FROM eth_blocks
         WHERE number = $1 AND eth_node_id = $2`,
         block.Number, blockRepository.database.NodeID)
     return retrievedBlockHash, blockExists(retrievedBlockHash)
@@ -287,7 +289,7 @@ func blockExists(retrievedBlockHash string) bool {
 
 func (blockRepository BlockRepository) removeBlock(blockNumber int64) error {
     _, err := blockRepository.database.Exec(
-        `DELETE FROM blocks WHERE number=$1 AND eth_node_id=$2`,
+        `DELETE FROM eth_blocks WHERE number=$1 AND eth_node_id=$2`,
         blockNumber, blockRepository.database.NodeID)
     if err != nil {
         return postgres.ErrDBDeleteFailed(err)
@@ -67,6 +67,8 @@ func (ecr *EthCIDRetriever) RetrieveCIDs(streamFilters config.Subscription) ([]C
     }
     log.Debug("backfill starting block:", streamFilters.StartingBlock)
     log.Debug("backfill ending block:", endingBlock)
+    // THIS IS SUPER EXPENSIVE HAVING TO CYCLE THROUGH EACH BLOCK, NEED BETTER WAY TO FETCH CIDS
+    // WHILE STILL MAINTAINING RELATION INFO ABOUT WHAT BLOCK THE CIDS BELONG TO
     for i := streamFilters.StartingBlock; i <= endingBlock; i++ {
         cw := CidWrapper{}
         if !streamFilters.HeaderFilter.Off {
@@ -88,6 +88,7 @@ func NewTestDB(node core.Node) *postgres.DB {
 func CleanTestDB(db *postgres.DB) {
     db.MustExec("DELETE FROM addresses")
     db.MustExec("DELETE FROM blocks")
+    db.MustExec("DELETE FROM eth_blocks")
     db.MustExec("DELETE FROM checked_headers")
     // can't delete from eth_nodes since this function is called after the required eth_node is persisted
     db.MustExec("DELETE FROM full_sync_logs")