Compare commits

..

9 Commits

Author SHA1 Message Date
89d08ac1b9 Add eth.blob_hashes table (#8)
All checks were successful
Publish Docker image / Build and publish image (release) Successful in 20s
Adds `eth.blob_hashes`, indexed by blob transaction hash.

Currently based on #7

Reviewed-on: #8
2024-08-01 00:21:06 +00:00
d24142a301 Update schema.sql 2024-07-24 07:13:22 +00:00
9e0b69d23c Add CI sanity check
+ compose healthcheck
2024-07-24 07:13:22 +00:00
825a0bc235 Simplify startup script 2024-07-24 07:13:22 +00:00
e90bc38fdb Remove unused Makefile, workflows 2024-07-24 07:13:22 +00:00
ccfc2dbc84 Simplify Dockerfile (#9)
We don't actually need a Go based image, since this just installs the `goose` binary.

Reviewed-on: #9
2024-07-24 07:05:47 +00:00
fdd56e9803 Add withdrawals (EIP-4895) (#7)
All checks were successful
Publish Docker image / Build and publish image (release) Successful in 29s
Support for validator withdrawal objects:
- new table `eth.withdrawal_cids`
- new column `withdrawals_root` in `eth.header_cids`

Reviewed-on: #7
2024-06-25 11:24:00 +00:00
68a347e38d Add .gitea specific workflows. (#6)
Reviewed-on: #6
Co-authored-by: Thomas E Lackey <telackey@bozemanpass.com>
Co-committed-by: Thomas E Lackey <telackey@bozemanpass.com>
2024-01-22 19:16:56 +00:00
097804b1e9 Merge pull request 'update Dockerfile for ARM compatibility' (#4) from arm-dockerfile into v5
Reviewed-on: #4
2023-10-19 13:26:05 +00:00
11 changed files with 134 additions and 232 deletions

View File

@ -0,0 +1,23 @@
name: Basic test
on: [pull_request]
jobs:
  basic-test:
    name: Build and sanity check
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Build Docker images
        run: docker compose build
      - name: Run Docker containers
        run: docker compose up -d
      - name: Check migration version
        # Job-level timeout bounds the polling loop below.
        timeout-minutes: 1
        run: |
          # Expected migration version = number of migration files on disk.
          MIGRATION_VERSION=$(ls db/migrations/*.sql | wc -l)
          # Poll goose until it reports that all migrations have been applied.
          # Quote both sides so [[ ]] does a literal string comparison rather
          # than pattern matching, and sleep between attempts instead of
          # busy-spawning `docker compose run` back-to-back.
          while
            version=$(docker compose run --rm migrations version 2>&1 | tail -1 | awk '{print $(NF)}')
            [[ "$version" != "$MIGRATION_VERSION" ]]; do
            echo "Incorrect version: $version"
            echo "Retrying..."
            sleep 2
          done

View File

@ -21,6 +21,6 @@ jobs:
-t git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:${{steps.vars.outputs.tag}} -t git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:${{steps.vars.outputs.tag}}
- name: Push image tags - name: Push image tags
run: | run: |
echo ${{ secrets.GITEA_PUBLISH_TOKEN }} | docker login https://git.vdb.to -u cerccicd --password-stdin echo ${{ secrets.CICD_PUBLISH_TOKEN }} | docker login https://git.vdb.to -u cerccicd --password-stdin
docker push git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:${{steps.vars.outputs.sha}} docker push git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:${{steps.vars.outputs.sha}}
docker push git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:${{steps.vars.outputs.tag}} docker push git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:${{steps.vars.outputs.tag}}

View File

@ -1,78 +0,0 @@
name: Docker Build
on: [pull_request]
jobs:
build:
name: Run docker build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Run docker build
run: docker compose build
test:
# Add a dummy job to avoid failing GitHub CI checks.
# Other checks to be added later.
name: Dummy job
runs-on: ubuntu-latest
steps:
- name: Run dummy job
run: echo "Empty dummy job"
# concise_migration_diff:
# name: Verify concise migration and generated schema
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v2
# - name: Run docker concise migration build
# run: make docker-concise-migration-build
# - name: Run database
# run: docker-compose -f docker-compose.test.yml up -d test-db
# - name: Test concise migration
# run: |
# sleep 10
# docker run --rm --network host -e DATABASE_USER=vdbm -e DATABASE_PASSWORD=password \
# -e DATABASE_HOSTNAME=127.0.0.1 -e DATABASE_PORT=8066 -e DATABASE_NAME=vulcanize_testing \
# vulcanize/concise-migration-build
# - name: Verify schema is latest
# run: |
# PGPASSWORD="password" pg_dump -h localhost -p 8066 -U vdbm vulcanize_testing --no-owner --schema-only > ./db/migration_schema.sql
# ./scripts/check_diff.sh ./db/migration_schema.sql db/schema.sql
# incremental_migration_diff:
# name: Compare concise migration schema with incremental migration.
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v2
# - name: Run database
# run: docker-compose -f docker-compose.test.yml up -d test-db statediff-migrations
# - name: Test incremental migration
# run: |
# sleep 10
# docker run --rm --network host -e DATABASE_USER=vdbm -e DATABASE_PASSWORD=password \
# -e DATABASE_HOSTNAME=127.0.0.1 -e DATABASE_PORT=8066 -e DATABASE_NAME=vulcanize_testing \
# vulcanize/statediff-migrations:v0.9.0
# - name: Verify schema is latest
# run: |
# PGPASSWORD="password" pg_dump -h localhost -p 8066 -U vdbm vulcanize_testing --no-owner --schema-only > ./db/migration_schema.sql
# ./scripts/check_diff.sh db/schema.sql ./db/migration_schema.sql
# migration:
# name: Compare up and down migration
# env:
# GOPATH: /tmp/go
# strategy:
# matrix:
# go-version: [ 1.16.x ]
# os: [ ubuntu-latest ]
# runs-on: ${{ matrix.os }}
# steps:
# - name: Create GOPATH
# run: mkdir -p /tmp/go
# - name: Install Go
# uses: actions/setup-go@v2
# with:
# go-version: ${{ matrix.go-version }}
# - uses: actions/checkout@v2
# - name: Test migration
# run: |
# timeout 5m make test-migrations

View File

@ -1,12 +1,10 @@
FROM golang:1.18-alpine as builder FROM alpine as builder
ADD . /go/src/github.com/cerc-io/ipld-eth-db
# Get migration tool # Get migration tool
WORKDIR / WORKDIR /
ARG GOOSE_VER="v3.6.1" ARG GOOSE_VERSION="v3.6.1"
RUN arch=$(arch | sed s/aarch64/arm64/) && \ RUN arch=$(arch | sed s/aarch64/arm64/) && \
wget -O ./goose https://github.com/pressly/goose/releases/download/${GOOSE_VER}/goose_linux_${arch} wget -O ./goose https://github.com/pressly/goose/releases/download/${GOOSE_VERSION}/goose_linux_${arch}
RUN chmod +x ./goose RUN chmod +x ./goose
# app container # app container
@ -14,9 +12,8 @@ FROM alpine
WORKDIR /app WORKDIR /app
COPY --from=builder /go/src/github.com/cerc-io/ipld-eth-db/scripts/startup_script.sh .
COPY --from=builder /goose goose COPY --from=builder /goose goose
COPY --from=builder /go/src/github.com/cerc-io/ipld-eth-db/db/migrations migrations ADD scripts/startup_script.sh .
ADD db/migrations migrations
ENTRYPOINT ["/app/startup_script.sh"] ENTRYPOINT ["/app/startup_script.sh"]

121
Makefile
View File

@ -1,121 +0,0 @@
ifndef GOPATH
override GOPATH = $(HOME)/go
endif
BIN = $(GOPATH)/bin
# Tools
## Migration tool
GOOSE = $(BIN)/goose
$(BIN)/goose:
go get -u github.com/pressly/goose/cmd/goose
.PHONY: installtools
installtools: | $(GOOSE)
echo "Installing tools"
#Database
HOST_NAME = localhost
PORT = 5432
NAME =
USER = postgres
PASSWORD = password
CONNECT_STRING=postgresql://$(USER):$(PASSWORD)@$(HOST_NAME):$(PORT)/$(NAME)?sslmode=disable
# Parameter checks
## Check that DB variables are provided
.PHONY: checkdbvars
checkdbvars:
test -n "$(HOST_NAME)" # $$HOST_NAME
test -n "$(PORT)" # $$PORT
test -n "$(NAME)" # $$NAME
@echo $(CONNECT_STRING)
## Check that the migration variable (id/timestamp) is provided
.PHONY: checkmigration
checkmigration:
test -n "$(MIGRATION)" # $$MIGRATION
# Check that the migration name is provided
.PHONY: checkmigname
checkmigname:
test -n "$(NAME)" # $$NAME
# Migration operations
## Rollback the last migration
.PHONY: rollback
rollback: $(GOOSE) checkdbvars
$(GOOSE) -dir db/migrations postgres "$(CONNECT_STRING)" down
pg_dump -O -s $(CONNECT_STRING) > schema.sql
## Rollback to a select migration (id/timestamp)
.PHONY: rollback_to
rollback_to: $(GOOSE) checkmigration checkdbvars
$(GOOSE) -dir db/migrations postgres "$(CONNECT_STRING)" down-to "$(MIGRATION)"
## Rollback pre_batch_set
.PHONY: rollback_pre_batch_set
rollback_pre_batch_set: $(GOOSE) checkdbvars
$(GOOSE) -dir db/pre_batch_processing_migrations postgres "$(CONNECT_STRING)" down
## Rollback post_batch_set
.PHONY: rollback_post_batch_set
rollback_post_batch_set: $(GOOSE) checkdbvars
$(GOOSE) -dir db/post_batch_processing_migrations postgres "$(CONNECT_STRING)" down
## Apply the next up migration
.PHONY: migrate_up_by_one
migrate_up_by_one: $(GOOSE) checkdbvars
$(GOOSE) -dir db/migrations postgres "$(CONNECT_STRING)" up-by-one
## Apply all migrations not already run
.PHONY: migrate
migrate: $(GOOSE) checkdbvars
$(GOOSE) -dir db/migrations postgres "$(CONNECT_STRING)" up
pg_dump -O -s $(CONNECT_STRING) > schema.sql
## Apply all the migrations used to generate a UML diagram (containing FKs)
.PHONY: migrate_for_uml
migrate_for_uml: $(GOOSE) checkdbvars
$(GOOSE) -dir db/migrations postgres "$(CONNECT_STRING)" up-to 00018
## Apply migrations to be ran before a batch processing
.PHONY: migrate_pre_batch_set
migrate_pre_batch_set: $(GOOSE) checkdbvars
$(GOOSE) -dir db/pre_batch_processing_migrations postgres "$(CONNECT_STRING)" up
## Apply migrations to be ran after a batch processing, one-by-one
.PHONY: migrate_post_batch_set_up_by_one
migrate_post_batch_set_up_by_one: $(GOOSE) checkdbvars
$(GOOSE) -dir db/post_batch_processing_migrations postgres "$(CONNECT_STRING)" up-by-one
## Apply migrations to be ran after a batch processing
.PHONY: migrate_post_batch_set
migrate_post_batch_set: $(GOOSE) checkdbvars
$(GOOSE) -dir db/post_batch_processing_migrations postgres "$(CONNECT_STRING)" up
## Create a new migration file
.PHONY: new_migration
new_migration: $(GOOSE) checkmigname
$(GOOSE) -dir db/migrations create $(NAME) sql
## Check which migrations are applied at the moment
.PHONY: migration_status
migration_status: $(GOOSE) checkdbvars
$(GOOSE) -dir db/migrations postgres "$(CONNECT_STRING)" status
# Convert timestamped migrations to versioned (to be run in CI);
# merge timestamped files to prevent conflict
.PHONY: version_migrations
version_migrations:
$(GOOSE) -dir db/migrations fix
# Import a psql schema to the database
.PHONY: import
import:
test -n "$(NAME)" # $$NAME
psql $(NAME) < schema.sql
.PHONY: test-migrations
test-migrations: $(GOOSE)
./scripts/test_migration.sh

View File

@ -2,13 +2,14 @@ services:
migrations: migrations:
restart: on-failure restart: on-failure
depends_on: depends_on:
- ipld-eth-db ipld-eth-db:
condition: service_healthy
# Use local build # Use local build
build: build:
context: . context: .
dockerfile: Dockerfile dockerfile: Dockerfile
# Use an existing image # Use an existing image
image: cerc/ipld-eth-db image: cerc/ipld-eth-db:local
environment: environment:
DATABASE_USER: "vdbm" DATABASE_USER: "vdbm"
DATABASE_NAME: "cerc_testing" DATABASE_NAME: "cerc_testing"
@ -26,3 +27,8 @@ services:
POSTGRES_PASSWORD: "password" POSTGRES_PASSWORD: "password"
ports: ports:
- "127.0.0.1:8077:5432" - "127.0.0.1:8077:5432"
healthcheck:
test: ["CMD", "pg_isready", "-U", "vdbm"]
interval: 2s
timeout: 1s
retries: 3

View File

@ -15,6 +15,7 @@ CREATE TABLE IF NOT EXISTS eth.header_cids (
timestamp BIGINT NOT NULL, timestamp BIGINT NOT NULL,
coinbase VARCHAR(66) NOT NULL, coinbase VARCHAR(66) NOT NULL,
canonical BOOLEAN NOT NULL DEFAULT TRUE, canonical BOOLEAN NOT NULL DEFAULT TRUE,
withdrawals_root VARCHAR(66) NOT NULL,
PRIMARY KEY (block_hash, block_number) PRIMARY KEY (block_hash, block_number)
); );

View File

@ -0,0 +1,16 @@
-- +goose Up
-- One row per validator withdrawal object (EIP-4895), keyed by its position
-- (index) within the block identified by (header_id, block_number).
CREATE TABLE IF NOT EXISTS eth.withdrawal_cids (
block_number BIGINT NOT NULL,
-- presumably the hash of the containing header (VARCHAR(66) = 0x + 32 bytes hex) -- TODO confirm
header_id VARCHAR(66) NOT NULL,
-- CID of the withdrawal object's IPLD block
cid TEXT NOT NULL,
-- position of the withdrawal within the block
index INTEGER NOT NULL,
validator INTEGER NOT NULL,
address VARCHAR(66) NOT NULL,
amount NUMERIC NOT NULL,
PRIMARY KEY (index, header_id, block_number)
);
-- Convert to a TimescaleDB hypertable partitioned on block_number;
-- migrate_data handles any pre-existing rows, chunks span 32768 blocks.
SELECT create_hypertable('eth.withdrawal_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-- +goose Down
DROP TABLE eth.withdrawal_cids;

View File

@ -0,0 +1,12 @@
-- +goose Up
-- Maps a blob transaction hash to its blob hashes, one row per
-- (tx_hash, index) position. NOTE(review): presumably EIP-4844 versioned
-- blob hashes, per the commit message "indexed by blob transaction hash" -- confirm.
CREATE TABLE eth.blob_hashes (
tx_hash VARCHAR(66) NOT NULL,
-- position of the blob hash within the transaction's blob list
index INTEGER NOT NULL,
blob_hash BYTEA NOT NULL
);
-- Enforce at most one blob hash per position in a given transaction.
CREATE UNIQUE INDEX blob_hashes_tx_hash_index ON eth.blob_hashes(tx_hash, index);
-- +goose Down
DROP INDEX eth.blob_hashes_tx_hash_index;
DROP TABLE eth.blob_hashes;

View File

@ -2,8 +2,8 @@
-- PostgreSQL database dump -- PostgreSQL database dump
-- --
-- Dumped from database version 14.8 -- Dumped from database version 14.12
-- Dumped by pg_dump version 14.8 (Ubuntu 14.8-0ubuntu0.22.04.1) -- Dumped by pg_dump version 14.12
SET statement_timeout = 0; SET statement_timeout = 0;
SET lock_timeout = 0; SET lock_timeout = 0;
@ -190,6 +190,17 @@ SET default_tablespace = '';
SET default_table_access_method = heap; SET default_table_access_method = heap;
--
-- Name: blob_hashes; Type: TABLE; Schema: eth; Owner: -
--
CREATE TABLE eth.blob_hashes (
tx_hash character varying(66) NOT NULL,
index integer NOT NULL,
blob_hash bytea NOT NULL
);
-- --
-- Name: header_cids; Type: TABLE; Schema: eth; Owner: - -- Name: header_cids; Type: TABLE; Schema: eth; Owner: -
-- --
@ -209,7 +220,8 @@ CREATE TABLE eth.header_cids (
bloom bytea NOT NULL, bloom bytea NOT NULL,
"timestamp" bigint NOT NULL, "timestamp" bigint NOT NULL,
coinbase character varying(66) NOT NULL, coinbase character varying(66) NOT NULL,
canonical boolean DEFAULT true NOT NULL canonical boolean DEFAULT true NOT NULL,
withdrawals_root character varying(66) NOT NULL
); );
@ -333,6 +345,21 @@ CREATE TABLE eth.uncle_cids (
); );
--
-- Name: withdrawal_cids; Type: TABLE; Schema: eth; Owner: -
--
CREATE TABLE eth.withdrawal_cids (
block_number bigint NOT NULL,
header_id character varying(66) NOT NULL,
cid text NOT NULL,
index integer NOT NULL,
validator integer NOT NULL,
address character varying(66) NOT NULL,
amount numeric NOT NULL
);
-- --
-- Name: watched_addresses; Type: TABLE; Schema: eth_meta; Owner: - -- Name: watched_addresses; Type: TABLE; Schema: eth_meta; Owner: -
-- --
@ -490,6 +517,14 @@ ALTER TABLE ONLY eth.uncle_cids
ADD CONSTRAINT uncle_cids_pkey PRIMARY KEY (block_hash, block_number); ADD CONSTRAINT uncle_cids_pkey PRIMARY KEY (block_hash, block_number);
--
-- Name: withdrawal_cids withdrawal_cids_pkey; Type: CONSTRAINT; Schema: eth; Owner: -
--
ALTER TABLE ONLY eth.withdrawal_cids
ADD CONSTRAINT withdrawal_cids_pkey PRIMARY KEY (index, header_id, block_number);
-- --
-- Name: watched_addresses watched_addresses_pkey; Type: CONSTRAINT; Schema: eth_meta; Owner: - -- Name: watched_addresses watched_addresses_pkey; Type: CONSTRAINT; Schema: eth_meta; Owner: -
-- --
@ -530,6 +565,13 @@ ALTER TABLE ONLY public.nodes
ADD CONSTRAINT nodes_pkey PRIMARY KEY (node_id); ADD CONSTRAINT nodes_pkey PRIMARY KEY (node_id);
--
-- Name: blob_hashes_tx_hash_index; Type: INDEX; Schema: eth; Owner: -
--
CREATE UNIQUE INDEX blob_hashes_tx_hash_index ON eth.blob_hashes USING btree (tx_hash, index);
-- --
-- Name: header_block_number_index; Type: INDEX; Schema: eth; Owner: - -- Name: header_block_number_index; Type: INDEX; Schema: eth; Owner: -
-- --
@ -782,6 +824,13 @@ CREATE UNIQUE INDEX uncle_cid_block_number_index ON eth.uncle_cids USING btree (
CREATE INDEX uncle_header_id_index ON eth.uncle_cids USING btree (header_id); CREATE INDEX uncle_header_id_index ON eth.uncle_cids USING btree (header_id);
--
-- Name: withdrawal_cids_block_number_idx; Type: INDEX; Schema: eth; Owner: -
--
CREATE INDEX withdrawal_cids_block_number_idx ON eth.withdrawal_cids USING btree (block_number DESC);
-- --
-- Name: blocks_block_number_idx; Type: INDEX; Schema: ipld; Owner: - -- Name: blocks_block_number_idx; Type: INDEX; Schema: ipld; Owner: -
-- --
@ -793,49 +842,56 @@ CREATE INDEX blocks_block_number_idx ON ipld.blocks USING btree (block_number DE
-- Name: log_cids ts_insert_blocker; Type: TRIGGER; Schema: eth; Owner: - -- Name: log_cids ts_insert_blocker; Type: TRIGGER; Schema: eth; Owner: -
-- --
CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON eth.log_cids FOR EACH ROW EXECUTE FUNCTION _timescaledb_internal.insert_blocker(); CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON eth.log_cids FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker();
-- --
-- Name: receipt_cids ts_insert_blocker; Type: TRIGGER; Schema: eth; Owner: - -- Name: receipt_cids ts_insert_blocker; Type: TRIGGER; Schema: eth; Owner: -
-- --
CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON eth.receipt_cids FOR EACH ROW EXECUTE FUNCTION _timescaledb_internal.insert_blocker(); CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON eth.receipt_cids FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker();
-- --
-- Name: state_cids ts_insert_blocker; Type: TRIGGER; Schema: eth; Owner: - -- Name: state_cids ts_insert_blocker; Type: TRIGGER; Schema: eth; Owner: -
-- --
CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON eth.state_cids FOR EACH ROW EXECUTE FUNCTION _timescaledb_internal.insert_blocker(); CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON eth.state_cids FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker();
-- --
-- Name: storage_cids ts_insert_blocker; Type: TRIGGER; Schema: eth; Owner: - -- Name: storage_cids ts_insert_blocker; Type: TRIGGER; Schema: eth; Owner: -
-- --
CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON eth.storage_cids FOR EACH ROW EXECUTE FUNCTION _timescaledb_internal.insert_blocker(); CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON eth.storage_cids FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker();
-- --
-- Name: transaction_cids ts_insert_blocker; Type: TRIGGER; Schema: eth; Owner: - -- Name: transaction_cids ts_insert_blocker; Type: TRIGGER; Schema: eth; Owner: -
-- --
CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON eth.transaction_cids FOR EACH ROW EXECUTE FUNCTION _timescaledb_internal.insert_blocker(); CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON eth.transaction_cids FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker();
-- --
-- Name: uncle_cids ts_insert_blocker; Type: TRIGGER; Schema: eth; Owner: - -- Name: uncle_cids ts_insert_blocker; Type: TRIGGER; Schema: eth; Owner: -
-- --
CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON eth.uncle_cids FOR EACH ROW EXECUTE FUNCTION _timescaledb_internal.insert_blocker(); CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON eth.uncle_cids FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker();
--
-- Name: withdrawal_cids ts_insert_blocker; Type: TRIGGER; Schema: eth; Owner: -
--
CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON eth.withdrawal_cids FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker();
-- --
-- Name: blocks ts_insert_blocker; Type: TRIGGER; Schema: ipld; Owner: - -- Name: blocks ts_insert_blocker; Type: TRIGGER; Schema: ipld; Owner: -
-- --
CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON ipld.blocks FOR EACH ROW EXECUTE FUNCTION _timescaledb_internal.insert_blocker(); CREATE TRIGGER ts_insert_blocker BEFORE INSERT ON ipld.blocks FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker();
-- --

View File

@ -1,5 +1,4 @@
#!/bin/sh #!/bin/sh
# Runs the db migrations
set -e set -e
# Default command is "goose up" # Default command is "goose up"
@ -11,14 +10,5 @@ fi
VDB_PG_CONNECT=postgresql://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOSTNAME:$DATABASE_PORT/$DATABASE_NAME?sslmode=disable VDB_PG_CONNECT=postgresql://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOSTNAME:$DATABASE_PORT/$DATABASE_NAME?sslmode=disable
# Run the DB migrations # Run the DB migrations
echo "Connecting with: $VDB_PG_CONNECT" set -x
echo "Running database migrations" exec ./goose -dir migrations postgres "$VDB_PG_CONNECT" "$@"
./goose -dir migrations postgres "$VDB_PG_CONNECT" "$@"
# If the db migrations ran without err
if [[ $? -eq 0 ]]; then
echo "Migration process ran successfully"
else
echo "Could not run migrations. Are the database details correct?"
exit 1
fi