Run migrations on a single-node TimescaleDB setup
parent b64ad6e3f7
commit 49daf851cc
.github/workflows/on-pr.yaml (vendored, 6 lines changed)
@@ -3,6 +3,12 @@ name: Docker Build
 on: [pull_request]

 jobs:
+  test:
+    name: Dummy job
+    runs-on: ubuntu-latest
+    steps:
+      - name: Run dummy job
+        run: echo "Empty dummy job"
   # concise_migration_diff:
   #   name: Verify concise migration and generated schema
   #   runs-on: ubuntu-latest
@@ -17,10 +17,8 @@ WORKDIR /app
 COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-db/scripts/startup_script.sh .

-# copy over files for multi-node setup
-COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-db/docker-compose.test.yml docker-multi-node/docker-compose.test.yml
-COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-db/scripts/init-access-node.sh docker-multi-node/scripts/init-access-node.sh
-COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-db/scripts/init-data-node.sh docker-multi-node/scripts/init-data-node.sh
+# copy over file for TimescaleDB setup
+COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-db/docker-compose.test.yml docker-tsdb/docker-compose.test.yml

 COPY --from=builder /go/src/github.com/pressly/goose/cmd/goose/goose goose
 COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-db/db/migrations migrations/vulcanizedb
README.md (14 lines changed)
@@ -12,22 +12,22 @@ Schemas and utils for IPLD ETH Postgres database
   docker-compose down -v --remove-orphans
   ```

-* Spin up an access node and three data nodes using [docker-compose.test.yml](./docker-compose.test.yml):
+* Spin up a TimescaleDB instance using [docker-compose.test.yml](./docker-compose.test.yml):

   ```bash
   docker-compose -f docker-compose.test.yml up
   ```

-  Following final output should be seen on all the nodes:
+  Following final output should be seen:

   ```
   LOG: TimescaleDB background worker launcher connected to shared catalogs
   ```

-* Edit [startup_script.sh](./scripts/startup_script.sh) to change the number of migrations to be run:
+* Edit [startup_script.sh](./scripts/startup_script.sh) to change the number of migrations to be run if required:

   ```bash
-  ./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up-to 24
+  ./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up-to 22
   ```

 * In another `ipld-eth-db` terminal window, build an image `migrations-test` using [Dockerfile](./db/Dockerfile):
@@ -36,10 +36,10 @@ Schemas and utils for IPLD ETH Postgres database
   docker build -t migrations-test -f ./db/Dockerfile .
   ```

-* After the access and data nodes have spun up, start a container using `migrations-test` image to run the db migrations:
+* Start a container using `migrations-test` image to run the db migrations:

   ```bash
   # Here, we are running the container using host network.
-  # So connect to access node on 127.0.0.1:8066
-  docker run --rm --network host -e DATABASE_USER=postgres -e DATABASE_PASSWORD=password -e DATABASE_HOSTNAME=127.0.0.1 -e DATABASE_PORT=8066 -e DATABASE_NAME=vulcanize_testing_v4 migrations-test
+  # So connect to TimescaleDB on 127.0.0.1:8066
+  docker run --rm --network host -e DATABASE_USER=vdbm -e DATABASE_PASSWORD=password -e DATABASE_HOSTNAME=127.0.0.1 -e DATABASE_PORT=8066 -e DATABASE_NAME=vulcanize_testing_v4 migrations-test
   ```
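Once the migrations container exits cleanly, the result can be sanity-checked directly against the database. A minimal sketch, assuming psql is available locally, the credentials shown above, goose's default `goose_db_version` bookkeeping table, and the TimescaleDB 2.x information views:

```bash
# Check which migrations goose recorded and which tables are now hypertables.
psql postgresql://vdbm:password@127.0.0.1:8066/vulcanize_testing_v4 \
  -c "SELECT version_id, is_applied, tstamp FROM goose_db_version ORDER BY id DESC LIMIT 5;" \
  -c "SELECT hypertable_schema, hypertable_name FROM timescaledb_information.hypertables;"
```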
@@ -1,16 +0,0 @@ (deleted file)
-- +goose NO TRANSACTION
-- +goose Up
-- this is generated by a script
SELECT add_data_node('dn3', host => 'data-node-3', port => 5432, database => 'vulcanize_testing_v4', password => 'password');
SELECT add_data_node('dn2', host => 'data-node-2', port => 5432, database => 'vulcanize_testing_v4', password => 'password');
SELECT add_data_node('dn1', host => 'data-node-1', port => 5432, database => 'vulcanize_testing_v4', password => 'password');

CALL distributed_exec($$ CREATE SCHEMA eth $$);

-- +goose Down
-- this is generated by a script
CALL distributed_exec($$ DROP SCHEMA eth $$);

SELECT delete_data_node('dn1', force => true, if_attached => true);
SELECT delete_data_node('dn2', force => true, if_attached => true);
SELECT delete_data_node('dn3', force => true, if_attached => true);
db/migrations/00021_convert_to_hypertables.sql (new file, 68 lines)
@@ -0,0 +1,68 @@
-- +goose Up
SELECT create_hypertable('public.blocks', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.header_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.uncle_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.transaction_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.receipt_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.state_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.storage_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.state_accounts', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.access_list_elements', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.log_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);

-- update version
INSERT INTO public.db_version (singleton, version) VALUES (true, 'v4.0.0-h')
ON CONFLICT (singleton) DO UPDATE SET (version, tstamp) = ('v4.0.0-h', NOW());

-- +goose Down
INSERT INTO public.db_version (singleton, version) VALUES (true, 'v4.0.0')
ON CONFLICT (singleton) DO UPDATE SET (version, tstamp) = ('v4.0.0', NOW());

-- reversing conversion to hypertable requires migrating all data from every chunk back to a single table
-- create new regular tables
CREATE TABLE eth.log_cids_i (LIKE eth.log_cids INCLUDING ALL);
CREATE TABLE eth.access_list_elements_i (LIKE eth.access_list_elements INCLUDING ALL);
CREATE TABLE eth.state_accounts_i (LIKE eth.state_accounts INCLUDING ALL);
CREATE TABLE eth.storage_cids_i (LIKE eth.storage_cids INCLUDING ALL);
CREATE TABLE eth.state_cids_i (LIKE eth.state_cids INCLUDING ALL);
CREATE TABLE eth.receipt_cids_i (LIKE eth.receipt_cids INCLUDING ALL);
CREATE TABLE eth.transaction_cids_i (LIKE eth.transaction_cids INCLUDING ALL);
CREATE TABLE eth.uncle_cids_i (LIKE eth.uncle_cids INCLUDING ALL);
CREATE TABLE eth.header_cids_i (LIKE eth.header_cids INCLUDING ALL);
CREATE TABLE public.blocks_i (LIKE public.blocks INCLUDING ALL);

-- migrate data
INSERT INTO eth.log_cids_i (SELECT * FROM eth.log_cids);
INSERT INTO eth.access_list_elements_i (SELECT * FROM eth.access_list_elements);
INSERT INTO eth.state_accounts_i (SELECT * FROM eth.state_accounts);
INSERT INTO eth.storage_cids_i (SELECT * FROM eth.storage_cids);
INSERT INTO eth.state_cids_i (SELECT * FROM eth.state_cids);
INSERT INTO eth.receipt_cids_i (SELECT * FROM eth.receipt_cids);
INSERT INTO eth.transaction_cids_i (SELECT * FROM eth.transaction_cids);
INSERT INTO eth.uncle_cids_i (SELECT * FROM eth.uncle_cids);
INSERT INTO eth.header_cids_i (SELECT * FROM eth.header_cids);
INSERT INTO public.blocks_i (SELECT * FROM public.blocks);

-- drop hypertables
DROP TABLE eth.log_cids;
DROP TABLE eth.access_list_elements;
DROP TABLE eth.state_accounts;
DROP TABLE eth.storage_cids;
DROP TABLE eth.state_cids;
DROP TABLE eth.receipt_cids;
DROP TABLE eth.transaction_cids;
DROP TABLE eth.uncle_cids;
DROP TABLE eth.header_cids;
DROP TABLE public.blocks;

-- rename new tables
ALTER TABLE eth.log_cids_i RENAME TO log_cids;
ALTER TABLE eth.access_list_elements_i RENAME TO access_list_elements;
ALTER TABLE eth.state_accounts_i RENAME TO state_accounts;
ALTER TABLE eth.storage_cids_i RENAME TO storage_cids;
ALTER TABLE eth.state_cids_i RENAME TO state_cids;
ALTER TABLE eth.receipt_cids_i RENAME TO receipt_cids;
ALTER TABLE eth.transaction_cids_i RENAME TO transaction_cids;
ALTER TABLE eth.uncle_cids_i RENAME TO uncle_cids;
ALTER TABLE eth.header_cids_i RENAME TO header_cids;
ALTER TABLE public.blocks_i RENAME TO blocks;
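Because `block_number` is an integer dimension, `chunk_time_interval => 32768` slices each hypertable into chunks of 32768 blocks, and the Down section has to copy data out, drop the hypertable, and rename a plain table back into place rather than simply undoing the conversion. A sketch of how the resulting chunks could be inspected, assuming TimescaleDB 2.x and the connection details used elsewhere in this commit:

```bash
# List the integer chunk ranges created for one of the converted tables.
psql postgresql://vdbm:password@127.0.0.1:8066/vulcanize_testing_v4 -c \
  "SELECT chunk_name, range_start_integer, range_end_integer
     FROM timescaledb_information.chunks
    WHERE hypertable_schema = 'eth' AND hypertable_name = 'header_cids';"
```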
@@ -1,130 +0,0 @@ (deleted file)
-- +goose Up
-- creating distributed hypertables from duplicate tables for now as we are getting the following error (while running geth
-- unit tests) if regular tables are directly converted to distributed hypertables
-- error: "cannot PREPARE a transaction that has executed LISTEN, UNLISTEN, or NOTIFY"

-- create new regular tables
CREATE TABLE eth.log_cids_i (LIKE eth.log_cids INCLUDING ALL);
CREATE TABLE eth.access_list_elements_i (LIKE eth.access_list_elements INCLUDING ALL);
CREATE TABLE eth.state_accounts_i (LIKE eth.state_accounts INCLUDING ALL);
CREATE TABLE eth.storage_cids_i (LIKE eth.storage_cids INCLUDING ALL);
CREATE TABLE eth.state_cids_i (LIKE eth.state_cids INCLUDING ALL);
CREATE TABLE eth.receipt_cids_i (LIKE eth.receipt_cids INCLUDING ALL);
CREATE TABLE eth.transaction_cids_i (LIKE eth.transaction_cids INCLUDING ALL);
CREATE TABLE eth.uncle_cids_i (LIKE eth.uncle_cids INCLUDING ALL);
CREATE TABLE eth.header_cids_i (LIKE eth.header_cids INCLUDING ALL);
CREATE TABLE public.blocks_i (LIKE public.blocks INCLUDING ALL);

-- turn them into distributed hypertables
SELECT create_distributed_hypertable('public.blocks_i', 'block_number', chunk_time_interval => 32768);
SELECT create_distributed_hypertable('eth.header_cids_i', 'block_number', chunk_time_interval => 32768);
SELECT create_distributed_hypertable('eth.uncle_cids_i', 'block_number', chunk_time_interval => 32768);
SELECT create_distributed_hypertable('eth.transaction_cids_i', 'block_number', chunk_time_interval => 32768);
SELECT create_distributed_hypertable('eth.receipt_cids_i', 'block_number', chunk_time_interval => 32768);
SELECT create_distributed_hypertable('eth.state_cids_i', 'block_number', chunk_time_interval => 32768);
SELECT create_distributed_hypertable('eth.storage_cids_i', 'block_number', chunk_time_interval => 32768);
SELECT create_distributed_hypertable('eth.state_accounts_i', 'block_number', chunk_time_interval => 32768);
SELECT create_distributed_hypertable('eth.access_list_elements_i', 'block_number', chunk_time_interval => 32768);
SELECT create_distributed_hypertable('eth.log_cids_i', 'block_number', chunk_time_interval => 32768);

-- migrate data
INSERT INTO eth.log_cids_i (SELECT * FROM eth.log_cids);
INSERT INTO eth.access_list_elements_i (SELECT * FROM eth.access_list_elements);
INSERT INTO eth.state_accounts_i (SELECT * FROM eth.state_accounts);
INSERT INTO eth.storage_cids_i (SELECT * FROM eth.storage_cids);
INSERT INTO eth.state_cids_i (SELECT * FROM eth.state_cids);
INSERT INTO eth.receipt_cids_i (SELECT * FROM eth.receipt_cids);
INSERT INTO eth.transaction_cids_i (SELECT * FROM eth.transaction_cids);
INSERT INTO eth.uncle_cids_i (SELECT * FROM eth.uncle_cids);
INSERT INTO eth.header_cids_i (SELECT * FROM eth.header_cids);
INSERT INTO public.blocks_i (SELECT * FROM public.blocks);

-- drop tables
DROP TABLE eth.log_cids;
DROP TABLE eth.access_list_elements;
DROP TABLE eth.state_accounts;
DROP TABLE eth.storage_cids;
DROP TABLE eth.state_cids;
DROP TABLE eth.receipt_cids;
DROP TABLE eth.transaction_cids;
DROP TABLE eth.uncle_cids;
DROP TABLE eth.header_cids;
DROP TABLE public.blocks;

-- rename distributed hypertables
ALTER TABLE eth.log_cids_i RENAME TO log_cids;
ALTER TABLE eth.access_list_elements_i RENAME TO access_list_elements;
ALTER TABLE eth.state_accounts_i RENAME TO state_accounts;
ALTER TABLE eth.storage_cids_i RENAME TO storage_cids;
ALTER TABLE eth.state_cids_i RENAME TO state_cids;
ALTER TABLE eth.receipt_cids_i RENAME TO receipt_cids;
ALTER TABLE eth.transaction_cids_i RENAME TO transaction_cids;
ALTER TABLE eth.uncle_cids_i RENAME TO uncle_cids;
ALTER TABLE eth.header_cids_i RENAME TO header_cids;
ALTER TABLE public.blocks_i RENAME TO blocks;
CALL distributed_exec('ALTER TABLE eth.log_cids_i RENAME TO log_cids');
CALL distributed_exec('ALTER TABLE eth.access_list_elements_i RENAME TO access_list_elements');
CALL distributed_exec('ALTER TABLE eth.state_accounts_i RENAME TO state_accounts');
CALL distributed_exec('ALTER TABLE eth.storage_cids_i RENAME TO storage_cids');
CALL distributed_exec('ALTER TABLE eth.state_cids_i RENAME TO state_cids');
CALL distributed_exec('ALTER TABLE eth.receipt_cids_i RENAME TO receipt_cids');
CALL distributed_exec('ALTER TABLE eth.transaction_cids_i RENAME TO transaction_cids');
CALL distributed_exec('ALTER TABLE eth.uncle_cids_i RENAME TO uncle_cids');
CALL distributed_exec('ALTER TABLE eth.header_cids_i RENAME TO header_cids');
CALL distributed_exec('ALTER TABLE public.blocks_i RENAME TO blocks');

-- update version
INSERT INTO public.db_version (singleton, version) VALUES (true, 'v4.0.00-dh')
ON CONFLICT (singleton) DO UPDATE SET (version, tstamp) = ('v4.0.0-dh', NOW());

-- +goose Down
INSERT INTO public.db_version (singleton, version) VALUES (true, 'v4.0.0')
ON CONFLICT (singleton) DO UPDATE SET (version, tstamp) = ('v4.0.0', NOW());
-- reversing conversion to hypertables requires migrating all data from every chunk back to a single table
-- create new regular tables
CREATE TABLE eth.log_cids_i (LIKE eth.log_cids INCLUDING ALL);
CREATE TABLE eth.access_list_elements_i (LIKE eth.access_list_elements INCLUDING ALL);
CREATE TABLE eth.state_accounts_i (LIKE eth.state_accounts INCLUDING ALL);
CREATE TABLE eth.storage_cids_i (LIKE eth.storage_cids INCLUDING ALL);
CREATE TABLE eth.state_cids_i (LIKE eth.state_cids INCLUDING ALL);
CREATE TABLE eth.receipt_cids_i (LIKE eth.receipt_cids INCLUDING ALL);
CREATE TABLE eth.transaction_cids_i (LIKE eth.transaction_cids INCLUDING ALL);
CREATE TABLE eth.uncle_cids_i (LIKE eth.uncle_cids INCLUDING ALL);
CREATE TABLE eth.header_cids_i (LIKE eth.header_cids INCLUDING ALL);
CREATE TABLE public.blocks_i (LIKE public.blocks INCLUDING ALL);

-- migrate data
INSERT INTO eth.log_cids_i (SELECT * FROM eth.log_cids);
INSERT INTO eth.access_list_elements_i (SELECT * FROM eth.access_list_elements);
INSERT INTO eth.state_accounts_i (SELECT * FROM eth.state_accounts);
INSERT INTO eth.storage_cids_i (SELECT * FROM eth.storage_cids);
INSERT INTO eth.state_cids_i (SELECT * FROM eth.state_cids);
INSERT INTO eth.receipt_cids_i (SELECT * FROM eth.receipt_cids);
INSERT INTO eth.transaction_cids_i (SELECT * FROM eth.transaction_cids);
INSERT INTO eth.uncle_cids_i (SELECT * FROM eth.uncle_cids);
INSERT INTO eth.header_cids_i (SELECT * FROM eth.header_cids);
INSERT INTO public.blocks_i (SELECT * FROM public.blocks);

-- drop distributed hypertables
DROP TABLE eth.log_cids;
DROP TABLE eth.access_list_elements;
DROP TABLE eth.state_accounts;
DROP TABLE eth.storage_cids;
DROP TABLE eth.state_cids;
DROP TABLE eth.receipt_cids;
DROP TABLE eth.transaction_cids;
DROP TABLE eth.uncle_cids;
DROP TABLE eth.header_cids;
DROP TABLE public.blocks;

-- rename hypertable tables
ALTER TABLE eth.log_cids_i RENAME TO log_cids;
ALTER TABLE eth.access_list_elements_i RENAME TO access_list_elements;
ALTER TABLE eth.state_accounts_i RENAME TO state_accounts;
ALTER TABLE eth.storage_cids_i RENAME TO storage_cids;
ALTER TABLE eth.state_cids_i RENAME TO state_cids;
ALTER TABLE eth.receipt_cids_i RENAME TO receipt_cids;
ALTER TABLE eth.transaction_cids_i RENAME TO transaction_cids;
ALTER TABLE eth.uncle_cids_i RENAME TO uncle_cids;
ALTER TABLE eth.header_cids_i RENAME TO header_cids;
ALTER TABLE public.blocks_i RENAME TO blocks;
@@ -1,5 +0,0 @@ (deleted file)
-- +goose Up
-- to be generated by a script

-- +goose Down
-- to be generated by a script
@@ -1,27 +0,0 @@ (deleted file)
-- +goose Up
-- to be generated by a script

SELECT set_replication_factor('public.blocks', 3);
SELECT set_replication_factor('eth.header_cids', 3);
SELECT set_replication_factor('eth.uncle_cids', 3);
SELECT set_replication_factor('eth.transaction_cids', 3);
SELECT set_replication_factor('eth.receipt_cids', 3);
SELECT set_replication_factor('eth.state_cids', 3);
SELECT set_replication_factor('eth.storage_cids', 3);
SELECT set_replication_factor('eth.state_accounts', 3);
SELECT set_replication_factor('eth.access_list_elements', 3);
SELECT set_replication_factor('eth.log_cids', 3);

-- +goose Down
-- to be generated by a script

SELECT set_replication_factor('public.blocks', 1);
SELECT set_replication_factor('eth.header_cids', 1);
SELECT set_replication_factor('eth.uncle_cids', 1);
SELECT set_replication_factor('eth.transaction_cids', 1);
SELECT set_replication_factor('eth.receipt_cids', 1);
SELECT set_replication_factor('eth.state_cids', 1);
SELECT set_replication_factor('eth.storage_cids', 1);
SELECT set_replication_factor('eth.state_accounts', 1);
SELECT set_replication_factor('eth.access_list_elements', 1);
SELECT set_replication_factor('eth.log_cids', 1);
@@ -5,58 +5,10 @@ services:
     image: timescale/timescaledb:latest-pg14
     restart: always
     container_name: access-node
-    depends_on:
-      - data-node-1
-      - data-node-2
-      - data-node-3
     command: ["postgres", "-c", "log_statement=all"]
     environment:
-      POSTGRES_USER: "postgres"
+      POSTGRES_USER: "vdbm"
       POSTGRES_DB: "vulcanize_testing_v4"
       POSTGRES_PASSWORD: "password"
     ports:
       - "127.0.0.1:8066:5432"
-    volumes:
-      - ./scripts/init-access-node.sh:/docker-entrypoint-initdb.d/init-access-node.sh
-
-  data-node-1:
-    image: timescale/timescaledb:latest-pg14
-    restart: unless-stopped
-    container_name: data-node-1
-    command: ["postgres", "-c", "log_statement=all"]
-    environment:
-      POSTGRES_USER: "postgres"
-      POSTGRES_DB: "vulcanize_testing_v4"
-      POSTGRES_PASSWORD: "password"
-    ports:
-      - "127.0.0.1:8067:5432"
-    volumes:
-      - ./scripts/init-data-node.sh:/docker-entrypoint-initdb.d/init-data-node.sh
-
-  data-node-2:
-    image: timescale/timescaledb:latest-pg14
-    restart: unless-stopped
-    container_name: data-node-2
-    command: ["postgres", "-c", "log_statement=all"]
-    environment:
-      POSTGRES_USER: "postgres"
-      POSTGRES_DB: "vulcanize_testing_v4"
-      POSTGRES_PASSWORD: "password"
-    ports:
-      - "127.0.0.1:8068:5432"
-    volumes:
-      - ./scripts/init-data-node.sh:/docker-entrypoint-initdb.d/init-data-node.sh
-
-  data-node-3:
-    image: timescale/timescaledb:latest-pg14
-    restart: unless-stopped
-    container_name: data-node-3
-    command: ["postgres", "-c", "log_statement=all"]
-    environment:
-      POSTGRES_USER: "postgres"
-      POSTGRES_DB: "vulcanize_testing_v4"
-      POSTGRES_PASSWORD: "password"
-    ports:
-      - "127.0.0.1:8069:5432"
-    volumes:
-      - ./scripts/init-data-node.sh:/docker-entrypoint-initdb.d/init-data-node.sh
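With the data-node services and init scripts gone, the compose file now starts a single TimescaleDB container. A sketch for confirming the log line the README expects, assuming the `access-node` container name retained above:

```bash
# Bring the instance up in the background and wait for the expected log line.
docker-compose -f docker-compose.test.yml up -d
docker logs -f access-node 2>&1 | grep -m 1 "TimescaleDB background worker launcher connected to shared catalogs"
```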
@@ -1,138 +0,0 @@ (deleted file)
#!/bin/bash
# Guards
if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ] || [ -z "$4" ] || [ -z "$5" ]
then
  echo "Env variables not provided"
  echo "Usage: ./add_nodes.sh <MIGRATION_FILE_PATH:string> <NODE_NAME:string> <NODE_HOST:string> <NODE_PORT:numeric/string> <NODE_DATABASE:string> <EXECUTE_SQL:bool>"
  echo "Only <EXECUTE_SQL> is optional"
  exit 1
fi
if [ -z "$6" ]
then
  echo "EXECUTE_SQL not set, will not run statements against an access server"
else
  echo "EXECUTE_SQL is set, will run stataments against an access server"
  echo "Note: this mode is not recommended except in the case when the migration has already been applied with previous
nodes and we need/want to add more while still recording them in the existing migration (and adding their Down statements to said migration)"
  echo "Expected environment variables:"
  echo "DATABASE_HOSTNAME, DATABASE_NAME, DATABASE_PORT, DATABASE_USER"
  echo "For now, DATABASE_PASSWORD will be prompted for on each psql call"
fi

# Remote DB node info
export MIGRATION_FILE_PATH=$1
export NODE_NAME=$2
export NODE_HOST=$3
export NODE_PORT=$4
export NODE_DATABASE=$5
printf "Enter the ${NODE_HOST} database password:\n"
stty -echo
read NODE_PASSWORD
stty echo
export NODE_PASSWORD

if ! [ -z "$6" ]
then
  # Access DB info
  echo "heeeeey"
  export DATABASE_HOSTNAME=localhost
  export DATABASE_PORT=5432
  export DATABASE_USER=vdbm
  export DATABASE_NAME=vulcanize_db
fi

# Array of distributed hypertable names
declare -a tables_names=("public.blocks" "eth.header_cid" "eth.uncle_cids" "eth.transaction_cids"
                "eth.receipt_cids" "eth.state_cid" "eth.storage_cids" "eth.state_accounts"
                "eth.access_list_elements" "eth.log_cids"
)
# Array to append Up statements to for later (optional) execution
declare -a up_stmts=()

echo "Writing Up and Down statements to provided migration file at ${migration_file_path}"

# Create add node statement
up_add_pg_str="SELECT add_data_node('${NODE_NAME}', host => '${NODE_HOST}', port => ${NODE_PORT}, database => '${NODE_DATABASE}', password => '${NODE_PASSWORD}');"
up_stmts+=(${up_add_pg_str})

# Insert at the 3rd line of the file
sed -i.bak '3 i\
'"${up_add_pg_str}"'
' "${MIGRATION_FILE_PATH}"

# Check for error
if [[ $? -eq 0 ]]; then
  echo "Wrote Up add node statement ${up_add_pg_str}"
else
  echo "Could not write Up add node statement ${up_add_pg_str}. Is the migration file path correct?"
  exit 1
fi

# Create attach node statements
for table_name in "${tables_names[@]}"
do
  up_attach_pg_str="SELECT attach_data_node('${NODE_NAME}', '${table_name}', if_not_attached => true);"
  up_stmts+=(${up_attach_pg_str})
  # Insert at the 4th line of the file
  sed -i.bak '4 i\
'"${up_attach_pg_str}"'
' "${MIGRATION_FILE_PATH}"
  # Check for error
  if [[ $? -eq 0 ]]; then
    echo "Wrote Up attach node statement ${up_attach_pg_str}"
  else
    echo "Could not write Up attach node statement ${up_attach_pg_str}. Is the migration file path correct?"
    exit 1
  fi
done

## Create detach and remove node statement
down_attach_pg_str="SELECT detach_data_node('${NODE_NAME}', force => true, if_attached = true);"
down_add_pg_str="SELECT delete_data_node('${NODE_NAME}', force => true, if_attached => true);"

# Append them at the last line in the file
sed -i.bak '$ a\
'"${down_attach_pg_str}"'
' "${MIGRATION_FILE_PATH}"
# Check for error
if [[ $? -eq 0 ]]; then
  echo "Wrote Down attach node statement ${down_attach_pg_str}"
else
  echo "Could not write Down attach node statement ${down_attach_pg_str}. Is the migration file path correct?"
  exit 1
fi
# Append them at the last line in the file
sed -i.bak '$ a\
'"${down_add_pg_str}"'
' "${MIGRATION_FILE_PATH}"
# Check for error
if [[ $? -eq 0 ]]; then
  echo "Wrote Down add node statement ${down_add_pg_str}"
else
  echo "Could not write Down add node statement ${down_add_pg_str}. Is the migration file path correct?"
  exit 1
fi

# Execute Up statements on the server if we are in that mode
if [ -z "$6" ]
then
  echo "Done!"
  exit 0
fi

echo "Executing Up statements against provided server"

for up_stmt in "${up_stmts[@]}"
do
  psql -c '\x' -c "${up_stmt}" -h $DATABASE_HOSTNAME -p $DATABASE_PORT -U $DATABASE_USER -d $DATABASE_NAME -W
  # Check for error
  if [[ $? -eq 0 ]]; then
    echo "Executed Up statement ${up_stmt}}"
  else
    echo "Could not execute Up statement ${up_stmt}. Is the migration file path correct?"
    exit 1
  fi
done

echo "Done!"
exit 0
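The removed helper's core mechanism was splicing Up statements near the top of a goose migration and appending the matching Down statements at the end with `sed`. A stripped-down illustration of that pattern, with a hypothetical file name and node values that are not part of the repo:

```bash
#!/bin/bash
# Hypothetical inputs; only the sed pattern mirrors the removed add_nodes script.
MIGRATION_FILE="example_migration.sql"
UP_STMT="SELECT add_data_node('dn4', host => 'data-node-4', port => 5432, database => 'vulcanize_testing_v4', password => 'password');"
DOWN_STMT="SELECT delete_data_node('dn4', force => true, if_attached => true);"

# Insert the Up statement at line 3, just below the goose markers...
sed -i.bak '3 i\
'"${UP_STMT}"'
' "${MIGRATION_FILE}"

# ...and append the matching Down statement after the last line.
sed -i.bak '$ a\
'"${DOWN_STMT}"'
' "${MIGRATION_FILE}"
```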
@@ -1,29 +0,0 @@ (deleted file)
#!/bin/sh
set -e

# https://docs.timescale.com/timescaledb/latest/how-to-guides/multinode-timescaledb/multinode-config/

# To achieve good query performance you need to enable partition-wise aggregation on the access node. This pushes down aggregation queries to the data nodes.
# https://www.postgresql.org/docs/12/runtime-config-query.html#enable_partitionwise_aggregate
sed -ri "s!^#?(enable_partitionwise_aggregate)\s*=.*!\1 = on!" /var/lib/postgresql/data/postgresql.conf
grep "enable_partitionwise_aggregate" /var/lib/postgresql/data/postgresql.conf

# JIT should be set to off on the access node as JIT currently doesn't work well with distributed queries.
# https://www.postgresql.org/docs/12/runtime-config-query.html#jit
sed -ri "s!^#?(jit)\s*=.*!\1 = off!" /var/lib/postgresql/data/postgresql.conf
grep "jit" /var/lib/postgresql/data/postgresql.conf

# https://docs.timescale.com/timescaledb/latest/how-to-guides/multinode-timescaledb/multinode-auth/
# https://docs.timescale.com/timescaledb/latest/how-to-guides/multinode-timescaledb/multinode-auth/#password-authentication

# Set password_encryption = 'scram-sha-256' in postgresql.conf on the access node.
sed -ri "s!^#?(password_encryption)\s*=.*!\1 = 'scram-sha-256'!" /var/lib/postgresql/data/postgresql.conf
grep "password_encryption" /var/lib/postgresql/data/postgresql.conf

# Append to data/passfile *:*:*:ROLE:ROLE_PASSWORD
# This file stores the passwords for each role that the access node connects to on the data nodes.
echo "*:*:*:postgres:password">>/var/lib/postgresql/data/passfile
chmod 0600 /var/lib/postgresql/data/passfile

# Add "host all all ACCESS_NODE_IP scram-sha-256" pg_hba.conf on the data nodes.
# Skipped. Using default "host all all all scram-sha-256" for now.
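Since these access-node overrides are no longer applied, the single-node instance runs with whatever the image defaults are; if needed, the relevant settings can be inspected with something like the following (a sketch, assuming the connection details used elsewhere in this commit):

```bash
# Show the current values of the settings the removed init script used to force.
psql postgresql://vdbm:password@127.0.0.1:8066/vulcanize_testing_v4 \
  -c "SHOW enable_partitionwise_aggregate;" \
  -c "SHOW jit;" \
  -c "SHOW password_encryption;"
```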
@@ -1,27 +0,0 @@ (deleted file)
#!/bin/sh
set -e

# https://docs.timescale.com/timescaledb/latest/how-to-guides/configuration/timescaledb-config/#timescaledb-last-tuned-string
# https://docs.timescale.com/timescaledb/latest/how-to-guides/multi-node-setup/required-configuration/

# It is necessary to change the parameter max_prepared_transactions to a non-zero value ('150' is recommended).
# https://www.postgresql.org/docs/12/runtime-config-resource.html#max_prepared_transactions
sed -ri "s!^#?(max_prepared_transactions)\s*=.*!\1 = 150!" /var/lib/postgresql/data/postgresql.conf
grep "max_prepared_transactions" /var/lib/postgresql/data/postgresql.conf

# Statement timeout should be disabled on the data nodes and managed through the access node configuration if desired.
# https://www.postgresql.org/docs/12/runtime-config-client.html#statement_timeout
sed -ri "s!^#?(statement_timeout)\s*=.*!\1 = 0!" /var/lib/postgresql/data/postgresql.conf
grep "statement_timeout" /var/lib/postgresql/data/postgresql.conf

# On the data nodes, set the wal_level to logical or higher to move or copy chunks between data nodes.
# https://www.postgresql.org/docs/14/runtime-config-wal.html
sed -ri "s!^#?(wal_level)\s*=.*!\1 = 'logical'!" /var/lib/postgresql/data/postgresql.conf
grep "wal_level" /var/lib/postgresql/data/postgresql.conf

# https://docs.timescale.com/timescaledb/latest/how-to-guides/multinode-timescaledb/multinode-auth/
# https://docs.timescale.com/timescaledb/latest/how-to-guides/multinode-timescaledb/multinode-auth/#password-authentication

# Set password_encryption = 'scram-sha-256' in postgresql.conf on the data node.
sed -ri "s!^#?(password_encryption)\s*=.*!\1 = 'scram-sha-256'!" /var/lib/postgresql/data/postgresql.conf
grep "password_encryption" /var/lib/postgresql/data/postgresql.conf
@@ -7,9 +7,8 @@ VDB_PG_CONNECT=postgresql://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOSTNAME

 # Run the DB migrations
 echo "Connecting with: $VDB_PG_CONNECT"
-sleep 15
 echo "Running database migrations"
-./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up-to 24
+./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up

 # If the db migrations ran without err
 if [[ $? -eq 0 ]]; then
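Dropping `up-to 24` in favor of plain `up` means the startup script now always migrates to the latest version. For reference, the goose sub-commands used around this repo all take the same shape (a sketch, reusing the `$VDB_PG_CONNECT` string built earlier in the script):

```bash
# Common goose invocations with the connection string from startup_script.sh.
./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" status     # list applied and pending migrations
./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up         # apply everything (what the script now does)
./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up-to 22   # stop after migration 22 (as in the README)
./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" down       # roll back the most recent migration
```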