Skip migration to create hypertables
parent 33b293f2bb
commit 0ccb54c770
@ -27,7 +27,7 @@ Schemas and utils for IPLD ETH Postgres database
 * Edit [startup_script.sh](./scripts/startup_script.sh) to change the number of migrations to be run:
 
 ```bash
-./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up-to 22
+./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up-to 21
 ```
 
 * In another `ipld-eth-db` terminal window, build an image `migrations-test` using [Dockerfile](./db/Dockerfile):
@ -1,67 +0,0 @@
--- +goose Up
-SELECT create_hypertable('public.blocks', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-SELECT create_hypertable('eth.header_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-SELECT create_hypertable('eth.uncle_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-SELECT create_hypertable('eth.transaction_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-SELECT create_hypertable('eth.receipt_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-SELECT create_hypertable('eth.state_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-SELECT create_hypertable('eth.storage_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-SELECT create_hypertable('eth.state_accounts', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-SELECT create_hypertable('eth.access_list_elements', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-SELECT create_hypertable('eth.log_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-
--- update version
-INSERT INTO public.db_version (singleton, version) VALUES (true, 'v4.0.0-h')
-ON CONFLICT (singleton) DO UPDATE SET (version, tstamp) = ('v4.0.0-h', NOW());
-
--- +goose Down
-INSERT INTO public.db_version (singleton, version) VALUES (true, 'v4.0.0')
-ON CONFLICT (singleton) DO UPDATE SET (version, tstamp) = ('v4.0.0', NOW());
--- reversing conversion to hypertable requires migrating all data from every chunk back to a single table
--- create new regular tables
-CREATE TABLE eth.log_cids_i (LIKE eth.log_cids INCLUDING ALL);
-CREATE TABLE eth.access_list_elements_i (LIKE eth.access_list_elements INCLUDING ALL);
-CREATE TABLE eth.state_accounts_i (LIKE eth.state_accounts INCLUDING ALL);
-CREATE TABLE eth.storage_cids_i (LIKE eth.storage_cids INCLUDING ALL);
-CREATE TABLE eth.state_cids_i (LIKE eth.state_cids INCLUDING ALL);
-CREATE TABLE eth.receipt_cids_i (LIKE eth.receipt_cids INCLUDING ALL);
-CREATE TABLE eth.transaction_cids_i (LIKE eth.transaction_cids INCLUDING ALL);
-CREATE TABLE eth.uncle_cids_i (LIKE eth.uncle_cids INCLUDING ALL);
-CREATE TABLE eth.header_cids_i (LIKE eth.header_cids INCLUDING ALL);
-CREATE TABLE public.blocks_i (LIKE public.blocks INCLUDING ALL);
-
--- migrate data
-INSERT INTO eth.log_cids_i (SELECT * FROM eth.log_cids);
-INSERT INTO eth.access_list_elements_i (SELECT * FROM eth.access_list_elements);
-INSERT INTO eth.state_accounts_i (SELECT * FROM eth.state_accounts);
-INSERT INTO eth.storage_cids_i (SELECT * FROM eth.storage_cids);
-INSERT INTO eth.state_cids_i (SELECT * FROM eth.state_cids);
-INSERT INTO eth.receipt_cids_i (SELECT * FROM eth.receipt_cids);
-INSERT INTO eth.transaction_cids_i (SELECT * FROM eth.transaction_cids);
-INSERT INTO eth.uncle_cids_i (SELECT * FROM eth.uncle_cids);
-INSERT INTO eth.header_cids_i (SELECT * FROM eth.header_cids);
-INSERT INTO public.blocks_i (SELECT * FROM public.blocks);
-
--- drops hypertables
-DROP TABLE eth.log_cids;
-DROP TABLE eth.access_list_elements;
-DROP TABLE eth.state_accounts;
-DROP TABLE eth.storage_cids;
-DROP TABLE eth.state_cids;
-DROP TABLE eth.receipt_cids;
-DROP TABLE eth.transaction_cids;
-DROP TABLE eth.uncle_cids;
-DROP TABLE eth.header_cids;
-DROP TABLE public.blocks;
-
--- rename new tables
-ALTER TABLE eth.log_cids_i RENAME TO log_cids;
-ALTER TABLE eth.access_list_elements_i RENAME TO access_list_elements;
-ALTER TABLE eth.state_accounts_i RENAME TO state_accounts;
-ALTER TABLE eth.storage_cids_i RENAME TO storage_cids;
-ALTER TABLE eth.state_cids_i RENAME TO state_cids;
-ALTER TABLE eth.receipt_cids_i RENAME TO receipt_cids;
-ALTER TABLE eth.transaction_cids_i RENAME TO transaction_cids;
-ALTER TABLE eth.uncle_cids_i RENAME TO uncle_cids;
-ALTER TABLE eth.header_cids_i RENAME TO header_cids;
-ALTER TABLE public.blocks_i RENAME TO blocks;
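The file removed above is the step that converted `public.blocks` and the `eth.*` tables into TimescaleDB hypertables. If it helps to confirm the state of a database after running the reduced migration set, TimescaleDB's catalog view can be queried; a minimal check, assuming TimescaleDB 2.x is installed (not part of this change):

```sql
-- Lists every hypertable TimescaleDB knows about; an empty result means the
-- tables touched by these migrations are still regular PostgreSQL tables.
SELECT hypertable_schema, hypertable_name
FROM timescaledb_information.hypertables;
```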
@ -1,4 +1,8 @@
 -- +goose Up
+-- creating distributed hypertables from duplicate tables for now as we are getting the following error (while running geth
+-- unit tests) if regular tables are directly converted to distributed hypertables
+-- error: "cannot PREPARE a transaction that has executed LISTEN, UNLISTEN, or NOTIFY"
+
 -- create new regular tables
 CREATE TABLE eth.log_cids_i (LIKE eth.log_cids INCLUDING ALL);
 CREATE TABLE eth.access_list_elements_i (LIKE eth.access_list_elements INCLUDING ALL);
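The comment added in the hunk above explains why the duplicate `_i` tables exist: directly converting the existing tables to distributed hypertables produced the PREPARE error during geth unit tests, so empty duplicates are created, converted, and then filled. A rough sketch of that pattern for a single table, assuming a TimescaleDB multi-node setup (illustrative only, not taken from this migration; the chunk interval mirrors the value used elsewhere in these files):

```sql
-- Illustrative sketch of the duplicate-then-convert workaround described above.
-- create_distributed_hypertable() is TimescaleDB's multi-node counterpart of create_hypertable().
CREATE TABLE eth.log_cids_i (LIKE eth.log_cids INCLUDING ALL);
SELECT create_distributed_hypertable('eth.log_cids_i', 'block_number', chunk_time_interval => 32768);
INSERT INTO eth.log_cids_i SELECT * FROM eth.log_cids;
```

The remaining hunks of this file show the rest of the pattern: the copies are populated from the originals and the original tables are dropped.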
@ -35,7 +39,7 @@ INSERT INTO eth.uncle_cids_i (SELECT * FROM eth.uncle_cids);
 INSERT INTO eth.header_cids_i (SELECT * FROM eth.header_cids);
 INSERT INTO public.blocks_i (SELECT * FROM public.blocks);
 
--- drops hypertables
+-- drop tables
 DROP TABLE eth.log_cids;
 DROP TABLE eth.access_list_elements;
 DROP TABLE eth.state_accounts;
@ -74,9 +78,9 @@ INSERT INTO public.db_version (singleton, version) VALUES (true, 'v4.0.00-dh')
 ON CONFLICT (singleton) DO UPDATE SET (version, tstamp) = ('v4.0.0-dh', NOW());
 
 -- +goose Down
-INSERT INTO public.db_version (singleton, version) VALUES (true, 'v4.0.0-h')
-ON CONFLICT (singleton) DO UPDATE SET (version, tstamp) = ('v4.0.0-h', NOW());
--- reversing conversion to hypertable requires migrating all data from every chunk back to a single table
+INSERT INTO public.db_version (singleton, version) VALUES (true, 'v4.0.0')
+ON CONFLICT (singleton) DO UPDATE SET (version, tstamp) = ('v4.0.0', NOW());
+-- reversing conversion to hypertables requires migrating all data from every chunk back to a single table
 -- create new regular tables
 CREATE TABLE eth.log_cids_i (LIKE eth.log_cids INCLUDING ALL);
 CREATE TABLE eth.access_list_elements_i (LIKE eth.access_list_elements INCLUDING ALL);
@ -89,18 +93,6 @@ CREATE TABLE eth.uncle_cids_i (LIKE eth.uncle_cids INCLUDING ALL);
 CREATE TABLE eth.header_cids_i (LIKE eth.header_cids INCLUDING ALL);
 CREATE TABLE public.blocks_i (LIKE public.blocks INCLUDING ALL);
 
--- turn them into hypertables
-SELECT create_hypertable('public.blocks_i', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-SELECT create_hypertable('eth.header_cids_i', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-SELECT create_hypertable('eth.uncle_cids_i', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-SELECT create_hypertable('eth.transaction_cids_i', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-SELECT create_hypertable('eth.receipt_cids_i', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-SELECT create_hypertable('eth.state_cids_i', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-SELECT create_hypertable('eth.storage_cids_i', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-SELECT create_hypertable('eth.state_accounts_i', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-SELECT create_hypertable('eth.access_list_elements_i', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-SELECT create_hypertable('eth.log_cids_i', 'block_number', migrate_data => true, chunk_time_interval => 32768);
-
 -- migrate data
 INSERT INTO eth.log_cids_i (SELECT * FROM eth.log_cids);
 INSERT INTO eth.access_list_elements_i (SELECT * FROM eth.access_list_elements);
@ -113,7 +105,7 @@ INSERT INTO eth.uncle_cids_i (SELECT * FROM eth.uncle_cids);
 INSERT INTO eth.header_cids_i (SELECT * FROM eth.header_cids);
 INSERT INTO public.blocks_i (SELECT * FROM public.blocks);
 
--- drops distributed hypertables
+-- drop distributed hypertables
 DROP TABLE eth.log_cids;
 DROP TABLE eth.access_list_elements;
 DROP TABLE eth.state_accounts;
@ -8,7 +8,7 @@ VDB_PG_CONNECT=postgresql://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOSTNAME
 # Run the DB migrations
 echo "Connecting with: $VDB_PG_CONNECT"
 echo "Running database migrations"
-./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up-to 22
+./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up-to 21
 
 # If the db migrations ran without err
 if [[ $? -eq 0 ]]; then
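Each variant of these migrations records itself in `public.db_version` ('v4.0.0' for plain tables, 'v4.0.0-h' for hypertables, 'v4.0.0-dh' for distributed hypertables), so after the startup script finishes, the applied variant can be confirmed with a query along these lines (assumes a psql session against the same database; not part of this change):

```sql
-- Shows which schema variant the migrations last recorded, and when.
SELECT version, tstamp
FROM public.db_version
WHERE singleton = true;
```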