Compare commits
89 Commits
Author | SHA1 | Date
---|---|---
 | 89d08ac1b9 |
 | d24142a301 |
 | 9e0b69d23c |
 | 825a0bc235 |
 | e90bc38fdb |
 | ccfc2dbc84 |
 | fdd56e9803 |
 | 68a347e38d |
 | 097804b1e9 |
 | 925abd314b |
 | aaa4459655 |
 | 8f360133ed |
 | f6da2ce571 |
 | 245e6d07b2 |
 | 402d2790e0 |
 | 29c5d04159 |
 | 9196c449f0 |
 | 146282fb7f |
 | 323ed2da00 |
 | 5b92e02ebd |
 | dcd911a8ff |
 | 59d01ad30b |
 | d2d8020856 |
 | 9618c0710e |
 | 8779bb2b86 |
 | 1b922dbff3 |
 | 66cd1d9e69 |
 | af9910d381 |
 | e4e1b0ea1f |
 | 050c1ae3df |
 | 2afa6e9fa5 |
 | 3a475b4de4 |
 | bf7b4eb627 |
 | 167cfbfb20 |
 | fcb93d17ed |
 | 1bb4fe04f0 |
 | 10c84b589f |
 | 527ff11328 |
 | a91f44773c |
 | 0595f3dc15 |
 | 9f07d4f4d4 |
 | c9edf6c832 |
 | 67dc84205a |
 | 22dcf5c72e |
 | be28c2ab79 |
 | 42803af51a |
 | afc47af045 |
 | 2695d9e353 |
 | 71dede2031 |
 | df352ffd1a |
 | dd35277c86 |
 | 92cc8fbea3 |
 | 05db3c697f |
 | a710db0284 |
 | 5e153c601f |
 | 802cfe7180 |
 | b06b4f2cfb |
 | 3fd1638ff6 |
 | 1e5cbfd184 |
 | 40b1709c2c |
 | 92a9f5856b |
 | 9f060ff0bf |
 | a8440e4ded |
 | c402e5c285 |
 | 85bc243896 |
 | f67f03481b |
 | 27e923f70d |
 | 73a66dae8b |
 | 29e2bd4e7b |
 | 986ce1ead8 |
 | 268a282eac |
 | 53461a0996 |
 | 26d970ed2f |
 | c35cda7b5e |
 | f3c58e39ca |
 | 2165b316fa |
 | 713f6a9208 |
 | 241bb281eb |
 | bb57b4a033 |
 | a9755c6ecc |
 | 7f8247cb4f |
 | 701f9c2729 |
 | 475ead282b |
 | 42f46fc397 |
 | 2bf5c82150 |
 | 0c62ccc552 |
 | 13f0ff3933 |
 | 0856168b92 |
 | a8395d1413 |

.gitea/workflows/on-pr.yaml (new file, 23 lines)
@@ -0,0 +1,23 @@
name: Basic test
on: [pull_request]

jobs:
  basic-test:
    name: Build and sanity check
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Build Docker images
        run: docker compose build
      - name: Run Docker containers
        run: docker compose up -d
      - name: Check migration version
        timeout-minutes: 1
        run: |
          MIGRATION_VERSION=$(ls db/migrations/*.sql | wc -l)
          while
            version=$(docker compose run --rm migrations version 2>&1 | tail -1 | awk '{print $(NF)}')
            [[ $version != $MIGRATION_VERSION ]]; do
            echo "Incorrect version: $version"
            echo "Retrying..."
          done

.gitea/workflows/publish.yaml (new file, 26 lines)
@@ -0,0 +1,26 @@
name: Publish Docker image
on:
  release:
    types: [published, edited]
jobs:
  build:
    name: Build and publish image
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - id: vars
        name: Output SHA and version tag
        run: |
          echo "sha=${GITHUB_SHA:0:7}" >> $GITHUB_OUTPUT
          echo "tag=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
      - name: Build and tag image
        run: |
          docker build . \
            -t cerc-io/ipld-eth-db \
            -t git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:${{steps.vars.outputs.sha}} \
            -t git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:${{steps.vars.outputs.tag}}
      - name: Push image tags
        run: |
          echo ${{ secrets.CICD_PUBLISH_TOKEN }} | docker login https://git.vdb.to -u cerccicd --password-stdin
          docker push git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:${{steps.vars.outputs.sha}}
          docker push git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:${{steps.vars.outputs.tag}}

.github/workflows/on-pr.yaml (vendored; deleted, 78 lines)
@@ -1,78 +0,0 @@
name: Docker Build

on: [pull_request]

jobs:
  build:
    name: Run docker build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Run docker build
        run: make docker-build
  test:
    # Add a dummy job to avoid failing GitHub CI checks.
    # Other checks to be added later.
    name: Dummy job
    runs-on: ubuntu-latest
    steps:
      - name: Run dummy job
        run: echo "Empty dummy job"
  # concise_migration_diff:
  #   name: Verify concise migration and generated schema
  #   runs-on: ubuntu-latest
  #   steps:
  #     - uses: actions/checkout@v2
  #     - name: Run docker concise migration build
  #       run: make docker-concise-migration-build
  #     - name: Run database
  #       run: docker-compose -f docker-compose.test.yml up -d test-db
  #     - name: Test concise migration
  #       run: |
  #         sleep 10
  #         docker run --rm --network host -e DATABASE_USER=vdbm -e DATABASE_PASSWORD=password \
  #           -e DATABASE_HOSTNAME=127.0.0.1 -e DATABASE_PORT=8066 -e DATABASE_NAME=vulcanize_testing \
  #           vulcanize/concise-migration-build
  #     - name: Verify schema is latest
  #       run: |
  #         PGPASSWORD="password" pg_dump -h localhost -p 8066 -U vdbm vulcanize_testing --no-owner --schema-only > ./db/migration_schema.sql
  #         ./scripts/check_diff.sh ./db/migration_schema.sql db/schema.sql

  # incremental_migration_diff:
  #   name: Compare conscise migration schema with incremental migration.
  #   runs-on: ubuntu-latest
  #   steps:
  #     - uses: actions/checkout@v2
  #     - name: Run database
  #       run: docker-compose -f docker-compose.test.yml up -d test-db statediff-migrations
  #     - name: Test incremental migration
  #       run: |
  #         sleep 10
  #         docker run --rm --network host -e DATABASE_USER=vdbm -e DATABASE_PASSWORD=password \
  #           -e DATABASE_HOSTNAME=127.0.0.1 -e DATABASE_PORT=8066 -e DATABASE_NAME=vulcanize_testing \
  #           vulcanize/statediff-migrations:v0.9.0
  #     - name: Verify schema is latest
  #       run: |
  #         PGPASSWORD="password" pg_dump -h localhost -p 8066 -U vdbm vulcanize_testing --no-owner --schema-only > ./db/migration_schema.sql
  #         ./scripts/check_diff.sh db/schema.sql ./db/migration_schema.sql

  # migration:
  #   name: Compare up and down migration
  #   env:
  #     GOPATH: /tmp/go
  #   strategy:
  #     matrix:
  #       go-version: [ 1.16.x ]
  #       os: [ ubuntu-latest ]
  #   runs-on: ${{ matrix.os }}
  #   steps:
  #     - name: Create GOPATH
  #       run: mkdir -p /tmp/go
  #     - name: Install Go
  #       uses: actions/setup-go@v2
  #       with:
  #         go-version: ${{ matrix.go-version }}
  #     - uses: actions/checkout@v2
  #     - name: Test migration
  #       run: |
  #         timeout 5m make test-migrations

.github/workflows/publish.yaml (vendored; deleted, 41 lines)
@@ -1,41 +0,0 @@
name: Publish Docker image
on:
  release:
    types: [published, edited]
jobs:
  build:
    name: Run docker build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Get the version
        id: vars
        run: echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
      - name: Run docker build
        run: make docker-build
      - name: Tag docker image
        run: docker tag vulcanize/ipld-eth-db docker.pkg.github.com/vulcanize/ipld-eth-db/ipld-eth-db:${{steps.vars.outputs.sha}}
      - name: Docker Login
        run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
      - name: Docker Push
        run: docker push docker.pkg.github.com/vulcanize/ipld-eth-db/ipld-eth-db:${{steps.vars.outputs.sha}}
  push_to_registries:
    name: Push Docker image to Docker Hub
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Get the version
        id: vars
        run: |
          echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
          echo ::set-output name=tag::$(echo ${GITHUB_REF#refs/tags/})
      - name: Docker Login to Github Registry
        run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
      - name: Docker Pull
        run: docker pull docker.pkg.github.com/vulcanize/ipld-eth-db/ipld-eth-db:${{steps.vars.outputs.sha}}
      - name: Docker Login to Docker Registry
        run: echo ${{ secrets.VULCANIZEJENKINS_PAT }} | docker login -u vulcanizejenkins --password-stdin
      - name: Tag docker image
        run: docker tag docker.pkg.github.com/vulcanize/ipld-eth-db/ipld-eth-db:${{steps.vars.outputs.sha}} vulcanize/ipld-eth-db:${{steps.vars.outputs.tag}}
      - name: Docker Push to Docker Hub
        run: docker push vulcanize/ipld-eth-db:${{steps.vars.outputs.tag}}

Dockerfile (25 lines)
@@ -1,24 +1,19 @@
-FROM golang:1.18-alpine as builder
+FROM alpine as builder
 
-RUN apk --update --no-cache add make git g++ linux-headers
-
-ADD . /go/src/github.com/vulcanize/ipld-eth-db
-
-# Build migration tool
-WORKDIR /go/src/github.com/pressly
-ARG GOOSE_VER="v3.6.1"
-RUN git clone --depth 1 --branch ${GOOSE_VER} https://github.com/pressly/goose.git
-WORKDIR /go/src/github.com/pressly/goose/cmd/goose
-RUN GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -tags='no_sqlite3' -o goose .
+# Get migration tool
+WORKDIR /
+ARG GOOSE_VERSION="v3.6.1"
+RUN arch=$(arch | sed s/aarch64/arm64/) && \
+    wget -O ./goose https://github.com/pressly/goose/releases/download/${GOOSE_VERSION}/goose_linux_${arch}
+RUN chmod +x ./goose
 
 # app container
 FROM alpine
 
 WORKDIR /app
 
-COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-db/scripts/startup_script.sh .
-COPY --from=builder /go/src/github.com/pressly/goose/cmd/goose/goose goose
-COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-db/db/migrations migrations/vulcanizedb
+COPY --from=builder /goose goose
+ADD scripts/startup_script.sh .
+ADD db/migrations migrations
 
 ENTRYPOINT ["/app/startup_script.sh"]

Makefile (deleted, 127 lines)
@@ -1,127 +0,0 @@
ifndef GOPATH
override GOPATH = $(HOME)/go
endif

BIN = $(GOPATH)/bin

# Tools
## Migration tool
GOOSE = $(BIN)/goose
$(BIN)/goose:
	go get -u github.com/pressly/goose/cmd/goose

.PHONY: installtools
installtools: | $(GOOSE)
	echo "Installing tools"

#Database
HOST_NAME = localhost
PORT = 5432
NAME =
USER = postgres
PASSWORD = password
CONNECT_STRING=postgresql://$(USER):$(PASSWORD)@$(HOST_NAME):$(PORT)/$(NAME)?sslmode=disable

# Parameter checks
## Check that DB variables are provided
.PHONY: checkdbvars
checkdbvars:
	test -n "$(HOST_NAME)" # $$HOST_NAME
	test -n "$(PORT)" # $$PORT
	test -n "$(NAME)" # $$NAME
	@echo $(CONNECT_STRING)

## Check that the migration variable (id/timestamp) is provided
.PHONY: checkmigration
checkmigration:
	test -n "$(MIGRATION)" # $$MIGRATION

# Check that the migration name is provided
.PHONY: checkmigname
checkmigname:
	test -n "$(NAME)" # $$NAME

# Migration operations
## Rollback the last migration
.PHONY: rollback
rollback: $(GOOSE) checkdbvars
	$(GOOSE) -dir db/migrations postgres "$(CONNECT_STRING)" down
	pg_dump -O -s $(CONNECT_STRING) > schema.sql

## Rollback to a select migration (id/timestamp)
.PHONY: rollback_to
rollback_to: $(GOOSE) checkmigration checkdbvars
	$(GOOSE) -dir db/migrations postgres "$(CONNECT_STRING)" down-to "$(MIGRATION)"

## Rollback pre_batch_set
.PHONY: rollback_pre_batch_set
rollback_pre_batch_set: $(GOOSE) checkdbvars
	$(GOOSE) -dir db/pre_batch_processing_migrations postgres "$(CONNECT_STRING)" down

## Rollback post_batch_set
.PHONY: rollback_post_batch_set
rollback_post_batch_set: $(GOOSE) checkdbvars
	$(GOOSE) -dir db/post_batch_processing_migrations postgres "$(CONNECT_STRING)" down

## Apply the next up migration
.PHONY: migrate_up_by_one
migrate_up_by_one: $(GOOSE) checkdbvars
	$(GOOSE) -dir db/migrations postgres "$(CONNECT_STRING)" up-by-one

## Apply all migrations not already run
.PHONY: migrate
migrate: $(GOOSE) checkdbvars
	$(GOOSE) -dir db/migrations postgres "$(CONNECT_STRING)" up
	pg_dump -O -s $(CONNECT_STRING) > schema.sql

## Apply migrations to be ran before a batch processing
.PHONY: migrate_pre_batch_set
migrate_pre_batch_set: $(GOOSE) checkdbvars
	$(GOOSE) -dir db/pre_batch_processing_migrations postgres "$(CONNECT_STRING)" up

## Apply migrations to be ran after a batch processing, one-by-one
.PHONY: migrate_post_batch_set_up_by_one
migrate_post_batch_set_up_by_one: $(GOOSE) checkdbvars
	$(GOOSE) -dir db/post_batch_processing_migrations postgres "$(CONNECT_STRING)" up-by-one

## Apply migrations to be ran after a batch processing
.PHONY: migrate_post_batch_set
migrate_post_batch_set: $(GOOSE) checkdbvars
	$(GOOSE) -dir db/post_batch_processing_migrations postgres "$(CONNECT_STRING)" up

## Create a new migration file
.PHONY: new_migration
new_migration: $(GOOSE) checkmigname
	$(GOOSE) -dir db/migrations create $(NAME) sql

## Check which migrations are applied at the moment
.PHONY: migration_status
migration_status: $(GOOSE) checkdbvars
	$(GOOSE) -dir db/migrations postgres "$(CONNECT_STRING)" status

# Convert timestamped migrations to versioned (to be run in CI);
# merge timestamped files to prevent conflict
.PHONY: version_migrations
version_migrations:
	$(GOOSE) -dir db/migrations fix

# Import a psql schema to the database
.PHONY: import
import:
	test -n "$(NAME)" # $$NAME
	psql $(NAME) < schema.sql


## Build docker image with schema
.PHONY: docker-build
docker-build:
	docker-compose -f docker-compose.test.yml build

# ## Build docker image for migration
# .PHONY: docker-concise-migration-build
# docker-concise-migration-build:
# 	docker build -t vulcanize/concise-migration-build -f ./db/Dockerfile .

.PHONY: test-migrations
test-migrations: $(GOOSE)
	./scripts/test_migration.sh

README.md (+228 lines)
@@ -33,3 +33,231 @@ Schemas and utils for IPLD ETH Postgres database
```
docker-compose -f docker-compose.test.yml up --build
```

## Example queries

Note that searching by block_number in addition to block_hash is optional in the below queries where both are provided,
but since the tables are partitioned by block_number doing so will improve query performance by informing the query
planner which partition it needs to search.

### Headers

Retrieve header RLP (IPLD block) and CID for a given block hash

```sql
SELECT header_cids.cid,
       blocks.data
FROM ipld.blocks,
     eth.header_cids
WHERE header_cids.block_hash = {block_hash}
  AND header_cids.block_number = {block_number}
  AND header_cids.canonical
  AND blocks.key = header_cids.cid
  AND blocks.block_number = header_cids.block_number
LIMIT 1
```
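
Because the tables are partitioned by block_number, one way to see the benefit described above is to compare query plans with and without the block_number predicate. The following is only an illustrative sketch, using the same `{placeholder}` convention as the queries in this README:

```sql
-- With both predicates the planner can prune down to the single partition
-- that covers {block_number}.
EXPLAIN
SELECT header_cids.cid, blocks.data
FROM ipld.blocks, eth.header_cids
WHERE header_cids.block_hash = {block_hash}
  AND header_cids.block_number = {block_number}
  AND blocks.key = header_cids.cid
  AND blocks.block_number = header_cids.block_number;

-- Without block_number, every partition of both tables remains a candidate.
EXPLAIN
SELECT header_cids.cid, blocks.data
FROM ipld.blocks, eth.header_cids
WHERE header_cids.block_hash = {block_hash}
  AND blocks.key = header_cids.cid;
```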

### Uncles

Retrieve the uncle list RLP (IPLD block) and CID for a given block hash

```sql
SELECT uncle_cids.cid,
       blocks.data
FROM eth.uncle_cids
         INNER JOIN eth.header_cids ON (
            uncle_cids.header_id = header_cids.block_hash
        AND uncle_cids.block_number = header_cids.block_number)
         INNER JOIN ipld.blocks ON (
            uncle_cids.cid = blocks.key
        AND uncle_cids.block_number = blocks.block_number)
WHERE header_cids.block_hash = {block_hash}
  AND header_cids.block_number = {block_number}
ORDER BY uncle_cids.parent_hash
LIMIT 1
```

### Transactions

Retrieve an ordered list of all the RLP encoded transactions (IPLD blocks) and their CIDs for a given block hash

```sql
SELECT transaction_cids.cid,
       blocks.data
FROM eth.transaction_cids,
     eth.header_cids,
     ipld.blocks
WHERE header_cids.block_hash = {block_hash}
  AND header_cids.block_number = {block_number}
  AND header_cids.canonical
  AND transaction_cids.block_number = header_cids.block_number
  AND transaction_cids.header_id = header_cids.block_hash
  AND blocks.block_number = header_cids.block_number
  AND blocks.key = transaction_cids.cid
ORDER BY eth.transaction_cids.index ASC
```

Retrieve an RLP encoded transaction (IPLD block), the block hash and block number for the block it belongs to, and its position in the transaction list
for that block, for a provided transaction hash

```sql
SELECT blocks.data,
       transaction_cids.header_id,
       transaction_cids.block_number,
       transaction_cids.index
FROM eth.transaction_cids,
     ipld.blocks,
     eth.header_cids
WHERE transaction_cids.tx_hash = {transaction_hash}
  AND header_cids.block_hash = transaction_cids.header_id
  AND header_cids.block_number = transaction_cids.block_number
  AND header_cids.canonical
  AND blocks.key = transaction_cids.cid
  AND blocks.block_number = transaction_cids.block_number
```

### Receipts

Retrieve an ordered list of all the RLP encoded receipts (IPLD blocks), their CIDs, and their corresponding transaction
hashes for a given block hash

```sql
SELECT receipt_cids.cid,
       blocks.data,
       eth.transaction_cids.tx_hash
FROM eth.receipt_cids,
     eth.transaction_cids,
     eth.header_cids,
     ipld.blocks
WHERE header_cids.block_hash = {block_hash}
  AND header_cids.block_number = {block_number}
  AND header_cids.canonical
  AND receipt_cids.block_number = header_cids.block_number
  AND receipt_cids.header_id = header_cids.block_hash
  AND receipt_cids.TX_ID = transaction_cids.TX_HASH
  AND transaction_cids.block_number = header_cids.block_number
  AND transaction_cids.header_id = header_cids.block_hash
  AND blocks.block_number = header_cids.block_number
  AND blocks.key = receipt_cids.cid
ORDER BY eth.transaction_cids.index ASC
```

Retrieve the RLP encoded receipt (IPLD) and CID corresponding to a provided transaction hash

```sql
SELECT receipt_cids.cid,
       blocks.data
FROM eth.receipt_cids
         INNER JOIN eth.transaction_cids ON (
            receipt_cids.tx_id = transaction_cids.tx_hash
        AND receipt_cids.block_number = transaction_cids.block_number)
         INNER JOIN ipld.blocks ON (
            receipt_cids.cid = blocks.key
        AND receipt_cids.block_number = blocks.block_number)
WHERE transaction_cids.tx_hash = {transaction_hash}
```

### Logs

Retrieve all the logs and their associated transaction hashes at a given block that were emitted from
any of the provided contract addresses and which match on any of the provided topics

```sql
SELECT blocks.data,
       eth.transaction_cids.tx_hash
FROM eth.log_cids
         INNER JOIN eth.transaction_cids ON (
            log_cids.rct_id = transaction_cids.tx_hash
        AND log_cids.header_id = transaction_cids.header_id
        AND log_cids.block_number = transaction_cids.block_number)
         INNER JOIN ipld.blocks ON (
            log_cids.cid = blocks.key
        AND log_cids.block_number = blocks.block_number)
WHERE log_cids.header_id = {block_hash}
  AND log_cids.block_number = {block_number}
  AND eth.log_cids.address = ANY ({list,of,addresses})
  AND eth.log_cids.topic0 = ANY ({list,of,topic0s})
  AND eth.log_cids.topic1 = ANY ({list,of,topic1s})
  AND eth.log_cids.topic2 = ANY ({list,of,topic2s})
  AND eth.log_cids.topic3 = ANY ({list,of,topic3s})
ORDER BY eth.transaction_cids.index, eth.log_cids.index
```

Retrieve all the logs and their associated transaction hashes within a provided block range that were emitted from
any of the provided contract addresses and which match on any of the provided topics

```sql
SELECT blocks.data,
       eth.transaction_cids.tx_hash
FROM eth.log_cids
         INNER JOIN eth.transaction_cids ON (
            log_cids.rct_id = transaction_cids.tx_hash
        AND log_cids.header_id = transaction_cids.header_id
        AND log_cids.block_number = transaction_cids.block_number)
         INNER JOIN eth.header_cids ON (
            transaction_cids.header_id = header_cids.block_hash
        AND transaction_cids.block_number = header_cids.block_number)
         INNER JOIN ipld.blocks ON (
            log_cids.cid = blocks.key
        AND log_cids.block_number = blocks.block_number)
WHERE eth.header_cids.block_number >= {range_start} AND eth.header_cids.block_number <= {range_stop}
  AND eth.header_cids.canonical
  AND eth.log_cids.address = ANY ({list,of,addresses})
  AND eth.log_cids.topic0 = ANY ({list,of,topic0s})
  AND eth.log_cids.topic1 = ANY ({list,of,topic1s})
  AND eth.log_cids.topic2 = ANY ({list,of,topic2s})
  AND eth.log_cids.topic3 = ANY ({list,of,topic3s})
ORDER BY eth.header_cids.block_number, eth.transaction_cids.index, eth.log_cids.index
```

### State and storage

Retrieve the state account for a given address hash at a provided block hash. If `state_cids.removed == true` then
the account is empty.

```sql
SELECT state_cids.nonce,
       state_cids.balance,
       state_cids.storage_root,
       state_cids.code_hash,
       state_cids.removed
FROM eth.state_cids,
     eth.header_cids
WHERE state_cids.state_leaf_key = {address_hash}
  AND state_cids.block_number <=
      (SELECT block_number
       FROM eth.header_cids
       WHERE block_hash = {block_hash}
       LIMIT 1)
  AND header_cids.canonical
  AND state_cids.header_id = header_cids.block_hash
  AND state_cids.block_number = header_cids.block_number
ORDER BY state_cids.block_number DESC
LIMIT 1
```

Retrieve a storage value, as well as the RLP encoded leaf node that stores it, for a given contract address hash and
storage leaf key (storage slot hash) at a provided block hash. If `state_leaf_removed == true`
or `storage_cids.removed == true` then the slot is empty

```sql
SELECT storage_cids.cid,
       storage_cids.val,
       storage_cids.block_number,
       storage_cids.removed,
       was_state_leaf_removed_by_number(storage_cids.state_leaf_key, storage_cids.block_number) AS state_leaf_removed,
       blocks.data
FROM eth.storage_cids,
     eth.header_cids,
     ipld.blocks
WHERE header_cids.block_number <= (SELECT block_number from eth.header_cids where block_hash = $3 LIMIT 1)
  AND header_cids.canonical
  AND storage_cids.block_number = header_cids.block_number
  AND storage_cids.header_id = header_cids.block_hash
  AND storage_cids.storage_leaf_key = {storage_slot_hash}
  AND storage_cids.state_leaf_key = {contract_address_hash}
  AND blocks.key = storage_cids.cid
  AND blocks.block_number = storage_cids.block_number
ORDER BY storage_cids.block_number DESC LIMIT 1
```
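
Migration 00017 later in this changeset wraps essentially the same lookup, keyed by block height rather than block hash, in a `get_storage_at_by_number` function; a call would look roughly like the following, with placeholder arguments:

```sql
SELECT cid, val, block_number, removed, state_leaf_removed
FROM get_storage_at_by_number(
    {contract_address_hash},  -- v_state_leaf_key
    {storage_slot_hash},      -- v_storage_leaf_key
    {block_number}            -- v_block_no
);
```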

@@ -1,15 +1,18 @@
-version: '3.2'
-
 services:
   migrations:
     restart: on-failure
     depends_on:
-      - ipld-eth-db
+      ipld-eth-db:
+        condition: service_healthy
+    # Use local build
+    build:
+      context: .
+      dockerfile: Dockerfile
     # Use an existing image
-    image: vulcanize/ipld-eth-db:v4.2.1-alpha
+    image: cerc/ipld-eth-db:local
     environment:
       DATABASE_USER: "vdbm"
-      DATABASE_NAME: "vulcanize_testing"
+      DATABASE_NAME: "cerc_testing"
       DATABASE_PASSWORD: "password"
       DATABASE_HOSTNAME: "ipld-eth-db"
       DATABASE_PORT: 5432
@@ -20,7 +23,12 @@ services:
     command: ["postgres", "-c", "log_statement=all"]
     environment:
       POSTGRES_USER: "vdbm"
-      POSTGRES_DB: "vulcanize_testing"
+      POSTGRES_DB: "cerc_testing"
       POSTGRES_PASSWORD: "password"
     ports:
       - "127.0.0.1:8077:5432"
+    healthcheck:
+      test: ["CMD", "pg_isready", "-U", "vdbm"]
+      interval: 2s
+      timeout: 1s
+      retries: 3

@@ -1,5 +1,7 @@
 -- +goose Up
-CREATE TABLE IF NOT EXISTS public.blocks (
+CREATE SCHEMA ipld;
+
+CREATE TABLE IF NOT EXISTS ipld.blocks (
   block_number BIGINT NOT NULL,
   key TEXT NOT NULL,
   data BYTEA NOT NULL,
@@ -7,4 +9,5 @@ CREATE TABLE IF NOT EXISTS public.blocks (
 );
 
 -- +goose Down
-DROP TABLE public.blocks;
+DROP TABLE ipld.blocks;
+DROP SCHEMA ipld;

@@ -5,17 +5,17 @@ CREATE TABLE IF NOT EXISTS eth.header_cids (
   parent_hash VARCHAR(66) NOT NULL,
   cid TEXT NOT NULL,
   td NUMERIC NOT NULL,
-  node_id VARCHAR(128) NOT NULL,
+  node_ids VARCHAR(128)[] NOT NULL,
   reward NUMERIC NOT NULL,
   state_root VARCHAR(66) NOT NULL,
   tx_root VARCHAR(66) NOT NULL,
   receipt_root VARCHAR(66) NOT NULL,
-  uncle_root VARCHAR(66) NOT NULL,
+  uncles_hash VARCHAR(66) NOT NULL,
   bloom BYTEA NOT NULL,
   timestamp BIGINT NOT NULL,
-  mh_key TEXT NOT NULL,
-  times_validated INTEGER NOT NULL DEFAULT 1,
   coinbase VARCHAR(66) NOT NULL,
+  canonical BOOLEAN NOT NULL DEFAULT TRUE,
+  withdrawals_root VARCHAR(66) NOT NULL,
   PRIMARY KEY (block_hash, block_number)
 );
 

@@ -6,7 +6,7 @@ CREATE TABLE IF NOT EXISTS eth.uncle_cids (
   parent_hash VARCHAR(66) NOT NULL,
   cid TEXT NOT NULL,
   reward NUMERIC NOT NULL,
-  mh_key TEXT NOT NULL,
+  index INT NOT NULL,
   PRIMARY KEY (block_hash, block_number)
 );
 

@@ -4,11 +4,9 @@ CREATE TABLE IF NOT EXISTS eth.transaction_cids (
   header_id VARCHAR(66) NOT NULL,
   tx_hash VARCHAR(66) NOT NULL,
   cid TEXT NOT NULL,
-  dst VARCHAR(66) NOT NULL,
+  dst VARCHAR(66),
   src VARCHAR(66) NOT NULL,
   index INTEGER NOT NULL,
-  mh_key TEXT NOT NULL,
-  tx_data BYTEA,
   tx_type INTEGER,
   value NUMERIC,
   PRIMARY KEY (tx_hash, header_id, block_number)

@@ -3,13 +3,10 @@ CREATE TABLE IF NOT EXISTS eth.receipt_cids (
   block_number BIGINT NOT NULL,
   header_id VARCHAR(66) NOT NULL,
   tx_id VARCHAR(66) NOT NULL,
-  leaf_cid TEXT NOT NULL,
+  cid TEXT NOT NULL,
   contract VARCHAR(66),
-  contract_hash VARCHAR(66),
-  leaf_mh_key TEXT NOT NULL,
   post_state VARCHAR(66),
-  post_status INTEGER,
-  log_root VARCHAR(66),
+  post_status SMALLINT,
   PRIMARY KEY (tx_id, header_id, block_number)
 );
 

@@ -2,13 +2,15 @@
 CREATE TABLE IF NOT EXISTS eth.state_cids (
   block_number BIGINT NOT NULL,
   header_id VARCHAR(66) NOT NULL,
-  state_leaf_key VARCHAR(66),
+  state_leaf_key VARCHAR(66) NOT NULL,
   cid TEXT NOT NULL,
-  state_path BYTEA NOT NULL,
-  node_type INTEGER NOT NULL,
   diff BOOLEAN NOT NULL DEFAULT FALSE,
-  mh_key TEXT NOT NULL,
-  PRIMARY KEY (state_path, header_id, block_number)
+  balance NUMERIC, -- NULL if "removed"
+  nonce BIGINT, -- NULL if "removed"
+  code_hash VARCHAR(66), -- NULL if "removed"
+  storage_root VARCHAR(66), -- NULL if "removed"
+  removed BOOLEAN NOT NULL,
+  PRIMARY KEY (state_leaf_key, header_id, block_number)
 );
 
 -- +goose Down

@@ -2,14 +2,13 @@
 CREATE TABLE IF NOT EXISTS eth.storage_cids (
   block_number BIGINT NOT NULL,
   header_id VARCHAR(66) NOT NULL,
-  state_path BYTEA NOT NULL,
-  storage_leaf_key VARCHAR(66),
+  state_leaf_key VARCHAR(66) NOT NULL,
+  storage_leaf_key VARCHAR(66) NOT NULL,
   cid TEXT NOT NULL,
-  storage_path BYTEA NOT NULL,
-  node_type INTEGER NOT NULL,
   diff BOOLEAN NOT NULL DEFAULT FALSE,
-  mh_key TEXT NOT NULL,
-  PRIMARY KEY (storage_path, state_path, header_id, block_number)
+  val BYTEA, -- NULL if "removed"
+  removed BOOLEAN NOT NULL,
+  PRIMARY KEY (storage_leaf_key, state_leaf_key, header_id, block_number)
 );
 
 -- +goose Down

@@ -2,8 +2,7 @@
 CREATE TABLE IF NOT EXISTS eth.log_cids (
   block_number BIGINT NOT NULL,
   header_id VARCHAR(66) NOT NULL,
-  leaf_cid TEXT NOT NULL,
-  leaf_mh_key TEXT NOT NULL,
+  cid TEXT NOT NULL,
   rct_id VARCHAR(66) NOT NULL,
   address VARCHAR(66) NOT NULL,
   index INTEGER NOT NULL,
@@ -11,7 +10,6 @@ CREATE TABLE IF NOT EXISTS eth.log_cids (
   topic1 VARCHAR(66),
   topic2 VARCHAR(66),
   topic3 VARCHAR(66),
-  log_data BYTEA,
   PRIMARY KEY (rct_id, index, header_id, block_number)
 );
 

@@ -1,14 +0,0 @@ (file removed)
-- +goose Up
CREATE TABLE IF NOT EXISTS eth.state_accounts (
  block_number BIGINT NOT NULL,
  header_id VARCHAR(66) NOT NULL,
  state_path BYTEA NOT NULL,
  balance NUMERIC NOT NULL,
  nonce BIGINT NOT NULL,
  code_hash BYTEA NOT NULL,
  storage_root VARCHAR(66) NOT NULL,
  PRIMARY KEY (state_path, header_id, block_number)
);

-- +goose Down
DROP TABLE eth.state_accounts;

@@ -1,12 +0,0 @@ (file removed)
-- +goose Up
CREATE TABLE IF NOT EXISTS eth.access_list_elements (
  block_number BIGINT NOT NULL,
  tx_id VARCHAR(66) NOT NULL,
  index INTEGER NOT NULL,
  address VARCHAR(66),
  storage_keys VARCHAR(66)[],
  PRIMARY KEY (tx_id, index, block_number)
);

-- +goose Down
DROP TABLE eth.access_list_elements;

@@ -3,7 +3,7 @@ COMMENT ON TABLE public.nodes IS E'@name NodeInfo';
 COMMENT ON TABLE eth.transaction_cids IS E'@name EthTransactionCids';
 COMMENT ON TABLE eth.header_cids IS E'@name EthHeaderCids';
 COMMENT ON COLUMN public.nodes.node_id IS E'@name ChainNodeID';
-COMMENT ON COLUMN eth.header_cids.node_id IS E'@name EthNodeID';
+COMMENT ON COLUMN eth.header_cids.node_ids IS E'@name EthNodeIDs';
 
 -- +goose Down
 
@@ -11,4 +11,4 @@ COMMENT ON TABLE public.nodes IS NULL;
 COMMENT ON TABLE eth.transaction_cids IS NULL;
 COMMENT ON TABLE eth.header_cids IS NULL;
 COMMENT ON COLUMN public.nodes.node_id IS NULL;
-COMMENT ON COLUMN eth.header_cids.node_id IS NULL;
+COMMENT ON COLUMN eth.header_cids.node_ids IS NULL;

db/migrations/00012_create_cid_indexes.sql (new file, 101 lines)
@@ -0,0 +1,101 @@
-- +goose Up
-- header indexes
CREATE INDEX header_block_number_index ON eth.header_cids USING btree (block_number);
CREATE UNIQUE INDEX header_cid_block_number_index ON eth.header_cids USING btree (cid, block_number);
CREATE INDEX state_root_index ON eth.header_cids USING btree (state_root);
CREATE INDEX timestamp_index ON eth.header_cids USING btree (timestamp);

-- uncle indexes
CREATE INDEX uncle_block_number_index ON eth.uncle_cids USING btree (block_number);
CREATE UNIQUE INDEX uncle_cid_block_number_index ON eth.uncle_cids USING btree (cid, block_number, index);
CREATE INDEX uncle_header_id_index ON eth.uncle_cids USING btree (header_id);

-- transaction indexes
CREATE INDEX tx_block_number_index ON eth.transaction_cids USING btree (block_number);
CREATE INDEX tx_header_id_index ON eth.transaction_cids USING btree (header_id);
CREATE INDEX tx_cid_block_number_index ON eth.transaction_cids USING btree (cid, block_number);
CREATE INDEX tx_dst_index ON eth.transaction_cids USING btree (dst);
CREATE INDEX tx_src_index ON eth.transaction_cids USING btree (src);

-- receipt indexes
CREATE INDEX rct_block_number_index ON eth.receipt_cids USING btree (block_number);
CREATE INDEX rct_header_id_index ON eth.receipt_cids USING btree (header_id);
CREATE INDEX rct_cid_block_number_index ON eth.receipt_cids USING btree (cid, block_number);
CREATE INDEX rct_contract_index ON eth.receipt_cids USING btree (contract);

-- state node indexes
CREATE INDEX state_block_number_index ON eth.state_cids USING btree (block_number);
CREATE INDEX state_cid_block_number_index ON eth.state_cids USING btree (cid, block_number);
CREATE INDEX state_header_id_index ON eth.state_cids USING btree (header_id);
CREATE INDEX state_removed_index ON eth.state_cids USING btree (removed);
CREATE INDEX state_code_hash_index ON eth.state_cids USING btree (code_hash); -- could be useful for e.g. selecting all the state accounts with the same contract bytecode deployed
CREATE INDEX state_leaf_key_block_number_index ON eth.state_cids(state_leaf_key, block_number DESC);

-- storage node indexes
CREATE INDEX storage_block_number_index ON eth.storage_cids USING btree (block_number);
CREATE INDEX storage_state_leaf_key_index ON eth.storage_cids USING btree (state_leaf_key);
CREATE INDEX storage_cid_block_number_index ON eth.storage_cids USING btree (cid, block_number);
CREATE INDEX storage_header_id_index ON eth.storage_cids USING btree (header_id);
CREATE INDEX storage_removed_index ON eth.storage_cids USING btree (removed);
CREATE INDEX storage_leaf_key_block_number_index ON eth.storage_cids(storage_leaf_key, block_number DESC);

-- log indexes
CREATE INDEX log_block_number_index ON eth.log_cids USING btree (block_number);
CREATE INDEX log_header_id_index ON eth.log_cids USING btree (header_id);
CREATE INDEX log_cid_block_number_index ON eth.log_cids USING btree (cid, block_number);
CREATE INDEX log_address_index ON eth.log_cids USING btree (address);
CREATE INDEX log_topic0_index ON eth.log_cids USING btree (topic0);
CREATE INDEX log_topic1_index ON eth.log_cids USING btree (topic1);
CREATE INDEX log_topic2_index ON eth.log_cids USING btree (topic2);
CREATE INDEX log_topic3_index ON eth.log_cids USING btree (topic3);

-- +goose Down
-- log indexes
DROP INDEX eth.log_topic3_index;
DROP INDEX eth.log_topic2_index;
DROP INDEX eth.log_topic1_index;
DROP INDEX eth.log_topic0_index;
DROP INDEX eth.log_address_index;
DROP INDEX eth.log_cid_block_number_index;
DROP INDEX eth.log_header_id_index;
DROP INDEX eth.log_block_number_index;

-- storage node indexes
DROP INDEX eth.storage_removed_index;
DROP INDEX eth.storage_header_id_index;
DROP INDEX eth.storage_cid_block_number_index;
DROP INDEX eth.storage_state_leaf_key_index;
DROP INDEX eth.storage_block_number_index;
DROP INDEX eth.storage_leaf_key_block_number_index;

-- state node indexes
DROP INDEX eth.state_code_hash_index;
DROP INDEX eth.state_removed_index;
DROP INDEX eth.state_header_id_index;
DROP INDEX eth.state_cid_block_number_index;
DROP INDEX eth.state_block_number_index;
DROP INDEX eth.state_leaf_key_block_number_index;

-- receipt indexes
DROP INDEX eth.rct_contract_index;
DROP INDEX eth.rct_cid_block_number_index;
DROP INDEX eth.rct_header_id_index;
DROP INDEX eth.rct_block_number_index;

-- transaction indexes
DROP INDEX eth.tx_src_index;
DROP INDEX eth.tx_dst_index;
DROP INDEX eth.tx_cid_block_number_index;
DROP INDEX eth.tx_header_id_index;
DROP INDEX eth.tx_block_number_index;

-- uncle indexes
DROP INDEX eth.uncle_block_number_index;
DROP INDEX eth.uncle_cid_block_number_index;
DROP INDEX eth.uncle_header_id_index;

-- header indexes
DROP INDEX eth.timestamp_index;
DROP INDEX eth.state_root_index;
DROP INDEX eth.header_cid_block_number_index;
DROP INDEX eth.header_block_number_index;

@@ -5,5 +5,8 @@ CREATE TABLE IF NOT EXISTS public.db_version (
   tstamp TIMESTAMP WITHOUT TIME ZONE DEFAULT NOW()
 );
 
+INSERT INTO public.db_version (singleton, version) VALUES (true, 'v5.0.0')
+ON CONFLICT (singleton) DO UPDATE SET (version, tstamp) = ('v5.0.0', NOW());
+
 -- +goose Down
 DROP TABLE public.db_version;

@@ -1,133 +0,0 @@ (file removed)
-- +goose Up
-- header indexes
CREATE INDEX header_block_number_index ON eth.header_cids USING brin (block_number);
CREATE UNIQUE INDEX header_cid_index ON eth.header_cids USING btree (cid, block_number);
CREATE UNIQUE INDEX header_mh_block_number_index ON eth.header_cids USING btree (mh_key, block_number);
CREATE INDEX state_root_index ON eth.header_cids USING btree (state_root);
CREATE INDEX timestamp_index ON eth.header_cids USING brin (timestamp);

-- uncle indexes
CREATE INDEX uncle_block_number_index ON eth.uncle_cids USING brin (block_number);
CREATE UNIQUE INDEX uncle_mh_block_number_index ON eth.uncle_cids USING btree (mh_key, block_number);
CREATE INDEX uncle_header_id_index ON eth.uncle_cids USING btree (header_id);

-- transaction indexes
CREATE INDEX tx_block_number_index ON eth.transaction_cids USING brin (block_number);
CREATE INDEX tx_header_id_index ON eth.transaction_cids USING btree (header_id);
CREATE INDEX tx_cid_index ON eth.transaction_cids USING btree (cid, block_number);
CREATE INDEX tx_mh_block_number_index ON eth.transaction_cids USING btree (mh_key, block_number);
CREATE INDEX tx_dst_index ON eth.transaction_cids USING btree (dst);
CREATE INDEX tx_src_index ON eth.transaction_cids USING btree (src);

-- receipt indexes
CREATE INDEX rct_block_number_index ON eth.receipt_cids USING brin (block_number);
CREATE INDEX rct_header_id_index ON eth.receipt_cids USING btree (header_id);
CREATE INDEX rct_leaf_cid_index ON eth.receipt_cids USING btree (leaf_cid);
CREATE INDEX rct_leaf_mh_block_number_index ON eth.receipt_cids USING btree (leaf_mh_key, block_number);
CREATE INDEX rct_contract_index ON eth.receipt_cids USING btree (contract);
CREATE INDEX rct_contract_hash_index ON eth.receipt_cids USING btree (contract_hash);

-- state node indexes
CREATE INDEX state_block_number_index ON eth.state_cids USING brin (block_number);
CREATE INDEX state_leaf_key_index ON eth.state_cids USING btree (state_leaf_key);
CREATE INDEX state_cid_index ON eth.state_cids USING btree (cid);
CREATE INDEX state_mh_block_number_index ON eth.state_cids USING btree (mh_key, block_number);
CREATE INDEX state_header_id_index ON eth.state_cids USING btree (header_id);
CREATE INDEX state_node_type_index ON eth.state_cids USING btree (node_type);

-- storage node indexes
CREATE INDEX storage_block_number_index ON eth.storage_cids USING brin (block_number);
CREATE INDEX storage_state_path_index ON eth.storage_cids USING btree (state_path);
CREATE INDEX storage_leaf_key_index ON eth.storage_cids USING btree (storage_leaf_key);
CREATE INDEX storage_cid_index ON eth.storage_cids USING btree (cid);
CREATE INDEX storage_mh_block_number_index ON eth.storage_cids USING btree (mh_key, block_number);
CREATE INDEX storage_header_id_index ON eth.storage_cids USING btree (header_id);
CREATE INDEX storage_node_type_index ON eth.storage_cids USING btree (node_type);

-- state accounts indexes
CREATE INDEX account_block_number_index ON eth.state_accounts USING brin (block_number);
CREATE INDEX account_header_id_index ON eth.state_accounts USING btree (header_id);
CREATE INDEX account_storage_root_index ON eth.state_accounts USING btree (storage_root);

-- access list indexes
CREATE INDEX access_list_block_number_index ON eth.access_list_elements USING brin (block_number);
CREATE INDEX access_list_element_address_index ON eth.access_list_elements USING btree (address);
CREATE INDEX access_list_storage_keys_index ON eth.access_list_elements USING gin (storage_keys);

-- log indexes
CREATE INDEX log_block_number_index ON eth.log_cids USING brin (block_number);
CREATE INDEX log_header_id_index ON eth.log_cids USING btree (header_id);
CREATE INDEX log_leaf_mh_block_number_index ON eth.log_cids USING btree (leaf_mh_key, block_number);
CREATE INDEX log_cid_index ON eth.log_cids USING btree (leaf_cid);
CREATE INDEX log_address_index ON eth.log_cids USING btree (address);
CREATE INDEX log_topic0_index ON eth.log_cids USING btree (topic0);
CREATE INDEX log_topic1_index ON eth.log_cids USING btree (topic1);
CREATE INDEX log_topic2_index ON eth.log_cids USING btree (topic2);
CREATE INDEX log_topic3_index ON eth.log_cids USING btree (topic3);

-- +goose Down
-- log indexes
DROP INDEX eth.log_topic3_index;
DROP INDEX eth.log_topic2_index;
DROP INDEX eth.log_topic1_index;
DROP INDEX eth.log_topic0_index;
DROP INDEX eth.log_address_index;
DROP INDEX eth.log_cid_index;
DROP INDEX eth.log_leaf_mh_block_number_index;
DROP INDEX eth.log_header_id_index;
DROP INDEX eth.log_block_number_index;

-- access list indexes
DROP INDEX eth.access_list_storage_keys_index;
DROP INDEX eth.access_list_element_address_index;
DROP INDEX eth.access_list_block_number_index;

-- state account indexes
DROP INDEX eth.account_storage_root_index;
DROP index eth.account_header_id_index;
DROP INDEX eth.account_block_number_index;

-- storage node indexes
DROP INDEX eth.storage_node_type_index;
DROP INDEX eth.storage_header_id_index;
DROP INDEX eth.storage_mh_block_number_index;
DROP INDEX eth.storage_cid_index;
DROP INDEX eth.storage_leaf_key_index;
DROP INDEX eth.storage_state_path_index;
DROP INDEX eth.storage_block_number_index;

-- state node indexes
DROP INDEX eth.state_node_type_index;
DROP INDEX eth.state_header_id_index;
DROP INDEX eth.state_mh_block_number_index;
DROP INDEX eth.state_cid_index;
DROP INDEX eth.state_leaf_key_index;
DROP INDEX eth.state_block_number_index;

-- receipt indexes
DROP INDEX eth.rct_contract_hash_index;
DROP INDEX eth.rct_contract_index;
DROP INDEX eth.rct_leaf_mh_block_number_index;
DROP INDEX eth.rct_leaf_cid_index;
DROP INDEX eth.rct_header_id_index;
DROP INDEX eth.rct_block_number_index;

-- transaction indexes
DROP INDEX eth.tx_src_index;
DROP INDEX eth.tx_dst_index;
DROP INDEX eth.tx_mh_block_number_index;
DROP INDEX eth.tx_cid_index;
DROP INDEX eth.tx_header_id_index;
DROP INDEX eth.tx_block_number_index;

-- uncle indexes
DROP INDEX eth.uncle_block_number_index;
DROP INDEX eth.uncle_mh_block_number_index;
DROP INDEX eth.uncle_header_id_index;

-- header indexes
DROP INDEX eth.timestamp_index;
DROP INDEX eth.state_root_index;
DROP INDEX eth.header_mh_block_number_index;
DROP INDEX eth.header_cid_index;
DROP INDEX eth.header_block_number_index;

@@ -1,119 +0,0 @@ (file removed)
-- +goose Up
-- Name: graphql_subscription(); Type: FUNCTION; Schema: eth; Owner: -

-- +goose StatementBegin
CREATE FUNCTION eth.graphql_subscription() RETURNS TRIGGER AS $$
DECLARE
    obj jsonb;
BEGIN
    IF (TG_TABLE_NAME = 'state_cids') OR (TG_TABLE_NAME = 'state_accounts') THEN
        obj := json_build_array(
            TG_TABLE_NAME,
            NEW.header_id,
            NEW.state_path
        );
    ELSIF (TG_TABLE_NAME = 'storage_cids') THEN
        obj := json_build_array(
            TG_TABLE_NAME,
            NEW.header_id,
            NEW.state_path,
            NEW.storage_path
        );
    ELSIF (TG_TABLE_NAME = 'log_cids') THEN
        obj := json_build_array(
            TG_TABLE_NAME,
            NEW.header_id,
            NEW.rct_id,
            NEW.index
        );
    ELSIF (TG_TABLE_NAME = 'receipt_cids') THEN
        obj := json_build_array(
            TG_TABLE_NAME,
            NEW.header_id,
            NEW.tx_id
        );
    ELSIF (TG_TABLE_NAME = 'transaction_cids') THEN
        obj := json_build_array(
            TG_TABLE_NAME,
            NEW.header_id,
            NEW.tx_hash
        );
    ELSIF (TG_TABLE_NAME = 'access_list_elements') THEN
        obj := json_build_array(
            TG_TABLE_NAME,
            NEW.tx_id,
            NEW.index
        );
    ELSIF (TG_TABLE_NAME = 'uncle_cids') OR (TG_TABLE_NAME = 'header_cids') THEN
        obj := json_build_array(
            TG_TABLE_NAME,
            NEW.block_hash
        );
    END IF;

    perform pg_notify('postgraphile:' || TG_RELNAME , json_build_object(
        '__node__', obj
        )::text
    );
    RETURN NEW;
END;
$$ language plpgsql;
-- +goose StatementEnd

CREATE TRIGGER trg_eth_header_cids
    AFTER INSERT ON eth.header_cids
    FOR EACH ROW
    EXECUTE PROCEDURE eth.graphql_subscription();

CREATE TRIGGER trg_eth_uncle_cids
    AFTER INSERT ON eth.uncle_cids
    FOR EACH ROW
    EXECUTE PROCEDURE eth.graphql_subscription();

CREATE TRIGGER trg_eth_transaction_cids
    AFTER INSERT ON eth.transaction_cids
    FOR EACH ROW
    EXECUTE PROCEDURE eth.graphql_subscription();

CREATE TRIGGER trg_eth_receipt_cids
    AFTER INSERT ON eth.receipt_cids
    FOR EACH ROW
    EXECUTE PROCEDURE eth.graphql_subscription();

CREATE TRIGGER trg_eth_state_cids
    AFTER INSERT ON eth.state_cids
    FOR EACH ROW
    EXECUTE PROCEDURE eth.graphql_subscription();

CREATE TRIGGER trg_eth_log_cids
    AFTER INSERT ON eth.log_cids
    FOR EACH ROW
    EXECUTE PROCEDURE eth.graphql_subscription();

CREATE TRIGGER trg_eth_storage_cids
    AFTER INSERT ON eth.storage_cids
    FOR EACH ROW
    EXECUTE PROCEDURE eth.graphql_subscription();

CREATE TRIGGER trg_eth_state_accounts
    AFTER INSERT ON eth.state_accounts
    FOR EACH ROW
    EXECUTE PROCEDURE eth.graphql_subscription();

CREATE TRIGGER trg_eth_access_list_elements
    AFTER INSERT ON eth.access_list_elements
    FOR EACH ROW
    EXECUTE PROCEDURE eth.graphql_subscription();

-- +goose Down
DROP TRIGGER trg_eth_uncle_cids ON eth.uncle_cids;
DROP TRIGGER trg_eth_transaction_cids ON eth.transaction_cids;
DROP TRIGGER trg_eth_storage_cids ON eth.storage_cids;
DROP TRIGGER trg_eth_state_cids ON eth.state_cids;
DROP TRIGGER trg_eth_state_accounts ON eth.state_accounts;
DROP TRIGGER trg_eth_receipt_cids ON eth.receipt_cids;
DROP TRIGGER trg_eth_header_cids ON eth.header_cids;
DROP TRIGGER trg_eth_log_cids ON eth.log_cids;
DROP TRIGGER trg_eth_access_list_elements ON eth.access_list_elements;

DROP FUNCTION eth.graphql_subscription();

43  db/migrations/00016_create_stored_procedures.sql  Normal file
@@ -0,0 +1,43 @@
-- +goose Up
-- +goose StatementBegin
-- returns whether the state leaf key is vacated (previously existed but now is empty) at the provided block hash
CREATE OR REPLACE FUNCTION was_state_leaf_removed(v_key VARCHAR(66), v_hash VARCHAR)
    RETURNS boolean AS $$
SELECT state_cids.removed = true
FROM eth.state_cids
    INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
WHERE state_leaf_key = v_key
  AND state_cids.block_number <= (SELECT block_number
                                  FROM eth.header_cids
                                  WHERE block_hash = v_hash)
ORDER BY state_cids.block_number DESC LIMIT 1;
$$
language sql;
-- +goose StatementEnd

-- +goose StatementBegin
-- returns whether the state leaf key is vacated (previously existed but now is empty) at the provided block height
CREATE OR REPLACE FUNCTION public.was_state_leaf_removed_by_number(v_key VARCHAR(66), v_block_no BIGINT)
    RETURNS BOOLEAN AS $$
SELECT state_cids.removed = true
FROM eth.state_cids
    INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
WHERE state_leaf_key = v_key
  AND state_cids.block_number <= v_block_no
ORDER BY state_cids.block_number DESC LIMIT 1;
$$
language sql;
-- +goose StatementEnd

-- +goose StatementBegin
CREATE OR REPLACE FUNCTION canonical_header_hash(height BIGINT) RETURNS character varying AS
$BODY$
SELECT block_hash from eth.header_cids WHERE block_number = height AND canonical = true LIMIT 1;
$BODY$
LANGUAGE sql;
-- +goose StatementEnd

-- +goose Down
DROP FUNCTION was_state_leaf_removed;
DROP FUNCTION was_state_leaf_removed_by_number;
DROP FUNCTION canonical_header_hash;
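A brief usage sketch for these helpers; the state leaf key below is a zero-valued placeholder rather than a real account hash:

-- Was this (placeholder) state leaf removed as of block 1000000?
SELECT was_state_leaf_removed_by_number(
    '0x0000000000000000000000000000000000000000000000000000000000000000',
    1000000
);

-- Which header hash is canonical at that height?
SELECT canonical_header_hash(1000000);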

110  db/migrations/00017_create_get_storage_at_functions.sql  Normal file
@@ -0,0 +1,110 @@
-- +goose Up
-- +goose StatementBegin
CREATE OR REPLACE FUNCTION public.get_storage_at_by_number(v_state_leaf_key text, v_storage_leaf_key text, v_block_no bigint)
    RETURNS TABLE
            (
                cid                TEXT,
                val                BYTEA,
                block_number       BIGINT,
                removed            BOOL,
                state_leaf_removed BOOL
            )
AS
$BODY$
DECLARE
    v_state_path       BYTEA;
    v_header           TEXT;
    v_canonical_header TEXT;
BEGIN
    CREATE TEMP TABLE tmp_tt_stg2
    (
        header_id          TEXT,
        cid                TEXT,
        val                BYTEA,
        block_number       BIGINT,
        removed            BOOL,
        state_leaf_removed BOOL
    ) ON COMMIT DROP;

    -- in best case scenario, the latest record we find for the provided keys is for a canonical block
    INSERT INTO tmp_tt_stg2
    SELECT storage_cids.header_id,
           storage_cids.cid,
           storage_cids.val,
           storage_cids.block_number,
           storage_cids.removed,
           was_state_leaf_removed_by_number(v_state_leaf_key, v_block_no) AS state_leaf_removed
    FROM eth.storage_cids
    WHERE storage_leaf_key = v_storage_leaf_key
      AND storage_cids.state_leaf_key = v_state_leaf_key -- can lookup directly on the leaf key in v5
      AND storage_cids.block_number <= v_block_no
    ORDER BY storage_cids.block_number DESC LIMIT 1;

    -- check if result is from canonical state
    SELECT header_id, canonical_header_hash(tmp_tt_stg2.block_number)
    INTO v_header, v_canonical_header
    FROM tmp_tt_stg2 LIMIT 1;

    IF v_header IS NULL OR v_header != v_canonical_header THEN
        RAISE NOTICE 'get_storage_at_by_number: chosen header NULL OR % != canonical header % for block number %, trying again.', v_header, v_canonical_header, v_block_no;
        TRUNCATE tmp_tt_stg2;
        -- If we hit on a non-canonical block, we need to go back and do a comprehensive check.
        -- We try to avoid this to avoid joining between storage_cids and header_cids
        INSERT INTO tmp_tt_stg2
        SELECT storage_cids.header_id,
               storage_cids.cid,
               storage_cids.val,
               storage_cids.block_number,
               storage_cids.removed,
               was_state_leaf_removed_by_number(
                   v_state_leaf_key,
                   v_block_no
               ) AS state_leaf_removed
        FROM eth.storage_cids
            INNER JOIN eth.header_cids ON (
                storage_cids.header_id = header_cids.block_hash
                AND storage_cids.block_number = header_cids.block_number
            )
        WHERE state_leaf_key = v_state_leaf_key
          AND storage_leaf_key = v_storage_leaf_key
          AND storage_cids.block_number <= v_block_no
          AND header_cids.block_number <= v_block_no
          AND header_cids.block_hash = (SELECT canonical_header_hash(header_cids.block_number))
        ORDER BY header_cids.block_number DESC LIMIT 1;
    END IF;

    RETURN QUERY SELECT t.cid, t.val, t.block_number, t.removed, t.state_leaf_removed
                 FROM tmp_tt_stg2 AS t LIMIT 1;
END
$BODY$
language 'plpgsql';
-- +goose StatementEnd

-- +goose StatementBegin
CREATE OR REPLACE FUNCTION public.get_storage_at_by_hash(v_state_leaf_key TEXT, v_storage_leaf_key text, v_block_hash text)
    RETURNS TABLE
            (
                cid                TEXT,
                val                BYTEA,
                block_number       BIGINT,
                removed            BOOL,
                state_leaf_removed BOOL
            )
AS
$BODY$
DECLARE
    v_block_no BIGINT;
BEGIN
    SELECT h.block_number INTO v_block_no FROM eth.header_cids AS h WHERE block_hash = v_block_hash LIMIT 1;
    IF v_block_no IS NULL THEN
        RETURN;
    END IF;
    RETURN QUERY SELECT * FROM get_storage_at_by_number(v_state_leaf_key, v_storage_leaf_key, v_block_no);
END
$BODY$
LANGUAGE 'plpgsql';
-- +goose StatementEnd

-- +goose Down
DROP FUNCTION get_storage_at_by_hash;
DROP FUNCTION get_storage_at_by_number;
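For illustration, a sketch of calling these functions from psql; the two leaf keys and the block hash are zero-valued placeholders:

-- Storage slot value at or before a given height
SELECT cid, val, block_number, removed, state_leaf_removed
FROM get_storage_at_by_number(
    '0x0000000000000000000000000000000000000000000000000000000000000000',  -- state leaf key (placeholder)
    '0x0000000000000000000000000000000000000000000000000000000000000000',  -- storage leaf key (placeholder)
    1000000
);

-- Same lookup keyed by block hash; returns no rows if the hash is unknown
SELECT *
FROM get_storage_at_by_hash(
    '0x0000000000000000000000000000000000000000000000000000000000000000',
    '0x0000000000000000000000000000000000000000000000000000000000000000',
    '0x0000000000000000000000000000000000000000000000000000000000000000'
);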

105  db/migrations/00018_create_foreign_keys.sql  Normal file
@@ -0,0 +1,105 @@
-- +goose Up
ALTER TABLE eth.header_cids
    ADD CONSTRAINT header_cids_ipld_blocks_fkey
        FOREIGN KEY (cid, block_number)
            REFERENCES ipld.blocks (key, block_number);

ALTER TABLE eth.uncle_cids
    ADD CONSTRAINT uncle_cids_ipld_blocks_fkey
        FOREIGN KEY (cid, block_number)
            REFERENCES ipld.blocks (key, block_number);

ALTER TABLE eth.uncle_cids
    ADD CONSTRAINT uncle_cids_header_cids_fkey
        FOREIGN KEY (header_id, block_number)
            REFERENCES eth.header_cids (block_hash, block_number);

ALTER TABLE eth.transaction_cids
    ADD CONSTRAINT transaction_cids_ipld_blocks_fkey
        FOREIGN KEY (cid, block_number)
            REFERENCES ipld.blocks (key, block_number);

ALTER TABLE eth.transaction_cids
    ADD CONSTRAINT transaction_cids_header_cids_fkey
        FOREIGN KEY (header_id, block_number)
            REFERENCES eth.header_cids (block_hash, block_number);

ALTER TABLE eth.receipt_cids
    ADD CONSTRAINT receipt_cids_ipld_blocks_fkey
        FOREIGN KEY (cid, block_number)
            REFERENCES ipld.blocks (key, block_number);

ALTER TABLE eth.receipt_cids
    ADD CONSTRAINT receipt_cids_transaction_cids_fkey
        FOREIGN KEY (tx_id, header_id, block_number)
            REFERENCES eth.transaction_cids (tx_hash, header_id, block_number);

ALTER TABLE eth.state_cids
    ADD CONSTRAINT state_cids_ipld_blocks_fkey
        FOREIGN KEY (cid, block_number)
            REFERENCES ipld.blocks (key, block_number);

ALTER TABLE eth.state_cids
    ADD CONSTRAINT state_cids_header_cids_fkey
        FOREIGN KEY (header_id, block_number)
            REFERENCES eth.header_cids (block_hash, block_number);

ALTER TABLE eth.storage_cids
    ADD CONSTRAINT storage_cids_ipld_blocks_fkey
        FOREIGN KEY (cid, block_number)
            REFERENCES ipld.blocks (key, block_number);

ALTER TABLE eth.storage_cids
    ADD CONSTRAINT storage_cids_state_cids_fkey
        FOREIGN KEY (state_leaf_key, header_id, block_number)
            REFERENCES eth.state_cids (state_leaf_key, header_id, block_number);

ALTER TABLE eth.log_cids
    ADD CONSTRAINT log_cids_ipld_blocks_fkey
        FOREIGN KEY (cid, block_number)
            REFERENCES ipld.blocks (key, block_number);

ALTER TABLE eth.log_cids
    ADD CONSTRAINT log_cids_receipt_cids_fkey
        FOREIGN KEY (rct_id, header_id, block_number)
            REFERENCES eth.receipt_cids (tx_id, header_id, block_number);

-- +goose Down
ALTER TABLE eth.log_cids
    DROP CONSTRAINT log_cids_receipt_cids_fkey;

ALTER TABLE eth.log_cids
    DROP CONSTRAINT log_cids_ipld_blocks_fkey;

ALTER TABLE eth.storage_cids
    DROP CONSTRAINT storage_cids_state_cids_fkey;

ALTER TABLE eth.storage_cids
    DROP CONSTRAINT storage_cids_ipld_blocks_fkey;

ALTER TABLE eth.state_cids
    DROP CONSTRAINT state_cids_header_cids_fkey;

ALTER TABLE eth.state_cids
    DROP CONSTRAINT state_cids_ipld_blocks_fkey;

ALTER TABLE eth.receipt_cids
    DROP CONSTRAINT receipt_cids_transaction_cids_fkey;

ALTER TABLE eth.receipt_cids
    DROP CONSTRAINT receipt_cids_ipld_blocks_fkey;

ALTER TABLE eth.transaction_cids
    DROP CONSTRAINT transaction_cids_header_cids_fkey;

ALTER TABLE eth.transaction_cids
    DROP CONSTRAINT transaction_cids_ipld_blocks_fkey;

ALTER TABLE eth.uncle_cids
    DROP CONSTRAINT uncle_cids_header_cids_fkey;

ALTER TABLE eth.uncle_cids
    DROP CONSTRAINT uncle_cids_ipld_blocks_fkey;

ALTER TABLE eth.header_cids
    DROP CONSTRAINT header_cids_ipld_blocks_fkey;
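A quick way to confirm the constraints landed, sketched against standard Postgres catalogs (no project-specific assumptions beyond the schema names used above):

-- List foreign keys on the eth tables after running the migration
SELECT conrelid::regclass AS table_name, conname
FROM pg_constraint
WHERE contype = 'f'
  AND connamespace = 'eth'::regnamespace
ORDER BY table_name, conname;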

@@ -1,10 +0,0 @@
-- +goose Up
CREATE TABLE eth_meta.known_gaps (
    starting_block_number bigint PRIMARY KEY,
    ending_block_number bigint,
    checked_out boolean,
    processing_key bigint
);

-- +goose Down
DROP TABLE eth_meta.known_gaps;

105  db/migrations/00019_drop_foreign_keys.sql  Normal file
@@ -0,0 +1,105 @@
-- +goose Up
ALTER TABLE eth.log_cids
    DROP CONSTRAINT log_cids_receipt_cids_fkey;

ALTER TABLE eth.log_cids
    DROP CONSTRAINT log_cids_ipld_blocks_fkey;

ALTER TABLE eth.storage_cids
    DROP CONSTRAINT storage_cids_state_cids_fkey;

ALTER TABLE eth.storage_cids
    DROP CONSTRAINT storage_cids_ipld_blocks_fkey;

ALTER TABLE eth.state_cids
    DROP CONSTRAINT state_cids_header_cids_fkey;

ALTER TABLE eth.state_cids
    DROP CONSTRAINT state_cids_ipld_blocks_fkey;

ALTER TABLE eth.receipt_cids
    DROP CONSTRAINT receipt_cids_transaction_cids_fkey;

ALTER TABLE eth.receipt_cids
    DROP CONSTRAINT receipt_cids_ipld_blocks_fkey;

ALTER TABLE eth.transaction_cids
    DROP CONSTRAINT transaction_cids_header_cids_fkey;

ALTER TABLE eth.transaction_cids
    DROP CONSTRAINT transaction_cids_ipld_blocks_fkey;

ALTER TABLE eth.uncle_cids
    DROP CONSTRAINT uncle_cids_header_cids_fkey;

ALTER TABLE eth.uncle_cids
    DROP CONSTRAINT uncle_cids_ipld_blocks_fkey;

ALTER TABLE eth.header_cids
    DROP CONSTRAINT header_cids_ipld_blocks_fkey;

-- +goose Down
ALTER TABLE eth.header_cids
    ADD CONSTRAINT header_cids_ipld_blocks_fkey
        FOREIGN KEY (cid, block_number)
            REFERENCES ipld.blocks (key, block_number);

ALTER TABLE eth.uncle_cids
    ADD CONSTRAINT uncle_cids_ipld_blocks_fkey
        FOREIGN KEY (cid, block_number)
            REFERENCES ipld.blocks (key, block_number);

ALTER TABLE eth.uncle_cids
    ADD CONSTRAINT uncle_cids_header_cids_fkey
        FOREIGN KEY (header_id, block_number)
            REFERENCES eth.header_cids (block_hash, block_number);

ALTER TABLE eth.transaction_cids
    ADD CONSTRAINT transaction_cids_ipld_blocks_fkey
        FOREIGN KEY (cid, block_number)
            REFERENCES ipld.blocks (key, block_number);

ALTER TABLE eth.transaction_cids
    ADD CONSTRAINT transaction_cids_header_cids_fkey
        FOREIGN KEY (header_id, block_number)
            REFERENCES eth.header_cids (block_hash, block_number);

ALTER TABLE eth.receipt_cids
    ADD CONSTRAINT receipt_cids_ipld_blocks_fkey
        FOREIGN KEY (cid, block_number)
            REFERENCES ipld.blocks (key, block_number);

ALTER TABLE eth.receipt_cids
    ADD CONSTRAINT receipt_cids_transaction_cids_fkey
        FOREIGN KEY (tx_id, header_id, block_number)
            REFERENCES eth.transaction_cids (tx_hash, header_id, block_number);

ALTER TABLE eth.state_cids
    ADD CONSTRAINT state_cids_ipld_blocks_fkey
        FOREIGN KEY (cid, block_number)
            REFERENCES ipld.blocks (key, block_number);

ALTER TABLE eth.state_cids
    ADD CONSTRAINT state_cids_header_cids_fkey
        FOREIGN KEY (header_id, block_number)
            REFERENCES eth.header_cids (block_hash, block_number);

ALTER TABLE eth.storage_cids
    ADD CONSTRAINT storage_cids_ipld_blocks_fkey
        FOREIGN KEY (cid, block_number)
            REFERENCES ipld.blocks (key, block_number);

ALTER TABLE eth.storage_cids
    ADD CONSTRAINT storage_cids_state_cids_fkey
        FOREIGN KEY (state_leaf_key, header_id, block_number)
            REFERENCES eth.state_cids (state_leaf_key, header_id, block_number);

ALTER TABLE eth.log_cids
    ADD CONSTRAINT log_cids_ipld_blocks_fkey
        FOREIGN KEY (cid, block_number)
            REFERENCES ipld.blocks (key, block_number);

ALTER TABLE eth.log_cids
    ADD CONSTRAINT log_cids_receipt_cids_fkey
        FOREIGN KEY (rct_id, header_id, block_number)
            REFERENCES eth.receipt_cids (tx_id, header_id, block_number);

98  db/migrations/00020_convert_to_hypertables.sql  Normal file
@@ -0,0 +1,98 @@
-- +goose Up
SELECT create_hypertable('ipld.blocks', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.uncle_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.transaction_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.receipt_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.state_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.storage_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.log_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);

-- update version
INSERT INTO public.db_version (singleton, version) VALUES (true, 'v5.0.0-h')
ON CONFLICT (singleton) DO UPDATE SET (version, tstamp) = ('v5.0.0-h', NOW());

-- +goose Down
INSERT INTO public.db_version (singleton, version) VALUES (true, 'v5.0.0')
ON CONFLICT (singleton) DO UPDATE SET (version, tstamp) = ('v5.0.0', NOW());

-- reversing conversion to hypertable requires migrating all data from every chunk back to a single table
-- create new regular tables
CREATE TABLE eth.log_cids_i (LIKE eth.log_cids INCLUDING ALL);
CREATE TABLE eth.storage_cids_i (LIKE eth.storage_cids INCLUDING ALL);
CREATE TABLE eth.state_cids_i (LIKE eth.state_cids INCLUDING ALL);
CREATE TABLE eth.receipt_cids_i (LIKE eth.receipt_cids INCLUDING ALL);
CREATE TABLE eth.transaction_cids_i (LIKE eth.transaction_cids INCLUDING ALL);
CREATE TABLE eth.uncle_cids_i (LIKE eth.uncle_cids INCLUDING ALL);
CREATE TABLE ipld.blocks_i (LIKE ipld.blocks INCLUDING ALL);

-- migrate data
INSERT INTO eth.log_cids_i (SELECT * FROM eth.log_cids);
INSERT INTO eth.storage_cids_i (SELECT * FROM eth.storage_cids);
INSERT INTO eth.state_cids_i (SELECT * FROM eth.state_cids);
INSERT INTO eth.receipt_cids_i (SELECT * FROM eth.receipt_cids);
INSERT INTO eth.transaction_cids_i (SELECT * FROM eth.transaction_cids);
INSERT INTO eth.uncle_cids_i (SELECT * FROM eth.uncle_cids);
INSERT INTO ipld.blocks_i (SELECT * FROM ipld.blocks);

-- drop hypertables
DROP TABLE eth.log_cids;
DROP TABLE eth.storage_cids;
DROP TABLE eth.state_cids;
DROP TABLE eth.receipt_cids;
DROP TABLE eth.transaction_cids;
DROP TABLE eth.uncle_cids;
DROP TABLE ipld.blocks;

-- rename new tables
ALTER TABLE eth.log_cids_i RENAME TO log_cids;
ALTER TABLE eth.storage_cids_i RENAME TO storage_cids;
ALTER TABLE eth.state_cids_i RENAME TO state_cids;
ALTER TABLE eth.receipt_cids_i RENAME TO receipt_cids;
ALTER TABLE eth.transaction_cids_i RENAME TO transaction_cids;
ALTER TABLE eth.uncle_cids_i RENAME TO uncle_cids;
ALTER TABLE ipld.blocks_i RENAME TO blocks;

-- rename indexes:
-- log indexes
ALTER INDEX eth.log_cids_i_topic3_idx RENAME TO log_topic3_index;
ALTER INDEX eth.log_cids_i_topic2_idx RENAME TO log_topic2_index;
ALTER INDEX eth.log_cids_i_topic1_idx RENAME TO log_topic1_index;
ALTER INDEX eth.log_cids_i_topic0_idx RENAME TO log_topic0_index;
ALTER INDEX eth.log_cids_i_address_idx RENAME TO log_address_index;
ALTER INDEX eth.log_cids_i_cid_block_number_idx RENAME TO log_cid_block_number_index;
ALTER INDEX eth.log_cids_i_header_id_idx RENAME TO log_header_id_index;
ALTER INDEX eth.log_cids_i_block_number_idx RENAME TO log_block_number_index;

-- storage node indexes
ALTER INDEX eth.storage_cids_i_removed_idx RENAME TO storage_removed_index;
ALTER INDEX eth.storage_cids_i_header_id_idx RENAME TO storage_header_id_index;
ALTER INDEX eth.storage_cids_i_cid_block_number_idx RENAME TO storage_cid_block_number_index;
ALTER INDEX eth.storage_cids_i_state_leaf_key_idx RENAME TO storage_state_leaf_key_index;
ALTER INDEX eth.storage_cids_i_block_number_idx RENAME TO storage_block_number_index;
ALTER INDEX eth.storage_cids_i_storage_leaf_key_block_number_idx RENAME TO storage_leaf_key_block_number_index;

-- state node indexes
ALTER INDEX eth.state_cids_i_code_hash_idx RENAME TO state_code_hash_index;
ALTER INDEX eth.state_cids_i_removed_idx RENAME TO state_removed_index;
ALTER INDEX eth.state_cids_i_header_id_idx RENAME TO state_header_id_index;
ALTER INDEX eth.state_cids_i_cid_block_number_idx RENAME TO state_cid_block_number_index;
ALTER INDEX eth.state_cids_i_block_number_idx RENAME TO state_block_number_index;
ALTER INDEX eth.state_cids_i_state_leaf_key_block_number_idx RENAME TO state_leaf_key_block_number_index;

-- receipt indexes
ALTER INDEX eth.receipt_cids_i_contract_idx RENAME TO rct_contract_index;
ALTER INDEX eth.receipt_cids_i_cid_block_number_idx RENAME TO rct_cid_block_number_index;
ALTER INDEX eth.receipt_cids_i_header_id_idx RENAME TO rct_header_id_index;
ALTER INDEX eth.receipt_cids_i_block_number_idx RENAME TO rct_block_number_index;

-- transaction indexes
ALTER INDEX eth.transaction_cids_i_src_idx RENAME TO tx_src_index;
ALTER INDEX eth.transaction_cids_i_dst_idx RENAME TO tx_dst_index;
ALTER INDEX eth.transaction_cids_i_cid_block_number_idx RENAME TO tx_cid_block_number_index;
ALTER INDEX eth.transaction_cids_i_header_id_idx RENAME TO tx_header_id_index;
ALTER INDEX eth.transaction_cids_i_block_number_idx RENAME TO tx_block_number_index;

-- uncle indexes
ALTER INDEX eth.uncle_cids_i_block_number_idx RENAME TO uncle_block_number_index;
ALTER INDEX eth.uncle_cids_i_cid_block_number_index_idx RENAME TO uncle_cid_block_number_index;
ALTER INDEX eth.uncle_cids_i_header_id_idx RENAME TO uncle_header_id_index;
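To verify the conversion, a sketch using the TimescaleDB 2.x information views (assuming that extension version; the view layout differs in 1.x):

SELECT hypertable_schema, hypertable_name, num_chunks
FROM timescaledb_information.hypertables
ORDER BY hypertable_schema, hypertable_name;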

@@ -1,6 +0,0 @@
-- +goose Up
INSERT INTO public.db_version (singleton, version) VALUES (true, 'v4.0.0')
ON CONFLICT (singleton) DO UPDATE SET (version, tstamp) = ('v4.0.0', NOW());

-- +goose Down
DELETE FROM public.db_version WHERE version = 'v4.0.0';

@@ -1,68 +0,0 @@
-- +goose Up
SELECT create_hypertable('public.blocks', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.header_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.uncle_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.transaction_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.receipt_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.state_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.storage_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.state_accounts', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.access_list_elements', 'block_number', migrate_data => true, chunk_time_interval => 32768);
SELECT create_hypertable('eth.log_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);

-- update version
INSERT INTO public.db_version (singleton, version) VALUES (true, 'v4.0.0-h')
ON CONFLICT (singleton) DO UPDATE SET (version, tstamp) = ('v4.0.0-h', NOW());

-- +goose Down
INSERT INTO public.db_version (singleton, version) VALUES (true, 'v4.0.0')
ON CONFLICT (singleton) DO UPDATE SET (version, tstamp) = ('v4.0.0', NOW());

-- reversing conversion to hypertable requires migrating all data from every chunk back to a single table
-- create new regular tables
CREATE TABLE eth.log_cids_i (LIKE eth.log_cids INCLUDING ALL);
CREATE TABLE eth.access_list_elements_i (LIKE eth.access_list_elements INCLUDING ALL);
CREATE TABLE eth.state_accounts_i (LIKE eth.state_accounts INCLUDING ALL);
CREATE TABLE eth.storage_cids_i (LIKE eth.storage_cids INCLUDING ALL);
CREATE TABLE eth.state_cids_i (LIKE eth.state_cids INCLUDING ALL);
CREATE TABLE eth.receipt_cids_i (LIKE eth.receipt_cids INCLUDING ALL);
CREATE TABLE eth.transaction_cids_i (LIKE eth.transaction_cids INCLUDING ALL);
CREATE TABLE eth.uncle_cids_i (LIKE eth.uncle_cids INCLUDING ALL);
CREATE TABLE eth.header_cids_i (LIKE eth.header_cids INCLUDING ALL);
CREATE TABLE public.blocks_i (LIKE public.blocks INCLUDING ALL);

-- migrate data
INSERT INTO eth.log_cids_i (SELECT * FROM eth.log_cids);
INSERT INTO eth.access_list_elements_i (SELECT * FROM eth.access_list_elements);
INSERT INTO eth.state_accounts_i (SELECT * FROM eth.state_accounts);
INSERT INTO eth.storage_cids_i (SELECT * FROM eth.storage_cids);
INSERT INTO eth.state_cids_i (SELECT * FROM eth.state_cids);
INSERT INTO eth.receipt_cids_i (SELECT * FROM eth.receipt_cids);
INSERT INTO eth.transaction_cids_i (SELECT * FROM eth.transaction_cids);
INSERT INTO eth.uncle_cids_i (SELECT * FROM eth.uncle_cids);
INSERT INTO eth.header_cids_i (SELECT * FROM eth.header_cids);
INSERT INTO public.blocks_i (SELECT * FROM public.blocks);

-- drop hypertables
DROP TABLE eth.log_cids;
DROP TABLE eth.access_list_elements;
DROP TABLE eth.state_accounts;
DROP TABLE eth.storage_cids;
DROP TABLE eth.state_cids;
DROP TABLE eth.receipt_cids;
DROP TABLE eth.transaction_cids;
DROP TABLE eth.uncle_cids;
DROP TABLE eth.header_cids;
DROP TABLE public.blocks;

-- rename new tables
ALTER TABLE eth.log_cids_i RENAME TO log_cids;
ALTER TABLE eth.access_list_elements_i RENAME TO access_list_elements;
ALTER TABLE eth.state_accounts_i RENAME TO state_accounts;
ALTER TABLE eth.storage_cids_i RENAME TO storage_cids;
ALTER TABLE eth.state_cids_i RENAME TO state_cids;
ALTER TABLE eth.receipt_cids_i RENAME TO receipt_cids;
ALTER TABLE eth.transaction_cids_i RENAME TO transaction_cids;
ALTER TABLE eth.uncle_cids_i RENAME TO uncle_cids;
ALTER TABLE eth.header_cids_i RENAME TO header_cids;
ALTER TABLE public.blocks_i RENAME TO blocks;

16  db/migrations/00021_create_eth_withdrawal_cids_table.sql  Normal file
@@ -0,0 +1,16 @@
-- +goose Up
CREATE TABLE IF NOT EXISTS eth.withdrawal_cids (
    block_number BIGINT NOT NULL,
    header_id VARCHAR(66) NOT NULL,
    cid TEXT NOT NULL,
    index INTEGER NOT NULL,
    validator INTEGER NOT NULL,
    address VARCHAR(66) NOT NULL,
    amount NUMERIC NOT NULL,
    PRIMARY KEY (index, header_id, block_number)
);

SELECT create_hypertable('eth.withdrawal_cids', 'block_number', migrate_data => true, chunk_time_interval => 32768);

-- +goose Down
DROP TABLE eth.withdrawal_cids;
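An illustrative read query against the new table; the address is a zero-valued placeholder and the block range is arbitrary:

SELECT block_number, index, validator, amount
FROM eth.withdrawal_cids
WHERE address = '0x0000000000000000000000000000000000000000000000000000000000000000'
  AND block_number BETWEEN 1000000 AND 1001000
ORDER BY block_number, index;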

12  db/migrations/00022_create_eth_blob_hash_cids_table.sql  Normal file
@@ -0,0 +1,12 @@
-- +goose Up
CREATE TABLE eth.blob_hashes (
    tx_hash VARCHAR(66) NOT NULL,
    index INTEGER NOT NULL,
    blob_hash BYTEA NOT NULL
);

CREATE UNIQUE INDEX blob_hashes_tx_hash_index ON eth.blob_hashes(tx_hash, index);

-- +goose Down
DROP INDEX eth.blob_hashes_tx_hash_index;
DROP TABLE eth.blob_hashes;
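And a sketch of joining blob hashes back to their transactions; the block number is an arbitrary placeholder:

SELECT t.tx_hash, b.index, b.blob_hash
FROM eth.blob_hashes AS b
    INNER JOIN eth.transaction_cids AS t ON t.tx_hash = b.tx_hash
WHERE t.block_number = 1000000
ORDER BY t.tx_hash, b.index;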

@@ -1,248 +0,0 @@
-- +goose Up
-- +goose StatementBegin
-- returns if a state leaf node was removed within the provided block number
CREATE OR REPLACE FUNCTION was_state_leaf_removed(key character varying, hash character varying)
    RETURNS boolean AS $$
SELECT state_cids.node_type = 3
FROM eth.state_cids
    INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
WHERE state_leaf_key = key
  AND state_cids.block_number <= (SELECT block_number
                                  FROM eth.header_cids
                                  WHERE block_hash = hash)
ORDER BY state_cids.block_number DESC LIMIT 1;
$$
language sql;
-- +goose StatementEnd

-- +goose StatementBegin
CREATE TYPE child_result AS (
    has_child BOOLEAN,
    children eth.header_cids[]
);

CREATE OR REPLACE FUNCTION has_child(hash VARCHAR(66), height BIGINT) RETURNS child_result AS
$BODY$
DECLARE
    child_height INT;
    temp_child eth.header_cids;
    new_child_result child_result;
BEGIN
    child_height = height + 1;
    -- short circuit if there are no children
    SELECT exists(SELECT 1
                  FROM eth.header_cids
                  WHERE parent_hash = hash
                    AND block_number = child_height
                  LIMIT 1)
    INTO new_child_result.has_child;
    -- collect all the children for this header
    IF new_child_result.has_child THEN
        FOR temp_child IN
            SELECT * FROM eth.header_cids WHERE parent_hash = hash AND block_number = child_height
        LOOP
            new_child_result.children = array_append(new_child_result.children, temp_child);
        END LOOP;
    END IF;
    RETURN new_child_result;
END
$BODY$
LANGUAGE 'plpgsql';
-- +goose StatementEnd

-- +goose StatementBegin
CREATE OR REPLACE FUNCTION canonical_header_from_array(headers eth.header_cids[]) RETURNS eth.header_cids AS
$BODY$
DECLARE
    canonical_header eth.header_cids;
    canonical_child eth.header_cids;
    header eth.header_cids;
    current_child_result child_result;
    child_headers eth.header_cids[];
    current_header_with_child eth.header_cids;
    has_children_count INT DEFAULT 0;
BEGIN
    -- for each header in the provided set
    FOREACH header IN ARRAY headers
    LOOP
        -- check if it has any children
        current_child_result = has_child(header.block_hash, header.block_number);
        IF current_child_result.has_child THEN
            -- if it does, take note
            has_children_count = has_children_count + 1;
            current_header_with_child = header;
            -- and add the children to the growing set of child headers
            child_headers = array_cat(child_headers, current_child_result.children);
        END IF;
    END LOOP;
    -- if none of the headers had children, none is more canonical than the other
    IF has_children_count = 0 THEN
        -- return the first one selected
        SELECT * INTO canonical_header FROM unnest(headers) LIMIT 1;
    -- if only one header had children, it can be considered the heaviest/canonical header of the set
    ELSIF has_children_count = 1 THEN
        -- return the only header with a child
        canonical_header = current_header_with_child;
    -- if there are multiple headers with children
    ELSE
        -- find the canonical header from the child set
        canonical_child = canonical_header_from_array(child_headers);
        -- the header that is parent to this header, is the canonical header at this level
        SELECT * INTO canonical_header FROM unnest(headers)
        WHERE block_hash = canonical_child.parent_hash;
    END IF;
    RETURN canonical_header;
END
$BODY$
LANGUAGE 'plpgsql';
-- +goose StatementEnd

-- +goose StatementBegin
CREATE OR REPLACE FUNCTION canonical_header_hash(height BIGINT) RETURNS character varying AS
$BODY$
DECLARE
    canonical_header eth.header_cids;
    headers eth.header_cids[];
    header_count INT;
    temp_header eth.header_cids;
BEGIN
    -- collect all headers at this height
    FOR temp_header IN
        SELECT * FROM eth.header_cids WHERE block_number = height
    LOOP
        headers = array_append(headers, temp_header);
    END LOOP;
    -- count the number of headers collected
    header_count = array_length(headers, 1);
    -- if we have less than 1 header, return NULL
    IF header_count IS NULL OR header_count < 1 THEN
        RETURN NULL;
    -- if we have one header, return its hash
    ELSIF header_count = 1 THEN
        RETURN headers[1].block_hash;
    -- if we have multiple headers we need to determine which one is canonical
    ELSE
        canonical_header = canonical_header_from_array(headers);
        RETURN canonical_header.block_hash;
    END IF;
END
$BODY$
LANGUAGE 'plpgsql';
-- +goose StatementEnd

-- +goose StatementBegin
CREATE TYPE state_node_result AS (
    data BYTEA,
    state_leaf_key VARCHAR(66),
    cid TEXT,
    state_path BYTEA,
    node_type INTEGER,
    mh_key TEXT
);
-- +goose StatementEnd

-- +goose StatementBegin
CREATE OR REPLACE FUNCTION state_snapshot(starting_height BIGINT, ending_height BIGINT) RETURNS void AS
$BODY$
DECLARE
    canonical_hash VARCHAR(66);
    results state_node_result[];
BEGIN
    -- get the canonical hash for the header at ending_height
    canonical_hash = canonical_header_hash(ending_height);
    IF canonical_hash IS NULL THEN
        RAISE EXCEPTION 'cannot create state snapshot, no header can be found at height %', ending_height;
    END IF;

    -- select all of the state nodes for this snapshot: the latest state node record at every unique path
    SELECT ARRAY (SELECT DISTINCT ON (state_path) ROW (blocks.data, state_cids.state_leaf_key, state_cids.cid, state_cids.state_path,
                                                       state_cids.node_type, state_cids.mh_key)
                  FROM eth.state_cids
                      INNER JOIN public.blocks
                          ON (state_cids.mh_key, state_cids.block_number) = (blocks.key, blocks.block_number)
                  WHERE state_cids.block_number BETWEEN starting_height AND ending_height
                  ORDER BY state_path, state_cids.block_number DESC)
    INTO results;

    -- from the set returned above, insert public.block records at the ending_height block number
    INSERT INTO public.blocks (block_number, key, data)
    SELECT ending_height, r.mh_key, r.data
    FROM unnest(results) r;

    -- from the set returned above, insert eth.state_cids records at the ending_height block number
    -- anchoring all the records to the canonical header found at ending_height
    INSERT INTO eth.state_cids (block_number, header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key)
    SELECT ending_height, canonical_hash, r.state_leaf_key, r.cid, r.state_path, r.node_type, false, r.mh_key
    FROM unnest(results) r
    ON CONFLICT (state_path, header_id, block_number) DO NOTHING;
END
$BODY$
LANGUAGE 'plpgsql';
-- +goose StatementEnd

-- +goose StatementBegin
CREATE TYPE storage_node_result AS (
    data BYTEA,
    state_path BYTEA,
    storage_leaf_key VARCHAR(66),
    cid TEXT,
    storage_path BYTEA,
    node_type INTEGER,
    mh_key TEXT
);
-- +goose StatementEnd

-- +goose StatementBegin
-- this should only be ran after a state_snapshot has been completed
-- this should probably be rolled together with state_snapshot into a single procedure...
CREATE OR REPLACE FUNCTION storage_snapshot(starting_height BIGINT, ending_height BIGINT) RETURNS void AS
$BODY$
DECLARE
    canonical_hash VARCHAR(66);
    results storage_node_result[];
BEGIN
    -- get the canonical hash for the header at ending_height
    SELECT canonical_header_hash(ending_height) INTO canonical_hash;
    IF canonical_hash IS NULL THEN
        RAISE EXCEPTION 'cannot create state snapshot, no header can be found at height %', ending_height;
    END IF;

    -- select all of the storage nodes for this snapshot: the latest storage node record at every unique state leaf key
    SELECT ARRAY (SELECT DISTINCT ON (state_leaf_key, storage_path) ROW (blocks.data, storage_cids.state_path, storage_cids.storage_leaf_key,
                                                                         storage_cids.cid, storage_cids.storage_path, storage_cids.node_type, storage_cids.mh_key)
                  FROM eth.storage_cids
                      INNER JOIN public.blocks
                          ON (storage_cids.mh_key, storage_cids.block_number) = (blocks.key, blocks.block_number)
                      INNER JOIN eth.state_cids
                          ON (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
                  WHERE storage_cids.block_number BETWEEN starting_height AND ending_height
                  ORDER BY state_leaf_key, storage_path, storage_cids.state_path, storage_cids.block_number DESC)
    INTO results;

    -- from the set returned above, insert public.block records at the ending_height block number
    INSERT INTO public.blocks (block_number, key, data)
    SELECT ending_height, r.mh_key, r.data
    FROM unnest(results) r;

    -- from the set returned above, insert eth.state_cids records at the ending_height block number
    -- anchoring all the records to the canonical header found at ending_height
    INSERT INTO eth.storage_cids (block_number, header_id, state_path, storage_leaf_key, cid, storage_path,
                                  node_type, diff, mh_key)
    SELECT ending_height, canonical_hash, r.state_path, r.storage_leaf_key, r.cid, r.storage_path, r.node_type, false, r.mh_key
    FROM unnest(results) r
    ON CONFLICT (storage_path, state_path, header_id, block_number) DO NOTHING;
END
$BODY$
LANGUAGE 'plpgsql';
-- +goose StatementEnd

-- +goose Down
DROP FUNCTION storage_snapshot;
DROP TYPE storage_node_result;
DROP FUNCTION state_snapshot;
DROP TYPE state_node_result;
DROP FUNCTION was_state_leaf_removed;
DROP FUNCTION canonical_header_hash;
DROP FUNCTION canonical_header_from_array;
DROP FUNCTION has_child;
DROP TYPE child_result;

@@ -1,27 +0,0 @@
version: '3.2'

services:
  migrations:
    restart: on-failure
    depends_on:
      - ipld-eth-db
    image: vulcanize/ipld-eth-db
    # Build image using local context
    build: .
    environment:
      DATABASE_USER: "vdbm"
      DATABASE_NAME: "vulcanize_testing"
      DATABASE_PASSWORD: "password"
      DATABASE_HOSTNAME: "ipld-eth-db"
      DATABASE_PORT: 5432

  ipld-eth-db:
    image: timescale/timescaledb:latest-pg14
    restart: always
    command: ["postgres", "-c", "log_statement=all"]
    environment:
      POSTGRES_USER: "vdbm"
      POSTGRES_DB: "vulcanize_testing"
      POSTGRES_PASSWORD: "password"
    ports:
      - "127.0.0.1:8077:5432"

820  schema.sql
(File diff suppressed because it is too large.)

@@ -1,20 +1,14 @@
#!/bin/sh
# Runs the db migrations
set -e
set +x

# Default command is "goose up"
if [[ $# -eq 0 ]]; then
  set -- "up"
fi

# Construct the connection string for postgres
VDB_PG_CONNECT=postgresql://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOSTNAME:$DATABASE_PORT/$DATABASE_NAME?sslmode=disable

# Run the DB migrations
echo "Connecting with: $VDB_PG_CONNECT"
set -x
echo "Running database migrations"
exec ./goose -dir migrations postgres "$VDB_PG_CONNECT" "$@"
./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up

# If the db migrations ran without err
if [[ $? -eq 0 ]]; then
  echo "Migration process ran successfully"
  tail -f /dev/null
else
  echo "Could not run migrations. Are the database details correct?"
  exit 1
fi

@@ -10,7 +10,7 @@ sleep 5s
export HOST_NAME=localhost
export PORT=8066
export USER=vdbm
export TEST_DB=vulcanize_testing
export TEST_DB=cerc_testing
export TEST_CONNECT_STRING=postgresql://$USER@$HOST_NAME:$PORT/$TEST_DB?sslmode=disable
export PGPASSWORD=password

BIN  vulcanize_db.png
(Binary file not shown. Size before: 516 KiB; after: 508 KiB.)

211  vulcanize_db.uml
@@ -1,148 +1,113 @@
(IDE database-diagram definition regenerated for the new schema; the XML body is layout data
— node placements and edge coordinates — so only the table membership is summarized here.)
Old diagram nodes: public.blocks, public.nodes, public.db_version, public.goose_db_version,
eth.header_cids, eth.uncle_cids, eth.transaction_cids, eth.receipt_cids, eth.state_cids,
eth.state_accounts, eth.storage_cids, eth.log_cids, eth.access_list_elements.
New diagram nodes: ipld.blocks, public.nodes, public.db_version, public.goose_db_version,
eth.header_cids, eth.uncle_cids, eth.transaction_cids, eth.receipt_cids, eth.state_cids,
eth.storage_cids, eth.log_cids, eth_meta.watched_addresses.
The REFERENCES edges and layout settings were updated to match the new table set.