diff --git a/.github/workflows/generic-testing.yml b/.github/workflows/generic-testing.yml
new file mode 100644
index 0000000..063360e
--- /dev/null
+++ b/.github/workflows/generic-testing.yml
@@ -0,0 +1,199 @@
+name: Test the stack.
+on:
+ workflow_call:
+ inputs:
+ stack-orchestrator-ref:
+ required: false
+ type: string
+ ipld-eth-beacon-db-ref:
+ required: false
+ type: string
+ ssz-data-ref:
+ required: false
+ type: string
+ secrets:
+ GHA_KEY:
+ required: true
+
+env:
+ stack-orchestrator-ref: ${{ inputs.stack-orchestrator-ref || '7fb664270a0ba09e2caa3095e8c91f3fdb5b38af' }}
+ ipld-eth-beacon-db-ref: ${{ inputs.ipld-eth-beacon-db-ref || '3dfe416302d553f8240f6051c08a7899b0e39e12' }}
+ ssz-data-ref: ${{ inputs.ssz-data-ref || 'main' }}
+ GOPATH: /tmp/go
+jobs:
+ build:
+ name: Run Docker Build
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ path: "./ipld-eth-beacon-indexer"
+
+ - uses: actions/checkout@v3
+ with:
+ ref: ${{ env.stack-orchestrator-ref }}
+ path: "./stack-orchestrator/"
+ repository: vulcanize/stack-orchestrator
+ fetch-depth: 0
+
+ - uses: actions/checkout@v3
+ with:
+ ref: ${{ env.ipld-eth-beacon-db-ref }}
+ repository: vulcanize/ipld-eth-beacon-db
+ path: "./ipld-eth-beacon-db/"
+ ssh-key: ${{secrets.GHA_KEY}}
+ fetch-depth: 0
+
+ - name: Create config file
+ run: |
+ echo vulcanize_ipld_eth_beacon_db=$GITHUB_WORKSPACE/ipld-eth-beacon-db/ > ./config.sh
+ echo vulcanize_ipld_eth_beacon_indexer=$GITHUB_WORKSPACE/ipld-eth-beacon-indexer >> ./config.sh
+ echo eth_beacon_config_file=$GITHUB_WORKSPACE/ipld-eth-beacon-indexer/config/cicd/boot.ipld-eth-beacon-indexer.json >> ./config.sh
+ cat ./config.sh
+
+ - name: Run docker compose
+ run: |
+ docker-compose \
+ -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml" \
+ -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-lighthouse.yml" \
+ -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-indexer.yml" \
+ --env-file ./config.sh \
+ up -d --build
+
+ - name: Check to make sure HEALTH file is present
+ shell: bash
+ run: |
+ until docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-indexer.yml" --env-file ./config.sh cp ipld-eth-beacon-indexer:/root/HEALTH ./HEALTH ; do sleep 10; done
+ cat ./HEALTH
+ if [[ "$(cat ./HEALTH)" -eq "0" ]]; then echo "Application boot successful" && (exit 0); else docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-indexer.yml" cp ipld-eth-beacon-indexer:/root/ipld-eth-beacon-indexer.log . && cat ipld-eth-beacon-indexer.log && (exit 1); fi
+
+ unit-test:
+ name: Run Unit Tests
+ runs-on: ubuntu-latest
+ ## IF you want to update the default branch for `pull_request runs, do it after the ||`
+ steps:
+ - name: Create GOPATH
+ run: mkdir -p /tmp/go
+
+ - uses: actions/checkout@v2
+ with:
+ path: "./ipld-eth-beacon-indexer"
+
+ - uses: actions/checkout@v3
+ with:
+ ref: ${{ env.stack-orchestrator-ref }}
+ path: "./stack-orchestrator/"
+ repository: vulcanize/stack-orchestrator
+ fetch-depth: 0
+
+ - uses: actions/checkout@v3
+ with:
+ ref: ${{ env.ipld-eth-beacon-db-ref }}
+ repository: vulcanize/ipld-eth-beacon-db
+ path: "./ipld-eth-beacon-db/"
+ ssh-key: ${{ secrets.GHA_KEY }}
+ fetch-depth: 0
+
+ - uses: actions/checkout@v3
+ with:
+ ref: ${{ env.ssz-data-ref }}
+ repository: vulcanize/ssz-data
+ path: "./ipld-eth-beacon-indexer/pkg/beaconclient/ssz-data"
+ fetch-depth: 0
+
+ - name: Create config file
+ run: |
+ echo vulcanize_ipld_eth_beacon_db=$GITHUB_WORKSPACE/ipld-eth-beacon-db/ > ./config.sh
+ echo vulcanize_ipld_eth_beacon_indexer=$GITHUB_WORKSPACE/ipld-eth-beacon-indexer >> ./config.sh
+ cat ./config.sh
+
+ - name: Run docker compose
+ run: |
+ docker-compose \
+ -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml" \
+ --env-file ./config.sh \
+ up -d --build
+
+ - uses: actions/setup-go@v3
+ with:
+ go-version: ">=1.18.0"
+ check-latest: true
+
+ - name: Install packages
+ run: |
+ go install github.com/onsi/ginkgo/v2/ginkgo@latest
+ which ginkgo
+
+ - name: Run the tests using Make
+ run: |
+ cd ipld-eth-beacon-indexer
+ make unit-test-ci
+
+ integration-test:
+ name: Run Integration Tests
+ runs-on: ubuntu-latest
+ steps:
+ - name: Create GOPATH
+ run: mkdir -p /tmp/go
+
+ - uses: actions/checkout@v2
+ with:
+ path: "./ipld-eth-beacon-indexer"
+
+ - uses: actions/checkout@v3
+ with:
+ ref: ${{ env.stack-orchestrator-ref }}
+ path: "./stack-orchestrator/"
+ repository: vulcanize/stack-orchestrator
+ fetch-depth: 0
+
+ - uses: actions/checkout@v3
+ with:
+ ref: ${{ env.ipld-eth-beacon-db-ref }}
+ repository: vulcanize/ipld-eth-beacon-db
+ path: "./ipld-eth-beacon-db/"
+ ssh-key: ${{secrets.GHA_KEY}}
+ fetch-depth: 0
+
+ - name: Create config file
+ run: |
+ echo vulcanize_ipld_eth_beacon_db=$GITHUB_WORKSPACE/ipld-eth-beacon-db/ > ./config.sh
+ echo vulcanize_ipld_eth_beacon_indexer=$GITHUB_WORKSPACE/ipld-eth-beacon-indexer >> ./config.sh
+ echo eth_beacon_capture_mode=boot >> ./config.sh
+ cat ./config.sh
+
+ - name: Run docker compose
+ run: |
+ docker-compose \
+ -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml" \
+ -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-lighthouse.yml" \
+ --env-file ./config.sh \
+ up -d --build
+
+ - uses: actions/setup-go@v3
+ with:
+ go-version: ">=1.18.0"
+ check-latest: true
+
+ - name: Install packages
+ run: |
+ go install github.com/onsi/ginkgo/v2/ginkgo@latest
+ which ginkgo
+
+ - name: Run the tests using Make
+ run: |
+ cd ipld-eth-beacon-indexer
+ make integration-test-ci
+
+ golangci:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/setup-go@v3
+ with:
+ go-version: ">=1.18.0"
+ - uses: actions/checkout@v3
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@v3
+ with:
+ args: --timeout 90s --disable deadcode,unused
+# args: --timeout 90s --disable deadcode,
diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml
index c9d3fe6..f3254a0 100644
--- a/.github/workflows/on-pr.yml
+++ b/.github/workflows/on-pr.yml
@@ -7,8 +7,8 @@ on:
description: "The branch, commit or sha from stack-orchestrator to checkout"
required: false
default: "main"
- ipld-ethcl-db-ref:
- description: "The branch, commit or sha from ipld-ethcl-db to checkout"
+ ipld-eth-beacon-db-ref:
+ description: "The branch, commit or sha from ipld-eth-beacon-db to checkout"
required: false
default: "main"
ssz-data-ref:
@@ -22,188 +22,26 @@ on:
- "!LICENSE"
- "!.github/workflows/**"
- ".github/workflows/on-pr.yml"
+ - ".github/workflows/generic-testing.yml"
- "**"
+ schedule:
+ - cron: '0 13 * * *' # Must be single quotes!!
-env:
- stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref || 'feature/client-build'}}
- ipld-ethcl-db-ref: ${{ github.event.inputs.ipld-ethcl-db-ref || 'feature/schema-ipld-ethcl-indexer' }}
- ssz-data-ref: ${{ github.event.inputs.ssz-data-ref || 'main' }}
- GOPATH: /tmp/go
jobs:
- build:
- name: Run Docker Build
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- with:
- path: "./ipld-ethcl-indexer"
-
- - uses: actions/checkout@v3
- with:
- ref: ${{ env.stack-orchestrator-ref }}
- path: "./stack-orchestrator/"
- repository: vulcanize/stack-orchestrator
- fetch-depth: 0
-
- - uses: actions/checkout@v3
- with:
- ref: ${{ env.ipld-ethcl-db-ref }}
- repository: vulcanize/ipld-ethcl-db
- path: "./ipld-ethcl-db/"
- token: ${{ secrets.GH_PAT }}
- fetch-depth: 0
-
- - name: Create config file
- run: |
- echo vulcanize_ipld_ethcl_db=$GITHUB_WORKSPACE/ipld-ethcl-db/ > ./config.sh
- echo vulcanize_ipld_ethcl_indexer=$GITHUB_WORKSPACE/ipld-ethcl-indexer >> ./config.sh
- echo ethcl_capture_mode=boot >> ./config.sh
- echo ethcl_skip_sync=true >> ./config.sh
- echo ethcl_known_gap_increment=1000000 >> ./config.sh
- cat ./config.sh
-
- - name: Run docker compose
- run: |
- docker-compose \
- -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ethcl-db.yml" \
- -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-lighthouse.yml" \
- -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-ethcl-indexer.yml" \
- --env-file ./config.sh \
- up -d --build
-
- - name: Check to make sure HEALTH file is present
- shell: bash
- run: |
- until $(docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-ethcl-indexer.yml" cp ipld-ethcl-indexer:/root/HEALTH ./HEALTH) ; do sleep 10; done
- cat ./HEALTH
- if [[ "$(cat ./HEALTH)" -eq "0" ]]; then echo "Application boot successful" && (exit 0); else docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-ethcl-indexer.yml" cp ipld-ethcl-indexer:/root/ipld-ethcl-indexer.log . && cat ipld-ethcl-indexer.log && (exit 1); fi
-
- unit-test:
- name: Run Unit Tests
- runs-on: ubuntu-latest
- ## IF you want to update the default branch for `pull_request runs, do it after the ||`
- steps:
- - name: Create GOPATH
- run: mkdir -p /tmp/go
-
- - uses: actions/checkout@v2
- with:
- path: "./ipld-ethcl-indexer"
-
- - uses: actions/checkout@v3
- with:
- ref: ${{ env.stack-orchestrator-ref }}
- path: "./stack-orchestrator/"
- repository: vulcanize/stack-orchestrator
- fetch-depth: 0
-
- - uses: actions/checkout@v3
- with:
- ref: ${{ env.ipld-ethcl-db-ref }}
- repository: vulcanize/ipld-ethcl-db
- path: "./ipld-ethcl-db/"
- token: ${{ secrets.GH_PAT }}
- fetch-depth: 0
-
- - uses: actions/checkout@v3
- with:
- ref: ${{ env.ssz-data-ref }}
- repository: vulcanize/ssz-data
- path: "./ipld-ethcl-indexer/pkg/beaconclient/ssz-data"
- fetch-depth: 0
-
- - name: Create config file
- run: |
- echo vulcanize_ipld_ethcl_db=$GITHUB_WORKSPACE/ipld-ethcl-db/ > ./config.sh
- echo vulcanize_ipld_ethcl_indexer=$GITHUB_WORKSPACE/ipld-ethcl-indexer >> ./config.sh
- cat ./config.sh
-
- - name: Run docker compose
- run: |
- docker-compose \
- -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ethcl-db.yml" \
- --env-file ./config.sh \
- up -d --build
-
- - uses: actions/setup-go@v3
- with:
- go-version: ">=1.17.0"
- check-latest: true
-
- - name: Install packages
- run: |
- go install github.com/onsi/ginkgo/v2/ginkgo@latest
- which ginkgo
-
- - name: Run the tests using Make
- run: |
- cd ipld-ethcl-indexer
- make unit-test-ci
-
- integration-test:
- name: Run Integration Tests
- runs-on: ubuntu-latest
- steps:
- - name: Create GOPATH
- run: mkdir -p /tmp/go
-
- - uses: actions/checkout@v2
- with:
- path: "./ipld-ethcl-indexer"
-
- - uses: actions/checkout@v3
- with:
- ref: ${{ env.stack-orchestrator-ref }}
- path: "./stack-orchestrator/"
- repository: vulcanize/stack-orchestrator
- fetch-depth: 0
-
- - uses: actions/checkout@v3
- with:
- ref: ${{ env.ipld-ethcl-db-ref }}
- repository: vulcanize/ipld-ethcl-db
- path: "./ipld-ethcl-db/"
- token: ${{ secrets.GH_PAT }}
- fetch-depth: 0
-
- - name: Create config file
- run: |
- echo vulcanize_ipld_ethcl_db=$GITHUB_WORKSPACE/ipld-ethcl-db/ > ./config.sh
- echo vulcanize_ipld_ethcl_indexer=$GITHUB_WORKSPACE/ipld-ethcl-indexer >> ./config.sh
- echo ethcl_capture_mode=boot >> ./config.sh
- cat ./config.sh
-
- - name: Run docker compose
- run: |
- docker-compose \
- -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ethcl-db.yml" \
- -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-lighthouse.yml" \
- --env-file ./config.sh \
- up -d --build
-
- - uses: actions/setup-go@v3
- with:
- go-version: ">=1.17.0"
- check-latest: true
-
- - name: Install packages
- run: |
- go install github.com/onsi/ginkgo/v2/ginkgo@latest
- which ginkgo
-
- - name: Run the tests using Make
- run: |
- cd ipld-ethcl-indexer
- make integration-test-ci
-
- golangci:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/setup-go@v3
- with:
- go-version: ">=1.17.0"
- - uses: actions/checkout@v3
- - name: golangci-lint
- uses: golangci/golangci-lint-action@v3
- with:
- args: --timeout 90s
+ trigger-tests:
+ if: github.event_name != 'schedule'
+ uses: ./.github/workflows/generic-testing.yml
+ with:
+ stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref }}
+ ipld-eth-beacon-db-ref: ${{ github.event.inputs.ipld-eth-beacon-db-ref }}
+ ssz-data-ref: ${{ github.event.inputs.ssz-data-ref }}
+ secrets:
+ GHA_KEY: ${{secrets.GHA_KEY}}
+ system-testing:
+ uses: ./.github/workflows/system-tests.yml
+ with:
+ stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref }}
+ ipld-eth-beacon-db-ref: ${{ github.event.inputs.ipld-eth-beacon-db-ref }}
+ secrets:
+ GHA_KEY: ${{secrets.GHA_KEY}}
+ BC_ADDRESS: ${{secrets.BC_ADDRESS}}
diff --git a/.github/workflows/on-publish.yml b/.github/workflows/on-publish.yml
index 801a1c9..7ed6b38 100644
--- a/.github/workflows/on-publish.yml
+++ b/.github/workflows/on-publish.yml
@@ -3,9 +3,28 @@ on:
release:
types: [published, edited]
jobs:
+ trigger-tests:
+ uses: ./.github/workflows/generic-testing.yml
+ with:
+ stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref }}
+ ipld-eth-beacon-db-ref: ${{ github.event.inputs.ipld-eth-beacon-db-ref }}
+ ssz-data-ref: ${{ github.event.inputs.ssz-data-ref }}
+ secrets:
+ GHA_KEY: ${{secrets.GHA_KEY}}
+ system-testing:
+ uses: ./.github/workflows/system-tests.yml
+ with:
+ stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref }}
+ ipld-eth-beacon-db-ref: ${{ github.event.inputs.ipld-eth-beacon-db-ref }}
+ secrets:
+ GHA_KEY: ${{secrets.GHA_KEY}}
+ BC_ADDRESS: ${{secrets.BC_ADDRESS}}
build:
name: Run docker build
runs-on: ubuntu-latest
+ needs:
+ - trigger-tests
+ - system-testing
steps:
- uses: actions/checkout@v2
- name: Get the version
@@ -14,11 +33,11 @@ jobs:
- name: Run docker build
run: make docker-build
- name: Tag docker image
- run: docker tag vulcanize/ipld-ethcl-indexer docker.pkg.github.com/vulcanize/ipld-ethcl-indexer/ipld-ethcl-indexer:${{steps.vars.outputs.sha}}
+ run: docker tag vulcanize/ipld-eth-beacon-indexer docker.pkg.github.com/vulcanize/ipld-eth-beacon-indexer/ipld-eth-beacon-indexer:${{steps.vars.outputs.sha}}
- name: Docker Login
run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
- name: Docker Push
- run: docker push docker.pkg.github.com/vulcanize/ipld-ethcl-indexer/ipld-ethcl-indexer:${{steps.vars.outputs.sha}}
+ run: docker push docker.pkg.github.com/vulcanize/ipld-eth-beacon-indexer/ipld-eth-beacon-indexer:${{steps.vars.outputs.sha}}
push_to_registries:
name: Push Docker image to Docker Hub
runs-on: ubuntu-latest
@@ -32,10 +51,10 @@ jobs:
- name: Docker Login to Github Registry
run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
- name: Docker Pull
- run: docker pull docker.pkg.github.com/vulcanize/ipld-ethcl-indexer/ipld-ethcl-indexer:${{steps.vars.outputs.sha}}
+ run: docker pull docker.pkg.github.com/vulcanize/ipld-eth-beacon-indexer/ipld-eth-beacon-indexer:${{steps.vars.outputs.sha}}
- name: Docker Login to Docker Registry
run: echo ${{ secrets.VULCANIZEJENKINS_PAT }} | docker login -u vulcanizejenkins --password-stdin
- name: Tag docker image
- run: docker tag docker.pkg.github.com/vulcanize/ipld-ethcl-indexer/ipld-ethcl-indexer:${{steps.vars.outputs.sha}} vulcanize/ipld-ethcl-indexer:${{steps.vars.outputs.tag}}
+ run: docker tag docker.pkg.github.com/vulcanize/ipld-eth-beacon-indexer/ipld-eth-beacon-indexer:${{steps.vars.outputs.sha}} vulcanize/ipld-eth-beacon-indexer:${{steps.vars.outputs.tag}}
- name: Docker Push to Docker Hub
- run: docker push vulcanize/ipld-ethcl-indexer:${{steps.vars.outputs.tag}}
+ run: docker push vulcanize/ipld-eth-beacon-indexer:${{steps.vars.outputs.tag}}
diff --git a/.github/workflows/system-tests.yml b/.github/workflows/system-tests.yml
new file mode 100644
index 0000000..c1be739
--- /dev/null
+++ b/.github/workflows/system-tests.yml
@@ -0,0 +1,95 @@
+name: System Testing for the stack.
+on:
+ workflow_call:
+ inputs:
+ stack-orchestrator-ref:
+ required: false
+ type: string
+ ipld-eth-beacon-db-ref:
+ required: false
+ type: string
+ secrets:
+ GHA_KEY:
+ required: true
+ BC_ADDRESS:
+ required: true
+env:
+ stack-orchestrator-ref: ${{ inputs.stack-orchestrator-ref || '7fb664270a0ba09e2caa3095e8c91f3fdb5b38af' }}
+ ipld-eth-beacon-db-ref: ${{ inputs.ipld-eth-beacon-db-ref || '3dfe416302d553f8240f6051c08a7899b0e39e12' }}
+ GOPATH: /tmp/go
+ bc_protocol: "http"
+ bc_address: ${{secrets.BC_ADDRESS}}
+ bc_port: 5052
+ db_host: localhost
+ db_port: 8076
+ db_name: vulcanize_testing
+ db_user: vdbm
+ db_password: password
+ db_driver: "pgx"
+
+jobs:
+ system-testing:
+ runs-on: self-hosted
+ steps:
+ - name: Create GOPATH
+ run: mkdir -p /tmp/go
+
+ - uses: actions/checkout@v2
+ with:
+ path: "./ipld-eth-beacon-indexer"
+
+ - uses: actions/checkout@v3
+ with:
+ ref: ${{ env.stack-orchestrator-ref }}
+ path: "./stack-orchestrator/"
+ repository: vulcanize/stack-orchestrator
+ fetch-depth: 0
+
+ - uses: actions/checkout@v3
+ with:
+ ref: ${{ env.ipld-eth-beacon-db-ref }}
+ repository: vulcanize/ipld-eth-beacon-db
+ path: "./ipld-eth-beacon-db/"
+ ssh-key: ${{secrets.GHA_KEY}}
+ fetch-depth: 0
+
+ - name: Create config file
+ run: |
+ echo vulcanize_ipld_eth_beacon_db=$(pwd)/ipld-eth-beacon-db > ./config.sh
+ cat ./config.sh
+
+ - name: Run docker compose
+ id: compose
+ shell: bash
+ run: |
+ ls "./stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml"
+ whoami
+ /usr/local/bin/docker-compose \
+ -f "./stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml" \
+ --env-file ./config.sh \
+ up -d --build
+
+ - uses: actions/setup-go@v3
+ with:
+ go-version: ">=1.18.0"
+ check-latest: true
+
+ - name: Install packages
+ run: |
+ go install github.com/onsi/ginkgo/v2/ginkgo@latest
+ which ginkgo
+
+ - name: Run the tests using Make
+ run: |
+ sleep 20
+ cd ipld-eth-beacon-indexer
+ make system-test-ci
+
+ - name: Clean up the docker containers
+ if: always() && steps.compose.outcome == 'success'
+ shell: bash
+ run: |
+ /usr/local/bin/docker-compose \
+ -f "./stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml" \
+ --env-file ./config.sh \
+ down -v
diff --git a/.gitignore b/.gitignore
index 1a3bc3a..90badf2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,5 +4,7 @@ ipld-ethcl-indexer.log
report.json
cover.profile
temp/*
+.vscode/*
pkg/beaconclient/ssz-data/
-*.test
\ No newline at end of file
+*.test
+ipld-eth-beacon-indexer.log
diff --git a/Dockerfile b/Dockerfile
index a9a2c58..908313d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,6 +1,6 @@
FROM golang:1.18-alpine as builder
-WORKDIR /go/src/github.com/vulcanize/ipld-ethcl-indexer
+WORKDIR /go/src/github.com/vulcanize/ipld-eth-beacon-indexer
RUN apk --no-cache add ca-certificates make git g++ linux-headers
ENV GO111MODULE=on
@@ -9,12 +9,12 @@ COPY go.sum .
RUN go mod tidy; go mod download
COPY . .
-RUN GCO_ENABLED=0 GOOS=linux go build -race -a -installsuffix cgo -ldflags '-extldflags "-static"' -o ipld-ethcl-indexer .
-RUN chmod +x ipld-ethcl-indexer
+RUN GCO_ENABLED=0 GOOS=linux go build -race -a -installsuffix cgo -ldflags '-extldflags "-static"' -o ipld-eth-beacon-indexer .
+RUN chmod +x ipld-eth-beacon-indexer
FROM frolvlad/alpine-bash:latest
RUN apk --no-cache add ca-certificates
WORKDIR /root/
-COPY --from=builder /go/src/github.com/vulcanize/ipld-ethcl-indexer/ipld-ethcl-indexer /root/ipld-ethcl-indexer
+COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-beacon-indexer/ipld-eth-beacon-indexer /root/ipld-eth-beacon-indexer
ADD entrypoint.sh .
ENTRYPOINT ["./entrypoint.sh"]
\ No newline at end of file
diff --git a/Makefile b/Makefile
index e9ed9b4..4306273 100644
--- a/Makefile
+++ b/Makefile
@@ -80,6 +80,24 @@ unit-test-ci:
--cover --coverprofile=cover.profile \
--trace --json-report=report.json
+.PHONY: system-test-ci
+system-test-ci:
+ go vet ./...
+ go fmt ./...
+ $(GINKGO) -r --label-filter system \
+ --randomize-all --randomize-suites \
+ --fail-on-pending --keep-going \
+ --cover --coverprofile=cover.profile \
+ --trace --json-report=report.json
+
+.PHONY: system-test-local
+system-test-local:
+ go vet ./...
+ go fmt ./...
+ $(GINKGO) -r --label-filter system \
+ --randomize-all --randomize-suites \
+ --fail-on-pending --keep-going \
+ --trace
.PHONY: build
build:
@@ -89,4 +107,4 @@ build:
## Build docker image
.PHONY: docker-build
docker-build:
- docker build -t vulcanize/ipld-ethcl-indexer .
\ No newline at end of file
+ docker build -t vulcanize/ipld-eth-beacon-indexer .
\ No newline at end of file
diff --git a/README.md b/README.md
index cf285e2..98beecf 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-- [ipld-ethcl-indexer](#ipld-ethcl-indexer)
+- [ipld-eth-beacon-indexer](#ipld-eth-beacon-indexer)
- [Running the Application](#running-the-application)
- [Development Patterns](#development-patterns)
- [Logging](#logging)
@@ -8,7 +8,7 @@
Table of contents generated with markdown-toc
-# ipld-ethcl-indexer
+# ipld-eth-beacon-indexer
This application will capture all the `BeaconState`'s and `SignedBeaconBlock`'s from the consensus chain on Ethereum. This application is going to connect to the lighthouse client, but hypothetically speaking, it should be interchangeable with any eth2 beacon node.
@@ -22,12 +22,12 @@ To run the application, do as follows:
1. Setup the prerequisite applications.
a. Run a beacon client (such as lighthouse).
- b. Run a postgres DB for ethcl.
+ b. Run a postgres DB for eth-beacon.
c. You can utilize the `stack-orchestrator` [repository](https://github.com/vulcanize/stack-orchestrato).
```
./wrapper.sh -e skip \
- -d ../docker/local/docker-compose-ethcl-db.yml \
+ -d ../docker/local/docker-compose-ipld-eth-beacon-db.yml \
-d ../docker/latest/docker-compose-lighthouse.yml \
-v remove \
-p ../local-config.sh
@@ -37,19 +37,7 @@ To run the application, do as follows:
2. Run the start up command.
```
-go run -race main.go capture head --db.address localhost \
- --db.password password \
- --db.port 8076 \
- --db.username vdbm \
- --db.name vulcanize_testing \
- --db.driver PGX \
- --bc.address localhost \
- --bc.port 5052 \
- --bc.connectionProtocol http \
- --t.skipSync=true \
- --log.level info \
- --log.output=true \
- --kg.increment 100
+go run -race main.go capture full --config ./example.ipld-eth-beacon-indexer-config.json
```
## Running Tests
diff --git a/application_component.md b/application_component.md
index d4cba2f..05b5bb2 100644
--- a/application_component.md
+++ b/application_component.md
@@ -51,4 +51,4 @@ This package contains useful functions for logging.
## `internal/shutdown`
-This package is used to shutdown the `ipld-ethcl-indexer`. It calls the `pkg/gracefulshutdown` package.
+This package is used to shutdown the `ipld-eth-beacon-indexer`. It calls the `pkg/gracefulshutdown` package.
diff --git a/cmd/boot.go b/cmd/boot.go
index 9cab893..63e15b9 100644
--- a/cmd/boot.go
+++ b/cmd/boot.go
@@ -23,9 +23,10 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
- "github.com/vulcanize/ipld-ethcl-indexer/internal/boot"
- "github.com/vulcanize/ipld-ethcl-indexer/internal/shutdown"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
+ "github.com/spf13/viper"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/internal/shutdown"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
)
// bootCmd represents the boot command
@@ -44,9 +45,11 @@ func bootApp() {
log.Info("Starting the application in boot mode.")
ctx := context.Background()
- BC, DB, err := boot.BootApplicationWithRetry(ctx, dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, testDisregardSync)
+ Bc, Db, err := boot.BootApplicationWithRetry(ctx, viper.GetString("db.address"), viper.GetInt("db.port"), viper.GetString("db.name"), viper.GetString("db.username"), viper.GetString("db.password"), viper.GetString("db.driver"),
+ viper.GetString("bc.address"), viper.GetInt("bc.port"), viper.GetString("bc.connectionProtocol"), viper.GetString("bc.type"), viper.GetInt("bc.bootRetryInterval"), viper.GetInt("bc.bootMaxRetry"),
+ viper.GetInt("kg.increment"), "boot", viper.GetBool("t.skipSync"), viper.GetInt("bc.uniqueNodeIdentifier"), viper.GetBool("bc.checkDb"))
if err != nil {
- loghelper.LogError(err).Error("Unable to Start application")
+ StopApplicationPreBoot(err, Db)
}
log.Info("Boot complete, we are going to shutdown.")
@@ -57,11 +60,11 @@ func bootApp() {
notifierCh <- syscall.SIGTERM
}()
- err = shutdown.ShutdownServices(ctx, notifierCh, maxWaitSecondsShutdown, DB, BC)
+ err = shutdown.ShutdownBoot(ctx, notifierCh, maxWaitSecondsShutdown, Db, Bc)
if err != nil {
- loghelper.LogError(err).Error("Ungracefully Shutdown ipld-ethcl-indexer!")
+ loghelper.LogError(err).Error("Ungracefully Shutdown ipld-eth-beacon-indexer!")
} else {
- log.Info("Gracefully shutdown ipld-ethcl-indexer")
+ log.Info("Gracefully shutdown ipld-eth-beacon-indexer")
}
}
diff --git a/cmd/capture.go b/cmd/capture.go
index e8da70b..f5693a6 100644
--- a/cmd/capture.go
+++ b/cmd/capture.go
@@ -17,6 +17,7 @@
package cmd
import (
+ "fmt"
"os"
"time"
@@ -25,19 +26,30 @@ import (
)
var (
- dbUsername string
- dbPassword string
- dbName string
- dbAddress string
- dbDriver string
- dbPort int
- bcAddress string
- bcPort int
- bcConnectionProtocol string
- bcType string
- maxWaitSecondsShutdown time.Duration = time.Duration(5) * time.Second
- notifierCh chan os.Signal = make(chan os.Signal, 1)
- testDisregardSync bool
+ dbUsername string
+ dbPassword string
+ dbName string
+ dbAddress string
+ dbDriver string
+ dbPort int
+ bcAddress string
+ bcPort int
+ bcBootRetryInterval int
+ bcBootMaxRetry int
+ bcConnectionProtocol string
+ bcType string
+ bcMaxHistoricProcessWorker int
+ bcUniqueNodeIdentifier int
+ bcCheckDb bool
+ kgMaxWorker int
+ kgTableIncrement int
+ kgProcessGaps bool
+ pmMetrics bool
+ pmAddress string
+ pmPort int
+ maxWaitSecondsShutdown time.Duration = time.Duration(20) * time.Second
+ notifierCh chan os.Signal = make(chan os.Signal, 1)
+ testDisregardSync bool
)
// captureCmd represents the capture command
@@ -62,35 +74,50 @@ func init() {
captureCmd.PersistentFlags().StringVarP(&dbName, "db.name", "n", "", "Database name connect to DB(required)")
captureCmd.PersistentFlags().StringVarP(&dbDriver, "db.driver", "", "", "Database Driver to connect to DB(required)")
captureCmd.PersistentFlags().IntVarP(&dbPort, "db.port", "", 0, "Port to connect to DB(required)")
- err := captureCmd.MarkPersistentFlagRequired("db.username")
- exitErr(err)
- err = captureCmd.MarkPersistentFlagRequired("db.password")
- exitErr(err)
- err = captureCmd.MarkPersistentFlagRequired("db.address")
- exitErr(err)
- err = captureCmd.MarkPersistentFlagRequired("db.port")
- exitErr(err)
- err = captureCmd.MarkPersistentFlagRequired("db.name")
- exitErr(err)
- err = captureCmd.MarkPersistentFlagRequired("db.driver")
- exitErr(err)
+ //err := captureCmd.MarkPersistentFlagRequired("db.username")
+ // exitErr(err)
+ // err = captureCmd.MarkPersistentFlagRequired("db.password")
+ // exitErr(err)
+ // err = captureCmd.MarkPersistentFlagRequired("db.address")
+ // exitErr(err)
+ // err = captureCmd.MarkPersistentFlagRequired("db.port")
+ // exitErr(err)
+ // err = captureCmd.MarkPersistentFlagRequired("db.name")
+ // exitErr(err)
+ // err = captureCmd.MarkPersistentFlagRequired("db.driver")
+ // exitErr(err)
//// Beacon Client Specific
captureCmd.PersistentFlags().StringVarP(&bcAddress, "bc.address", "l", "", "Address to connect to beacon node (required)")
captureCmd.PersistentFlags().StringVarP(&bcType, "bc.type", "", "lighthouse", "The beacon client we are using, options are prysm and lighthouse.")
captureCmd.PersistentFlags().IntVarP(&bcPort, "bc.port", "r", 0, "Port to connect to beacon node (required )")
captureCmd.PersistentFlags().StringVarP(&bcConnectionProtocol, "bc.connectionProtocol", "", "http", "protocol for connecting to the beacon node.")
- err = captureCmd.MarkPersistentFlagRequired("bc.address")
- exitErr(err)
- err = captureCmd.MarkPersistentFlagRequired("bc.port")
- exitErr(err)
+ captureCmd.PersistentFlags().IntVarP(&bcBootRetryInterval, "bc.bootRetryInterval", "", 30, "The amount of time to wait between retries while booting the application")
+ captureCmd.PersistentFlags().IntVarP(&bcBootMaxRetry, "bc.bootMaxRetry", "", 5, "The maximum number of retries to attempt while booting the application")
+ captureCmd.PersistentFlags().IntVarP(&bcMaxHistoricProcessWorker, "bc.maxHistoricProcessWorker", "", 30, "The number of workers that should be actively processing slots from the eth-beacon.historic_process table. Be careful of system memory.")
+ captureCmd.PersistentFlags().IntVarP(&bcUniqueNodeIdentifier, "bc.uniqueNodeIdentifier", "", 0, "The unique identifier of this application. Each application connecting to the DB should have a unique identifier.")
+ captureCmd.PersistentFlags().BoolVarP(&bcCheckDb, "bc.checkDb", "", true, "Should we check to see if the slot exists in the DB before writing it?")
+ // err = captureCmd.MarkPersistentFlagRequired("bc.address")
+ // exitErr(err)
+ // err = captureCmd.MarkPersistentFlagRequired("bc.port")
+ // exitErr(err)
+
+ //// Known Gaps specific
+ captureCmd.PersistentFlags().BoolVarP(&kgProcessGaps, "kg.processKnownGaps", "", true, "Should we process the slots within the eth-beacon.known_gaps table.")
+ captureCmd.PersistentFlags().IntVarP(&kgTableIncrement, "kg.increment", "", 10000, "The max slots within a single entry to the known_gaps table.")
+ captureCmd.PersistentFlags().IntVarP(&kgMaxWorker, "kg.maxKnownGapsWorker", "", 30, "The number of workers that should be actively processing slots from the eth-beacon.known_gaps table. Be careful of system memory.")
+
+ // Prometheus Specific
+ captureCmd.PersistentFlags().BoolVarP(&pmMetrics, "pm.metrics", "", true, "Should we capture prometheus metrics.")
+ captureCmd.PersistentFlags().StringVarP(&pmAddress, "pm.address", "", "localhost", "Address to send the prometheus metrics.")
+ captureCmd.PersistentFlags().IntVarP(&pmPort, "pm.port", "", 9000, "The port to send prometheus metrics.")
//// Testing Specific
captureCmd.PersistentFlags().BoolVar(&testDisregardSync, "t.skipSync", false, "Should we disregard the head sync?")
// Bind Flags with Viper
//// DB Flags
- err = viper.BindPFlag("db.username", captureCmd.PersistentFlags().Lookup("db.username"))
+ err := viper.BindPFlag("db.username", captureCmd.PersistentFlags().Lookup("db.username"))
exitErr(err)
err = viper.BindPFlag("db.password", captureCmd.PersistentFlags().Lookup("db.password"))
exitErr(err)
@@ -100,14 +127,14 @@ func init() {
exitErr(err)
err = viper.BindPFlag("db.name", captureCmd.PersistentFlags().Lookup("db.name"))
exitErr(err)
+ err = viper.BindPFlag("db.driver", captureCmd.PersistentFlags().Lookup("db.driver"))
+ exitErr(err)
+
+ //// Testing Specific
err = viper.BindPFlag("t.skipSync", captureCmd.PersistentFlags().Lookup("t.skipSync"))
exitErr(err)
- // Testing Specific
- err = viper.BindPFlag("t.driver", captureCmd.PersistentFlags().Lookup("db.driver"))
- exitErr(err)
-
- // LH specific
+ //// LH specific
err = viper.BindPFlag("bc.address", captureCmd.PersistentFlags().Lookup("bc.address"))
exitErr(err)
err = viper.BindPFlag("bc.type", captureCmd.PersistentFlags().Lookup("bc.type"))
@@ -116,14 +143,40 @@ func init() {
exitErr(err)
err = viper.BindPFlag("bc.connectionProtocol", captureCmd.PersistentFlags().Lookup("bc.connectionProtocol"))
exitErr(err)
+ err = viper.BindPFlag("bc.bootRetryInterval", captureCmd.PersistentFlags().Lookup("bc.bootRetryInterval"))
+ exitErr(err)
+ err = viper.BindPFlag("bc.bootMaxRetry", captureCmd.PersistentFlags().Lookup("bc.bootMaxRetry"))
+ exitErr(err)
+ err = viper.BindPFlag("bc.maxHistoricProcessWorker", captureCmd.PersistentFlags().Lookup("bc.maxHistoricProcessWorker"))
+ exitErr(err)
+ err = viper.BindPFlag("bc.uniqueNodeIdentifier", captureCmd.PersistentFlags().Lookup("bc.uniqueNodeIdentifier"))
+ exitErr(err)
+ err = viper.BindPFlag("bc.checkDb", captureCmd.PersistentFlags().Lookup("bc.checkDb"))
+ exitErr(err)
// Here you will define your flags and configuration settings.
+ //// Known Gap Specific
+ err = viper.BindPFlag("kg.processKnownGaps", captureCmd.PersistentFlags().Lookup("kg.processKnownGaps"))
+ exitErr(err)
+ err = viper.BindPFlag("kg.increment", captureCmd.PersistentFlags().Lookup("kg.increment"))
+ exitErr(err)
+ err = viper.BindPFlag("kg.maxKnownGapsWorker", captureCmd.PersistentFlags().Lookup("kg.maxKnownGapsWorker"))
+ exitErr(err)
+
+ //// Prometheus Specific
+ err = viper.BindPFlag("pm.metrics", captureCmd.PersistentFlags().Lookup("pm.metrics"))
+ exitErr(err)
+ err = viper.BindPFlag("pm.address", captureCmd.PersistentFlags().Lookup("pm.address"))
+ exitErr(err)
+ err = viper.BindPFlag("pm.port", captureCmd.PersistentFlags().Lookup("pm.port"))
+ exitErr(err)
}
// Helper function to catch any errors.
// We need to capture these errors for the linter.
func exitErr(err error) {
if err != nil {
+ fmt.Println("Error: ", err)
os.Exit(1)
}
}
diff --git a/cmd/full.go b/cmd/full.go
new file mode 100644
index 0000000..0c4b9d2
--- /dev/null
+++ b/cmd/full.go
@@ -0,0 +1,118 @@
+// VulcanizeDB
+// Copyright © 2022 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package cmd
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+
+ log "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/internal/shutdown"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
+ "golang.org/x/sync/errgroup"
+)
+
+// fullCmd represents the full command
+var fullCmd = &cobra.Command{
+ Use: "full",
+ Short: "Capture all components of the application (head and historical)",
+ Long: `Capture all components of the application (head and historical)`,
+ Run: func(cmd *cobra.Command, args []string) {
+ startFullProcessing()
+ },
+}
+
+func init() {
+ captureCmd.AddCommand(fullCmd)
+
+ // Here you will define your flags and configuration settings.
+
+ // Cobra supports Persistent Flags which will work for this command
+ // and all subcommands, e.g.:
+ // fullCmd.PersistentFlags().String("foo", "", "A help for foo")
+
+ // Cobra supports local flags which will only run when this command
+ // is called directly, e.g.:
+ // fullCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
+}
+
+// Start the application to track at head and historical processing.
+func startFullProcessing() {
+ // Boot the application
+ log.Info("Starting the application in full mode (head and historical processing).")
+ ctx := context.Background()
+
+ Bc, Db, err := boot.BootApplicationWithRetry(ctx, viper.GetString("db.address"), viper.GetInt("db.port"), viper.GetString("db.name"), viper.GetString("db.username"), viper.GetString("db.password"), viper.GetString("db.driver"),
+ viper.GetString("bc.address"), viper.GetInt("bc.port"), viper.GetString("bc.connectionProtocol"), viper.GetString("bc.type"), viper.GetInt("bc.bootRetryInterval"), viper.GetInt("bc.bootMaxRetry"),
+ viper.GetInt("kg.increment"), "head", viper.GetBool("t.skipSync"), viper.GetInt("bc.uniqueNodeIdentifier"), viper.GetBool("bc.checkDb"))
+ if err != nil {
+ StopApplicationPreBoot(err, Db)
+ }
+
+ if viper.GetBool("pm.metrics") {
+ addr := viper.GetString("pm.address") + ":" + strconv.Itoa(viper.GetInt("pm.port"))
+ serveProm(addr)
+ }
+
+ log.Info("The Beacon Client has booted successfully!")
+ // Capture head blocks
+ go Bc.CaptureHead()
+
+ hpContext, hpCancel := context.WithCancel(context.Background())
+
+ errG, _ := errgroup.WithContext(context.Background())
+ errG.Go(func() error {
+ errs := Bc.CaptureHistoric(hpContext, viper.GetInt("bc.maxHistoricProcessWorker"))
+ if len(errs) != 0 {
+ if len(errs) != 0 {
+ log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing historic events")
+ return fmt.Errorf("Application ended because there were too many error when attempting to process historic")
+ }
+ }
+ return nil
+ })
+ kgCtx, KgCancel := context.WithCancel(context.Background())
+ if viper.GetBool("kg.processKnownGaps") {
+ go func() {
+ errG := new(errgroup.Group)
+ errG.Go(func() error {
+ errs := Bc.ProcessKnownGaps(kgCtx, viper.GetInt("kg.maxKnownGapsWorker"))
+ if len(errs) != 0 {
+ log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps")
+ return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps")
+ }
+ return nil
+ })
+ if err := errG.Wait(); err != nil {
+ loghelper.LogError(err).Error("Error with knownGaps processing")
+ }
+ }()
+ }
+
+ // Shutdown when the time is right.
+ err = shutdown.ShutdownFull(ctx, KgCancel, hpCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc)
+ if err != nil {
+ loghelper.LogError(err).Error("Ungracefully Shutdown ipld-eth-beacon-indexer!")
+ } else {
+ log.Info("Gracefully shutdown ipld-eth-beacon-indexer")
+ }
+
+}
diff --git a/cmd/head.go b/cmd/head.go
index 9e95f9f..ba70f8c 100644
--- a/cmd/head.go
+++ b/cmd/head.go
@@ -18,18 +18,18 @@ package cmd
import (
"context"
- "os"
+ "fmt"
+ "net/http"
+ "strconv"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
- "github.com/vulcanize/ipld-ethcl-indexer/internal/boot"
- "github.com/vulcanize/ipld-ethcl-indexer/internal/shutdown"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
-)
-
-var (
- kgTableIncrement int
+ "github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/internal/shutdown"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
+ "golang.org/x/sync/errgroup"
)
// headCmd represents the head command
@@ -48,34 +48,65 @@ func startHeadTracking() {
log.Info("Starting the application in head tracking mode.")
ctx := context.Background()
- BC, DB, err := boot.BootApplicationWithRetry(ctx, dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, testDisregardSync)
+ Bc, Db, err := boot.BootApplicationWithRetry(ctx, viper.GetString("db.address"), viper.GetInt("db.port"), viper.GetString("db.name"), viper.GetString("db.username"), viper.GetString("db.password"), viper.GetString("db.driver"),
+ viper.GetString("bc.address"), viper.GetInt("bc.port"), viper.GetString("bc.connectionProtocol"), viper.GetString("bc.type"), viper.GetInt("bc.bootRetryInterval"), viper.GetInt("bc.bootMaxRetry"),
+ viper.GetInt("kg.increment"), "head", viper.GetBool("t.skipSync"), viper.GetInt("bc.uniqueNodeIdentifier"), viper.GetBool("bc.checkDb"))
if err != nil {
- loghelper.LogError(err).Error("Unable to Start application")
- if DB != nil {
- DB.Close()
- }
- os.Exit(1)
+ StopApplicationPreBoot(err, Db)
+ }
+
+ if viper.GetBool("pm.metrics") {
+ addr := viper.GetString("pm.address") + ":" + strconv.Itoa(viper.GetInt("pm.port"))
+ serveProm(addr)
}
log.Info("The Beacon Client has booted successfully!")
// Capture head blocks
- go BC.CaptureHead(kgTableIncrement)
+ go Bc.CaptureHead()
+ kgCtx, KgCancel := context.WithCancel(context.Background())
+ if viper.GetBool("kg.processKnownGaps") {
+ go func() {
+ errG := new(errgroup.Group)
+ errG.Go(func() error {
+ errs := Bc.ProcessKnownGaps(kgCtx, viper.GetInt("kg.maxKnownGapsWorker"))
+ if len(errs) != 0 {
+ log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps")
+ return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps")
+ }
+ return nil
+ })
+ if err := errG.Wait(); err != nil {
+ loghelper.LogError(err).Error("Error with knownGaps processing")
+ }
+ }()
+ }
// Shutdown when the time is right.
- err = shutdown.ShutdownServices(ctx, notifierCh, maxWaitSecondsShutdown, DB, BC)
+ err = shutdown.ShutdownHeadTracking(ctx, KgCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc)
if err != nil {
- loghelper.LogError(err).Error("Ungracefully Shutdown ipld-ethcl-indexer!")
+ loghelper.LogError(err).Error("Ungracefully Shutdown ipld-eth-beacon-indexer!")
} else {
- log.Info("Gracefully shutdown ipld-ethcl-indexer")
+ log.Info("Gracefully shutdown ipld-eth-beacon-indexer")
}
}
func init() {
captureCmd.AddCommand(headCmd)
-
- // Known Gaps specific
- captureCmd.PersistentFlags().IntVarP(&kgTableIncrement, "kg.increment", "", 10000, "The max slots within a single entry to the known_gaps table.")
- err := viper.BindPFlag("kg.increment", captureCmd.PersistentFlags().Lookup("kg.increment"))
- exitErr(err)
+}
+
+// Start prometheus server
+func serveProm(addr string) {
+ mux := http.NewServeMux()
+ mux.Handle("/metrics", promhttp.Handler())
+
+ srv := http.Server{
+ Addr: addr,
+ Handler: mux,
+ }
+ go func() {
+ if err := srv.ListenAndServe(); err != nil {
+ loghelper.LogError(err).WithField("endpoint", addr).Error("Error with prometheus")
+ }
+ }()
}
diff --git a/cmd/historic.go b/cmd/historic.go
index 2211503..1c6b653 100644
--- a/cmd/historic.go
+++ b/cmd/historic.go
@@ -17,9 +17,19 @@
package cmd
import (
+ "context"
"fmt"
+ "os"
+ "strconv"
+ log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
+ "github.com/spf13/viper"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/internal/shutdown"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
+ "golang.org/x/sync/errgroup"
)
// historicCmd represents the historic command
@@ -28,10 +38,69 @@ var historicCmd = &cobra.Command{
Short: "Capture the historic blocks and states.",
Long: `Capture the historic blocks and states.`,
Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("historic called")
+ startHistoricProcessing()
},
}
+// Start the application to process historical slots.
+func startHistoricProcessing() {
+ // Boot the application
+ log.Info("Starting the application in historic processing mode.")
+ ctx := context.Background()
+
+ Bc, Db, err := boot.BootApplicationWithRetry(ctx, viper.GetString("db.address"), viper.GetInt("db.port"), viper.GetString("db.name"), viper.GetString("db.username"), viper.GetString("db.password"), viper.GetString("db.driver"),
+ viper.GetString("bc.address"), viper.GetInt("bc.port"), viper.GetString("bc.connectionProtocol"), viper.GetString("bc.type"), viper.GetInt("bc.bootRetryInterval"), viper.GetInt("bc.bootMaxRetry"),
+ viper.GetInt("kg.increment"), "historic", viper.GetBool("t.skipSync"), viper.GetInt("bc.uniqueNodeIdentifier"), viper.GetBool("bc.checkDb"))
+ if err != nil {
+ StopApplicationPreBoot(err, Db)
+ }
+
+ if viper.GetBool("pm.metrics") {
+ addr := viper.GetString("pm.address") + ":" + strconv.Itoa(viper.GetInt("pm.port"))
+ serveProm(addr)
+ }
+
+ hpContext, hpCancel := context.WithCancel(context.Background())
+
+ errG, _ := errgroup.WithContext(context.Background())
+ errG.Go(func() error {
+ errs := Bc.CaptureHistoric(hpContext, viper.GetInt("bc.maxHistoricProcessWorker"))
+ if len(errs) != 0 {
+ if len(errs) != 0 {
+ log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing historic events")
+ return fmt.Errorf("Application ended because there were too many error when attempting to process historic")
+ }
+ }
+ return nil
+ })
+
+ kgContext, kgCancel := context.WithCancel(context.Background())
+ if viper.GetBool("kg.processKnownGaps") {
+ go func() {
+ errG := new(errgroup.Group)
+ errG.Go(func() error {
+ errs := Bc.ProcessKnownGaps(kgContext, viper.GetInt("kg.maxKnownGapsWorker"))
+ if len(errs) != 0 {
+ log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps")
+ return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps")
+ }
+ return nil
+ })
+ if err := errG.Wait(); err != nil {
+ loghelper.LogError(err).Error("Error with knownGaps processing")
+ }
+ }()
+ }
+
+ // Shutdown when the time is right.
+ err = shutdown.ShutdownHistoricProcessing(ctx, kgCancel, hpCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc)
+ if err != nil {
+ loghelper.LogError(err).Error("Ungracefully Shutdown ipld-eth-beacon-indexer!")
+ } else {
+ log.Info("Gracefully shutdown ipld-eth-beacon-indexer")
+ }
+}
+
func init() {
captureCmd.AddCommand(historicCmd)
@@ -45,3 +114,12 @@ func init() {
// is called directly, e.g.:
// historicCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
+
+// Stop the application during its initial boot phases.
+func StopApplicationPreBoot(startErr error, db sql.Database) {
+ loghelper.LogError(startErr).Error("Unable to Start application")
+ if db != nil {
+ db.Close()
+ }
+ os.Exit(1)
+}
diff --git a/cmd/root.go b/cmd/root.go
index 2625dbe..0073871 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -32,7 +32,7 @@ var (
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
- Use: "ipld-ethcl-indexer",
+ Use: "ipld-eth-beacon-indexer",
Short: "This application will keep track of all BeaconState's and SignedBeaconBlock's on the Beacon Chain.",
Long: `This is an application that will capture the BeaconState's and SignedBeaconBlock's on the Beacon Chain.
It can either do this will keeping track of head, or backfilling historic data.`,
@@ -47,6 +47,7 @@ It can either do this will keeping track of head, or backfilling historic data.`
func Execute() {
err := rootCmd.Execute()
if err != nil {
+ fmt.Println("Err when executing rootCmd", err)
os.Exit(1)
}
}
@@ -126,9 +127,9 @@ func init() {
// will be global for your application.
// Optional Flags
- rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.ipld-ethcl-indexer.yaml)")
+ rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.ipld-eth-beacon-indexer.yaml)")
rootCmd.PersistentFlags().String("log.level", log.InfoLevel.String(), "log level (trace, debug, info, warn, error, fatal, panic)")
- rootCmd.PersistentFlags().String("log.file", "ipld-ethcl-indexer.log", "file path for logging")
+ rootCmd.PersistentFlags().String("log.file", "ipld-eth-beacon-indexer.log", "file path for logging")
rootCmd.PersistentFlags().Bool("log.output", true, "Should we log to STDOUT")
rootCmd.PersistentFlags().String("log.format", "json", "json or text")
@@ -159,10 +160,10 @@ func initConfig() {
home, err := os.UserHomeDir()
cobra.CheckErr(err)
- // Search config in home directory with name ".ipld-ethcl-indexer" (without extension).
+ // Search config in home directory with name ".ipld-eth-beacon-indexer" (without extension).
viper.AddConfigPath(home)
viper.SetConfigType("yaml")
- viper.SetConfigName(".ipld-ethcl-indexer")
+ viper.SetConfigName(".ipld-eth-beacon-indexer")
}
viper.AutomaticEnv() // read in environment variables that match
diff --git a/cmd/version.go b/cmd/version.go
index ff10f6e..5aaf719 100644
--- a/cmd/version.go
+++ b/cmd/version.go
@@ -20,7 +20,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
- v "github.com/vulcanize/ipld-ethcl-indexer/pkg/version"
+ v "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/version"
)
var (
@@ -47,7 +47,7 @@ to quickly create a Cobra application.`,
Patch: Patch,
Meta: Meta,
}
- log.Infof("ipld-ethcl-indexer version: %s", version.GetVersionWithMeta())
+ log.Infof("ipld-eth-beacon-indexer version: %s", version.GetVersionWithMeta())
fmt.Println(version.GetVersionWithMeta())
},
}
diff --git a/config/cicd/boot.ipld-eth-beacon-indexer.json b/config/cicd/boot.ipld-eth-beacon-indexer.json
new file mode 100644
index 0000000..b10cc13
--- /dev/null
+++ b/config/cicd/boot.ipld-eth-beacon-indexer.json
@@ -0,0 +1,40 @@
+{
+ "db": {
+ "address": "ipld-eth-beacon-db",
+ "password": "password",
+ "port": 5432,
+ "username": "vdbm",
+ "name": "vulcanize_testing",
+ "driver": "PGX"
+ },
+ "bc": {
+ "address": "lighthouse",
+ "port": 5052,
+ "type": "lighthouse",
+ "bootRetryInterval": 30,
+ "bootMaxRetry": 5,
+ "maxHistoricProcessWorker": 2,
+ "connectionProtocol": "http",
+ "uniqueNodeIdentifier": 100,
+ "checkDb": true
+ },
+ "t": {
+ "skipSync": true
+ },
+ "log": {
+ "level": "debug",
+ "output": true,
+ "file": "./ipld-eth-beacon-indexer.log",
+ "format": "json"
+ },
+ "kg": {
+ "increment": 10000,
+ "processKnownGaps": true,
+ "maxKnownGapsWorker": 2
+ },
+ "pm": {
+ "address": "localhost",
+ "port": 9000,
+ "metrics": true
+ }
+}
diff --git a/config/example.ipld-eth-beacon-indexer-config.json b/config/example.ipld-eth-beacon-indexer-config.json
new file mode 100644
index 0000000..7481284
--- /dev/null
+++ b/config/example.ipld-eth-beacon-indexer-config.json
@@ -0,0 +1,40 @@
+{
+ "db": {
+ "address": "localhost",
+ "password": "password",
+ "port": 8076,
+ "username": "vdbm",
+ "name": "vulcanize_testing",
+ "driver": "PGX"
+ },
+ "bc": {
+ "address": "localhost",
+ "port": 5052,
+ "type": "lighthouse",
+ "bootRetryInterval": 30,
+ "bootMaxRetry": 5,
+ "maxHistoricProcessWorker": 2,
+ "connectionProtocol": "http",
+ "uniqueNodeIdentifier": 100,
+ "checkDb": true
+ },
+ "t": {
+ "skipSync": true
+ },
+ "log": {
+ "level": "debug",
+ "output": true,
+ "file": "./ipld-eth-beacon-indexer.log",
+ "format": "json"
+ },
+ "kg": {
+ "increment": 10000,
+ "processKnownGaps": true,
+ "maxKnownGapsWorker": 2
+ },
+ "pm": {
+ "address": "localhost",
+ "port": 9000,
+ "metrics": true
+ }
+}
diff --git a/entrypoint.sh b/entrypoint.sh
index 6dc5eb6..13da8da 100755
--- a/entrypoint.sh
+++ b/entrypoint.sh
@@ -1,39 +1,18 @@
#!/bin/bash
sleep 10
-echo "Starting ipld-ethcl-indexer"
+echo "Starting ipld-eth-beacon-indexer"
-echo /root/ipld-ethcl-indexer capture ${CAPTURE_MODE} --db.address $DB_ADDRESS \
- --db.password $DB_PASSWORD \
- --db.port $DB_PORT \
- --db.username $DB_USER \
- --db.name $DB_NAME \
- --db.driver $DB_DRIVER \
- --bc.address $BC_ADDRESS \
- --bc.port $BC_PORT \
- --log.level $LOG_LEVEL\
- --t.skipSync=$SKIP_SYNC \
- --kg.increment $KNOWN_GAP_INCREMENT
-
-/root/ipld-ethcl-indexer capture ${CAPTURE_MODE} --db.address $DB_ADDRESS \
- --db.password $DB_PASSWORD \
- --db.port $DB_PORT \
- --db.username $DB_USER \
- --db.name $DB_NAME \
- --db.driver $DB_DRIVER \
- --bc.address $BC_ADDRESS \
- --bc.port $BC_PORT \
- --log.level $LOG_LEVEL \
- --t.skipSync=$SKIP_SYNC \
- --kg.increment $KNOWN_GAP_INCREMENT
+echo /root/ipld-eth-beacon-indexer capture ${CAPTURE_MODE} --config /root/ipld-eth-beacon-config.json
+/root/ipld-eth-beacon-indexer capture ${CAPTURE_MODE} --config /root/ipld-eth-beacon-config.json
rv=$?
if [ $rv != 0 ]; then
- echo "ipld-ethcl-indexer startup failed"
+ echo "ipld-eth-beacon-indexer startup failed"
echo 1 > /root/HEALTH
else
- echo "ipld-ethcl-indexer startup succeeded"
+ echo "ipld-eth-beacon-indexer startup succeeded"
echo 0 > /root/HEALTH
fi
diff --git a/go.mod b/go.mod
index 5bb8589..f3082d4 100644
--- a/go.mod
+++ b/go.mod
@@ -1,4 +1,4 @@
-module github.com/vulcanize/ipld-ethcl-indexer
+module github.com/vulcanize/ipld-eth-beacon-indexer
go 1.18
@@ -9,6 +9,7 @@ require (
github.com/multiformats/go-multihash v0.1.0
github.com/onsi/ginkgo/v2 v2.1.4
github.com/onsi/gomega v1.19.0
+ github.com/prometheus/client_golang v1.12.1
github.com/r3labs/sse v0.0.0-20210224172625-26fe804710bc
github.com/sirupsen/logrus v1.8.1
)
@@ -57,7 +58,6 @@ require (
github.com/multiformats/go-varint v0.0.6 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
- github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
diff --git a/internal/boot/boot.go b/internal/boot/boot.go
index 7edeafa..b407562 100644
--- a/internal/boot/boot.go
+++ b/internal/boot/boot.go
@@ -18,19 +18,18 @@ package boot
import (
"context"
"fmt"
+ "strings"
"time"
log "github.com/sirupsen/logrus"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/beaconclient"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql/postgres"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql/postgres"
)
var (
- maxRetry = 5 // Max times to try to connect to the DB or BC at boot.
- retryInterval = 30 // The time to wait between each try.
- DB sql.Database = &postgres.DB{}
- BC *beaconclient.BeaconClient = &beaconclient.BeaconClient{}
+ DB sql.Database = &postgres.DB{}
+ BC *beaconclient.BeaconClient = &beaconclient.BeaconClient{}
)
// This function will perform some boot operations. If any steps fail, the application will fail to start.
@@ -42,14 +41,18 @@ var (
// 2. Connect to the database.
//
// 3. Make sure the node is synced, unless disregardSync is true.
-func BootApplication(ctx context.Context, dbHostname string, dbPort int, dbName string, dbUsername string, dbPassword string, driverName string, bcAddress string, bcPort int, bcConnectionProtocol string, disregardSync bool) (*beaconclient.BeaconClient, sql.Database, error) {
+func BootApplication(ctx context.Context, dbHostname string, dbPort int, dbName string, dbUsername string, dbPassword string, driverName string,
+ bcAddress string, bcPort int, bcConnectionProtocol string, bcKgTableIncrement int, disregardSync bool, uniqueNodeIdentifier int, checkDb bool) (*beaconclient.BeaconClient, sql.Database, error) {
log.Info("Booting the Application")
log.Debug("Creating the Beacon Client")
- BC = beaconclient.CreateBeaconClient(ctx, bcConnectionProtocol, bcAddress, bcPort)
+ Bc, err := beaconclient.CreateBeaconClient(ctx, bcConnectionProtocol, bcAddress, bcPort, bcKgTableIncrement, uniqueNodeIdentifier, checkDb)
+ if err != nil {
+ return Bc, nil, err
+ }
log.Debug("Checking Beacon Client")
- err := BC.CheckBeaconClient()
+ err = Bc.CheckBeaconClient()
if err != nil {
return nil, nil, err
}
@@ -60,40 +63,90 @@ func BootApplication(ctx context.Context, dbHostname string, dbPort int, dbName
return nil, nil, err
}
- BC.Db = DB
+ Bc.Db = DB
var status bool
if !disregardSync {
- status, err = BC.CheckHeadSync()
+ status, err = Bc.CheckHeadSync()
if err != nil {
log.Error("Unable to get the nodes sync status")
- return BC, DB, err
+ return Bc, DB, err
}
if status {
log.Error("The node is still syncing..")
err = fmt.Errorf("The node is still syncing.")
- return BC, DB, err
+ return Bc, DB, err
}
} else {
log.Warn("We are not checking to see if the node has synced to head.")
}
- return BC, DB, nil
+ return Bc, DB, nil
}
// Add retry logic to ensure that we are give the Beacon Client and the DB time to start.
-func BootApplicationWithRetry(ctx context.Context, dbHostname string, dbPort int, dbName string, dbUsername string, dbPassword string, driverName string, bcAddress string, bcPort int, bcConnectionProtocol string, disregardSync bool) (*beaconclient.BeaconClient, sql.Database, error) {
+func BootApplicationWithRetry(ctx context.Context, dbHostname string, dbPort int, dbName string, dbUsername string, dbPassword string, driverName string,
+ bcAddress string, bcPort int, bcConnectionProtocol string, bcType string, bcRetryInterval int, bcMaxRetry int, bcKgTableIncrement int,
+ startUpMode string, disregardSync bool, uniqueNodeIdentifier int, checkDb bool) (*beaconclient.BeaconClient, sql.Database, error) {
var err error
- for i := 0; i < maxRetry; i++ {
- BC, DB, err = BootApplication(ctx, dbHostname, dbPort, dbName, dbUsername, dbPassword, driverName, bcAddress, bcPort, bcConnectionProtocol, disregardSync)
- if err != nil {
- log.WithFields(log.Fields{
- "retryNumber": i,
- "err": err,
- }).Warn("Unable to boot application. Going to try again")
- time.Sleep(time.Duration(retryInterval) * time.Second)
- continue
+
+ if bcMaxRetry < 0 {
+ i := 0
+ for {
+ BC, DB, err = BootApplication(ctx, dbHostname, dbPort, dbName, dbUsername, dbPassword, driverName,
+ bcAddress, bcPort, bcConnectionProtocol, bcKgTableIncrement, disregardSync, uniqueNodeIdentifier, checkDb)
+ if err != nil {
+ log.WithFields(log.Fields{
+ "retryNumber": i,
+ "err": err,
+ }).Warn("Unable to boot application. Going to try again")
+ time.Sleep(time.Duration(bcRetryInterval) * time.Second)
+ i = i + 1
+ continue
+ }
+ break
+ }
+ } else {
+ for i := 0; i < bcMaxRetry; i++ {
+ BC, DB, err = BootApplication(ctx, dbHostname, dbPort, dbName, dbUsername, dbPassword, driverName,
+ bcAddress, bcPort, bcConnectionProtocol, bcKgTableIncrement, disregardSync, uniqueNodeIdentifier, checkDb)
+ if err != nil {
+ log.WithFields(log.Fields{
+ "retryNumber": i,
+ "err": err,
+ }).Warn("Unable to boot application. Going to try again")
+ time.Sleep(time.Duration(bcRetryInterval) * time.Second)
+ continue
+ }
+ break
}
- break
}
+
+ switch strings.ToLower(startUpMode) {
+ case "head":
+ BC.PerformHeadTracking = true
+ case "historic":
+ log.Debug("Performing additional boot steps for historical processing")
+ BC.PerformHistoricalProcessing = true
+ // This field is not currently used.
+ // The idea is, that if we are doing historially processing and we get a slot
+ // greater than this slot, then we would rerun this function.
+ // this would ensure that we have the slots necessary for processing
+ // within the beacon server.
+
+ // We can implement this feature if we notice any errors.
+ headSlot, err := BC.GetLatestSlotInBeaconServer(bcType)
+ if err != nil {
+ return BC, DB, err
+ }
+ BC.UpdateLatestSlotInBeaconServer(int64(headSlot))
+ // Add another switch case for bcType if its ever needed.
+ case "boot":
+ log.Debug("Running application in boot mode.")
+ default:
+ log.WithFields(log.Fields{
+ "startUpMode": startUpMode,
+ }).Error("The startUpMode provided is not handled.")
+ }
+
return BC, DB, err
}
diff --git a/internal/boot/boot_test.go b/internal/boot/boot_test.go
index 21daba1..8cb6977 100644
--- a/internal/boot/boot_test.go
+++ b/internal/boot/boot_test.go
@@ -20,7 +20,7 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
- "github.com/vulcanize/ipld-ethcl-indexer/internal/boot"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
)
var _ = Describe("Boot", func() {
@@ -34,37 +34,57 @@ var _ = Describe("Boot", func() {
bcAddress string = "localhost"
bcPort int = 5052
bcConnectionProtocol string = "http"
+ bcType string = "lighthouse"
+ bcBootRetryInterval int = 1
+ bcBootMaxRetry int = 5
+ bcKgTableIncrement int = 10
+ bcUniqueIdentifier int = 100
+ bcCheckDb bool = false
)
Describe("Booting the application", Label("integration"), func() {
- Context("When the DB and BC are both up and running, and we skip checking for a synced head", func() {
+ Context("When the DB and BC are both up and running, we skip checking for a synced head, and we are processing head", func() {
It("Should connect successfully", func() {
- _, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, true)
+ _, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "head", true, bcUniqueIdentifier, bcCheckDb)
+ defer db.Close()
+ Expect(err).ToNot(HaveOccurred())
+ })
+ })
+ Context("When the DB and BC are both up and running, we skip checking for a synced head, and we are processing historic ", func() {
+ It("Should connect successfully", func() {
+ _, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "historic", true, bcUniqueIdentifier, bcCheckDb)
defer db.Close()
Expect(err).ToNot(HaveOccurred())
})
})
Context("When the DB and BC are both up and running, and we check for a synced head", func() {
It("Should not connect successfully", func() {
- _, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, false)
+ _, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "head", false, bcUniqueIdentifier, bcCheckDb)
+ defer db.Close()
+ Expect(err).To(HaveOccurred())
+ })
+ })
+ Context("When the DB and BC are both up and running, we skip checking for a synced head, but the unique identifier is 0", func() {
+ It("Should not connect successfully", func() {
+ _, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "head", false, 0, bcCheckDb)
defer db.Close()
Expect(err).To(HaveOccurred())
})
})
Context("When the DB is running but not the BC", func() {
It("Should not connect successfully", func() {
- _, _, err := boot.BootApplication(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, "hi", 100, bcConnectionProtocol, true)
+ _, _, err := boot.BootApplication(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, "hi", 100, bcConnectionProtocol, bcKgTableIncrement, true, bcUniqueIdentifier, bcCheckDb)
Expect(err).To(HaveOccurred())
})
})
Context("When the BC is running but not the DB", func() {
It("Should not connect successfully", func() {
- _, _, err := boot.BootApplication(context.Background(), "hi", 10, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, true)
+ _, _, err := boot.BootApplication(context.Background(), "hi", 10, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcKgTableIncrement, true, bcUniqueIdentifier, bcCheckDb)
Expect(err).To(HaveOccurred())
})
})
Context("When neither the BC or DB are running", func() {
It("Should not connect successfully", func() {
- _, _, err := boot.BootApplication(context.Background(), "hi", 10, dbName, dbUsername, dbPassword, dbDriver, "hi", 100, bcConnectionProtocol, true)
+ _, _, err := boot.BootApplication(context.Background(), "hi", 10, dbName, dbUsername, dbPassword, dbDriver, "hi", 100, bcConnectionProtocol, bcKgTableIncrement, true, bcUniqueIdentifier, bcCheckDb)
Expect(err).To(HaveOccurred())
})
})
diff --git a/internal/shutdown/shutdown.go b/internal/shutdown/shutdown.go
index 22c7310..13181b4 100644
--- a/internal/shutdown/shutdown.go
+++ b/internal/shutdown/shutdown.go
@@ -20,25 +20,16 @@ import (
"os"
"time"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/beaconclient"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/gracefulshutdown"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/gracefulshutdown"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
)
// Shutdown all the internal services for the application.
-func ShutdownServices(ctx context.Context, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error {
- successCh, errCh := gracefulshutdown.Shutdown(ctx, notifierCh, waitTime, map[string]gracefulshutdown.Operation{
- // Combining DB shutdown with BC because BC needs DB open to cleanly shutdown.
- "beaconClient": func(ctx context.Context) error {
- defer DB.Close()
- err := BC.StopHeadTracking()
- if err != nil {
- loghelper.LogError(err).Error("Unable to trigger shutdown of head tracking")
- }
- return err
- },
- })
+func ShutdownServices(ctx context.Context, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient, shutdownOperations map[string]gracefulshutdown.Operation) error {
+ //successCh, errCh := gracefulshutdown.Shutdown(ctx, notifierCh, waitTime, )
+ successCh, errCh := gracefulshutdown.Shutdown(ctx, notifierCh, waitTime, shutdownOperations)
select {
case <-successCh:
@@ -47,3 +38,82 @@ func ShutdownServices(ctx context.Context, notifierCh chan os.Signal, waitTime t
return err
}
}
+
+// Wrapper function for shutting down the head tracking process.
+func ShutdownHeadTracking(ctx context.Context, kgCancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error {
+ return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{
+ // Combining DB shutdown with BC because BC needs DB open to cleanly shutdown.
+ "beaconClient": func(ctx context.Context) error {
+ defer DB.Close()
+ err := BC.StopHeadTracking()
+ if err != nil {
+ loghelper.LogError(err).Error("Unable to trigger shutdown of head tracking")
+ }
+ if BC.KnownGapsProcess != (beaconclient.KnownGapsProcessing{}) {
+ err = BC.StopKnownGapsProcessing(kgCancel)
+ if err != nil {
+ loghelper.LogError(err).Error("Unable to stop processing known gaps")
+ }
+ }
+ return err
+ },
+ })
+}
+
+// Wrapper function for shutting down the head tracking process.
+func ShutdownHistoricProcessing(ctx context.Context, kgCancel, hpCancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error {
+ return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{
+ // Combining DB shutdown with BC because BC needs DB open to cleanly shutdown.
+ "beaconClient": func(ctx context.Context) error {
+ defer DB.Close()
+ err := BC.StopHistoric(hpCancel)
+ if err != nil {
+ loghelper.LogError(err).Error("Unable to stop processing historic")
+ }
+ if BC.KnownGapsProcess != (beaconclient.KnownGapsProcessing{}) {
+ err = BC.StopKnownGapsProcessing(kgCancel)
+ if err != nil {
+ loghelper.LogError(err).Error("Unable to stop processing known gaps")
+ }
+ }
+ return err
+ },
+ })
+}
+
+// Shutdown the head and historical processing
+func ShutdownFull(ctx context.Context, kgCancel, hpCancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error {
+ return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{
+ // Combining DB shutdown with BC because BC needs DB open to cleanly shutdown.
+ "beaconClient": func(ctx context.Context) error {
+ defer DB.Close()
+ err := BC.StopHistoric(hpCancel)
+ if err != nil {
+ loghelper.LogError(err).Error("Unable to stop processing historic")
+ }
+ if BC.KnownGapsProcess != (beaconclient.KnownGapsProcessing{}) {
+ err = BC.StopKnownGapsProcessing(kgCancel)
+ if err != nil {
+ loghelper.LogError(err).Error("Unable to stop processing known gaps")
+ }
+ }
+ err = BC.StopHeadTracking()
+ if err != nil {
+ loghelper.LogError(err).Error("Unable to trigger shutdown of head tracking")
+ }
+
+ return err
+ },
+ })
+
+}
+
+// Wrapper function for shutting down the application in boot mode.
+func ShutdownBoot(ctx context.Context, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error {
+ return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{
+ // Combining DB shutdown with BC because BC needs DB open to cleanly shutdown.
+ "Database": func(ctx context.Context) error {
+ return DB.Close()
+ },
+ })
+}
diff --git a/internal/shutdown/shutdown_test.go b/internal/shutdown/shutdown_test.go
index b26bc7c..97d83af 100644
--- a/internal/shutdown/shutdown_test.go
+++ b/internal/shutdown/shutdown_test.go
@@ -28,44 +28,53 @@ import (
. "github.com/onsi/gomega"
"github.com/r3labs/sse"
log "github.com/sirupsen/logrus"
- "github.com/vulcanize/ipld-ethcl-indexer/internal/boot"
- "github.com/vulcanize/ipld-ethcl-indexer/internal/shutdown"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/beaconclient"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/gracefulshutdown"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/internal/shutdown"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/gracefulshutdown"
+)
+
+var (
+ dbAddress string = "localhost"
+ dbPort int = 8076
+ dbName string = "vulcanize_testing"
+ dbUsername string = "vdbm"
+ dbPassword string = "password"
+ dbDriver string = "PGX"
+ bcAddress string = "localhost"
+ bcPort int = 5052
+ bcConnectionProtocol string = "http"
+ bcType string = "lighthouse"
+ bcBootRetryInterval int = 1
+ bcBootMaxRetry int = 5
+ bcKgTableIncrement int = 10
+ bcUniqueIdentifier int = 100
+ bcCheckDb bool = false
+ maxWaitSecondsShutdown time.Duration = time.Duration(1) * time.Second
+ DB sql.Database
+ BC *beaconclient.BeaconClient
+ err error
+ ctx context.Context
+ notifierCh chan os.Signal
)
var _ = Describe("Shutdown", func() {
- var (
- dbAddress string = "localhost"
- dbPort int = 8076
- dbName string = "vulcanize_testing"
- dbUsername string = "vdbm"
- dbPassword string = "password"
- dbDriver string = "PGX"
- bcAddress string = "localhost"
- bcPort int = 5052
- bcConnectionProtocol string = "http"
- maxWaitSecondsShutdown time.Duration = time.Duration(1) * time.Second
- DB sql.Database
- BC *beaconclient.BeaconClient
- err error
- ctx context.Context
- notifierCh chan os.Signal
- )
BeforeEach(func() {
ctx = context.Background()
- BC, DB, err = boot.BootApplicationWithRetry(ctx, dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, true)
+ BC, DB, err = boot.BootApplicationWithRetry(ctx, dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress,
+ bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "head", true, bcUniqueIdentifier, bcCheckDb)
notifierCh = make(chan os.Signal, 1)
Expect(err).To(BeNil())
})
- Describe("Run Shutdown Function,", Label("integration"), func() {
+ Describe("Run Shutdown Function for head tracking,", Label("integration"), func() {
Context("When Channels are empty,", func() {
It("Should Shutdown Successfully.", func() {
go func() {
+ _, cancel := context.WithCancel(context.Background())
log.Debug("Starting shutdown chan")
- err = shutdown.ShutdownServices(ctx, notifierCh, maxWaitSecondsShutdown, DB, BC)
+ err = shutdown.ShutdownHeadTracking(ctx, cancel, notifierCh, maxWaitSecondsShutdown, DB, BC)
log.Debug("We have completed the shutdown...")
Expect(err).ToNot(HaveOccurred())
}()
@@ -76,8 +85,9 @@ var _ = Describe("Shutdown", func() {
shutdownCh := make(chan bool)
//log.SetLevel(log.DebugLevel)
go func() {
+ _, cancel := context.WithCancel(context.Background())
log.Debug("Starting shutdown chan")
- err = shutdown.ShutdownServices(ctx, notifierCh, maxWaitSecondsShutdown, DB, BC)
+ err = shutdown.ShutdownHeadTracking(ctx, cancel, notifierCh, maxWaitSecondsShutdown, DB, BC)
log.Debug("We have completed the shutdown...")
Expect(err).ToNot(HaveOccurred())
shutdownCh <- true
@@ -110,7 +120,8 @@ var _ = Describe("Shutdown", func() {
//log.SetLevel(log.DebugLevel)
go func() {
log.Debug("Starting shutdown chan")
- err = shutdown.ShutdownServices(ctx, notifierCh, maxWaitSecondsShutdown, DB, BC)
+ _, cancel := context.WithCancel(context.Background())
+ err = shutdown.ShutdownHeadTracking(ctx, cancel, notifierCh, maxWaitSecondsShutdown, DB, BC)
log.Debug("We have completed the shutdown...")
Expect(err).To(MatchError(gracefulshutdown.TimeoutErr(maxWaitSecondsShutdown.String())))
shutdownCh <- true
diff --git a/main.go b/main.go
index 2c366f3..a115417 100644
--- a/main.go
+++ b/main.go
@@ -19,7 +19,7 @@ Copyright © 2022 NAME HERE
*/
package main
-import "github.com/vulcanize/ipld-ethcl-indexer/cmd"
+import "github.com/vulcanize/ipld-eth-beacon-indexer/cmd"
func main() {
cmd.Execute()
diff --git a/pkg/beaconclient/beaconclient.go b/pkg/beaconclient/beaconclient.go
index b1a83ea..4f959e3 100644
--- a/pkg/beaconclient/beaconclient.go
+++ b/pkg/beaconclient/beaconclient.go
@@ -18,10 +18,11 @@ package beaconclient
import (
"context"
"fmt"
+ "math/rand"
"github.com/r3labs/sse"
log "github.com/sirupsen/logrus"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
)
// TODO: Use prysms config values instead of hardcoding them here.
@@ -31,7 +32,8 @@ var (
bcReorgTopicEndpoint = "/eth/v1/events?topics=chain_reorg" // Endpoint used to subscribe to the head of the chain
BcBlockQueryEndpoint = "/eth/v2/beacon/blocks/" // Endpoint to query individual Blocks
BcStateQueryEndpoint = "/eth/v2/debug/beacon/states/" // Endpoint to query individual States
- BcSyncStatusEndpoint = "/eth/v1/node/syncing"
+ BcSyncStatusEndpoint = "/eth/v1/node/syncing" // The endpoint to check to see if the beacon server is still trying to sync to head.
+ LhDbInfoEndpoint = "/lighthouse/database/info" // The endpoint for the LIGHTHOUSE server to get the database information.
BcBlockRootEndpoint = func(slot string) string {
return "/eth/v1/beacon/blocks/" + slot + "/root"
}
@@ -40,33 +42,34 @@ var (
//bcFinalizedTopicEndpoint = "/eth/v1/events?topics=finalized_checkpoint" // Endpoint used to subscribe to the head of the chain
)
-// A structure utilized for keeping track of various metrics. Currently, mostly used in testing.
-type BeaconClientMetrics struct {
- HeadTrackingInserts uint64 // Number of head events we successfully wrote to the DB.
- HeadTrackingReorgs uint64 // Number of reorg events we successfully wrote to the DB.
- HeadTrackingKnownGaps uint64 // Number of known_gaps we successfully wrote to the DB.
- HeadError uint64 // Number of errors that occurred when decoding the head message.
- HeadReorgError uint64 // Number of errors that occurred when decoding the reorg message.
-}
-
// A struct that capture the Beacon Server that the Beacon Client will be interacting with and querying.
type BeaconClient struct {
- Context context.Context // A context generic context with multiple uses.
- ServerEndpoint string // What is the endpoint of the beacon server.
- PerformHistoricalProcessing bool // Should we perform historical processing?
- Db sql.Database // Database object used for reads and writes.
- Metrics *BeaconClientMetrics // An object used to keep track of certain BeaconClient Metrics.
- KnownGapTableIncrement int // The max number of slots within a single known_gaps table entry.
+ Context context.Context // A context generic context with multiple uses.
+ ServerEndpoint string // What is the endpoint of the beacon server.
+ Db sql.Database // Database object used for reads and writes.
+ Metrics *BeaconClientMetrics // An object used to keep track of certain BeaconClient Metrics.
+ KnownGapTableIncrement int // The max number of slots within a single known_gaps table entry.
+ UniqueNodeIdentifier int // The unique identifier within the cluster of this individual node.
+ KnownGapsProcess KnownGapsProcessing // object keeping track of knowngaps processing
+ CheckDb bool // Should we check the DB to see if the slot exists before processing it?
// Used for Head Tracking
+
PerformHeadTracking bool // Should we track head?
StartingSlot int // If we're performing head tracking. What is the first slot we processed.
PreviousSlot int // Whats the previous slot we processed
PreviousBlockRoot string // Whats the previous block root, used to check the next blocks parent.
- CheckKnownGaps bool // Should we check for gaps at start up.
HeadTracking *SseEvents[Head] // Track the head block
ReOrgTracking *SseEvents[ChainReorg] // Track all Reorgs
//FinalizationTracking *SseEvents[FinalizedCheckpoint] // Track all finalization checkpoints
+
+ // Used for Historical Processing
+
+ // The latest available slot within the Beacon Server. We can't query any slot greater than this.
+ // This value is lazily updated. Therefore at times it will be outdated.
+ LatestSlotInBeaconServer int64
+ PerformHistoricalProcessing bool // Should we perform historical processing?
+ HistoricalProcess HistoricProcessing // object keeping track of historical processing
}
// A struct to keep track of relevant the head event topic.
@@ -85,20 +88,30 @@ type SseError struct {
}
// A Function to create the BeaconClient.
-func CreateBeaconClient(ctx context.Context, connectionProtocol string, bcAddress string, bcPort int) *BeaconClient {
+func CreateBeaconClient(ctx context.Context, connectionProtocol string, bcAddress string, bcPort int, bcKgTableIncrement int, uniqueNodeIdentifier int, checkDb bool) (*BeaconClient, error) {
+ if uniqueNodeIdentifier == 0 {
+ uniqueNodeIdentifier := rand.Int()
+ log.WithField("randomUniqueNodeIdentifier", uniqueNodeIdentifier).Warn("No uniqueNodeIdentifier provided, we are going to use a randomly generated one.")
+ }
+
+ metrics, err := CreateBeaconClientMetrics()
+ if err != nil {
+ return nil, err
+ }
+
endpoint := fmt.Sprintf("%s://%s:%d", connectionProtocol, bcAddress, bcPort)
log.Info("Creating the BeaconClient")
return &BeaconClient{
- Context: ctx,
- ServerEndpoint: endpoint,
- HeadTracking: createSseEvent[Head](endpoint, BcHeadTopicEndpoint),
- ReOrgTracking: createSseEvent[ChainReorg](endpoint, bcReorgTopicEndpoint),
- Metrics: &BeaconClientMetrics{
- HeadTrackingInserts: 0,
- HeadTrackingReorgs: 0,
- },
+ Context: ctx,
+ ServerEndpoint: endpoint,
+ KnownGapTableIncrement: bcKgTableIncrement,
+ HeadTracking: createSseEvent[Head](endpoint, BcHeadTopicEndpoint),
+ ReOrgTracking: createSseEvent[ChainReorg](endpoint, bcReorgTopicEndpoint),
+ Metrics: metrics,
+ UniqueNodeIdentifier: uniqueNodeIdentifier,
+ CheckDb: checkDb,
//FinalizationTracking: createSseEvent[FinalizedCheckpoint](endpoint, bcFinalizedTopicEndpoint),
- }
+ }, nil
}
// Create all the channels to handle a SSE events
diff --git a/pkg/beaconclient/capturehead.go b/pkg/beaconclient/capturehead.go
index df70fd0..a0b6e6b 100644
--- a/pkg/beaconclient/capturehead.go
+++ b/pkg/beaconclient/capturehead.go
@@ -21,16 +21,13 @@ import (
"time"
log "github.com/sirupsen/logrus"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
)
// This function will perform all the heavy lifting for tracking the head of the chain.
-func (bc *BeaconClient) CaptureHead(knownGapsTableIncrement int) {
- bc.KnownGapTableIncrement = knownGapsTableIncrement
+func (bc *BeaconClient) CaptureHead() {
log.Info("We are tracking the head of the chain.")
- //bc.tempHelper()
go bc.handleHead()
- //go bc.handleFinalizedCheckpoint()
go bc.handleReorg()
bc.captureEventTopic()
}
diff --git a/pkg/beaconclient/capturehead_test.go b/pkg/beaconclient/capturehead_test.go
index 34f3bbf..1c569df 100644
--- a/pkg/beaconclient/capturehead_test.go
+++ b/pkg/beaconclient/capturehead_test.go
@@ -39,17 +39,219 @@ import (
. "github.com/onsi/gomega"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/beaconclient"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql/postgres"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql/postgres"
+)
+
+var (
+ address string = "localhost"
+ port int = 8080
+ protocol string = "http"
+ dbHost string = "localhost"
+ dbPort int = 8076
+ dbName string = "vulcanize_testing"
+ dbUser string = "vdbm"
+ dbPassword string = "password"
+ dbDriver string = "pgx"
+ bcUniqueIdentifier int = 100
+ dummyParentRoot string = "46f98c08b54a71dfda4d56e29ec3952b8300cd8d6b67a9b6c562ae96a7a25a42"
+ knownGapsTableIncrement int = 100000
+ maxRetry int = 160
+
+ TestEvents = map[string]Message{
+ "0": {
+ HeadMessage: beaconclient.Head{
+ Slot: "0",
+ Block: "0x4d611d5b93fdab69013a7f0a2f961caca0c853f87cfe9595fe50038163079360",
+ State: "0x7e76880eb67bbdc86250aa578958e9d0675e64e714337855204fb5abaaf82c2b",
+ CurrentDutyDependentRoot: "",
+ PreviousDutyDependentRoot: "",
+ EpochTransition: false,
+ ExecutionOptimistic: false,
+ },
+ SignedBeaconBlock: filepath.Join("ssz-data", "0", "signed-beacon-block.ssz"),
+ BeaconState: filepath.Join("ssz-data", "0", "beacon-state.ssz"),
+ CorrectSignedBeaconBlockMhKey: "/blocks/QLVAEQRQPA2GINRRGFSDKYRZGNTGIYLCGY4TAMJTME3WMMDBGJTDSNRRMNQWGYJQMM4DKM3GHA3WGZTFHE2TSNLGMU2TAMBTHAYTMMZQG44TGNRQ",
+ CorrectBeaconStateMhKey: "/blocks/QLVAEQRQPA3WKNZWHA4DAZLCGY3WEYTEMM4DMMRVGBQWCNJXHA4TKODFHFSDANRXGVSTMNDFG4YTIMZTG44DKNJSGA2GMYRVMFRGCYLGHAZGGMTC",
+ CorrectParentRoot: "0x0000000000000000000000000000000000000000000000000000000000000000",
+ CorrectEth1BlockHash: "0x0000000000000000000000000000000000000000000000000000000000000000",
+ },
+ "100-dummy": {
+ HeadMessage: beaconclient.Head{
+ Slot: "100",
+ Block: "04955400371347e26f61d7a4bbda5b23fa0b25d5fc465160f2a92d52a63b919b",
+ State: "36d5c9a129979b4502bd9a06e57a742810ecbc3fa55a0361c0723c92c1782bfa",
+ CurrentDutyDependentRoot: "",
+ PreviousDutyDependentRoot: "",
+ EpochTransition: false,
+ ExecutionOptimistic: false,
+ },
+ TestNotes: "A block that is supposed to replicate slot 100, but contains some dummy test information.",
+ MimicConfig: &MimicConfig{
+ ForkVersion: "phase0",
+ },
+ SignedBeaconBlock: filepath.Join("ssz-data", "100", "signed-beacon-block.ssz"),
+ BeaconState: filepath.Join("ssz-data", "100", "beacon-state.ssz"),
+ },
+ "100-dummy-2": {
+ HeadMessage: beaconclient.Head{
+ Slot: "100",
+ Block: "04955400371347e26f61d7a4bbda5b23fa0b25d5fc465160f2a9aaaaaaaaaaaa",
+ State: "36d5c9a129979b4502bd9a06e57a742810ecbc3fa55a0361c072bbbbbbbbbbbb",
+ CurrentDutyDependentRoot: "",
+ PreviousDutyDependentRoot: "",
+ EpochTransition: false,
+ ExecutionOptimistic: false,
+ },
+ TestNotes: "A block that is supposed to replicate slot 100, but contains some dummy test information.",
+ MimicConfig: &MimicConfig{
+ ForkVersion: "phase0",
+ },
+ SignedBeaconBlock: filepath.Join("ssz-data", "100", "signed-beacon-block.ssz"),
+ BeaconState: filepath.Join("ssz-data", "100", "beacon-state.ssz"),
+ },
+ "102-wrong-ssz-1": {
+ HeadMessage: beaconclient.Head{
+ Slot: "102",
+ Block: "0x46f98c08b54a71dfda4d56e29ec3952b8300cd8d6b67a9b6c562ae96a7a25a42",
+ State: "0x9b20b114c613c1aa462e02d590b3da902b0a1377e938ed0f94dd3491d763ef67",
+ CurrentDutyDependentRoot: "",
+ PreviousDutyDependentRoot: "",
+ EpochTransition: false,
+ ExecutionOptimistic: false,
+ },
+ TestNotes: "A bad block that returns the wrong ssz objects, used for testing incorrect SSZ decoding.",
+ BeaconState: filepath.Join("ssz-data", "102", "signed-beacon-block.ssz"),
+ SignedBeaconBlock: filepath.Join("ssz-data", "102", "beacon-state.ssz"),
+ },
+ "100": {
+ HeadMessage: beaconclient.Head{
+ Slot: "100",
+ Block: "0x582187e97f7520bb69eea014c3834c964c45259372a0eaaea3f032013797996b",
+ State: "0xf286a0379c0386a3c7be28d05d829f8eb7b280cc9ede15449af20ebcd06a7a56",
+ CurrentDutyDependentRoot: "",
+ PreviousDutyDependentRoot: "",
+ EpochTransition: false,
+ ExecutionOptimistic: false,
+ },
+ TestNotes: "An easy to process Phase 0 block",
+ SignedBeaconBlock: filepath.Join("ssz-data", "100", "signed-beacon-block.ssz"),
+ BeaconState: filepath.Join("ssz-data", "100", "beacon-state.ssz"),
+ CorrectSignedBeaconBlockMhKey: "/blocks/QLVAEQRQPA2TQMRRHA3WKOJXMY3TKMRQMJRDMOLFMVQTAMJUMMZTQMZUMM4TMNDDGQ2TENJZGM3TEYJQMVQWCZLBGNTDAMZSGAYTGNZZG44TSNTC",
+ CorrectBeaconStateMhKey: "/blocks/QLVAEQRQPBTDEOBWMEYDGNZZMMYDGOBWMEZWGN3CMUZDQZBQGVSDQMRZMY4GKYRXMIZDQMDDMM4WKZDFGE2TINBZMFTDEMDFMJRWIMBWME3WCNJW",
+ CorrectParentRoot: "0x629ae1587895043076500f4f5dcb202a47c2fc95d5b5c548cb83bc97bd2dbfe1",
+ CorrectEth1BlockHash: "0x8d3f027beef5cbd4f8b29fc831aba67a5d74768edca529f5596f07fd207865e1",
+ },
+ "101": {
+ HeadMessage: beaconclient.Head{
+ Slot: "101",
+ Block: "0xabe1a972e512182d04f0d4a5c9c25f9ee57c2e9d0ff3f4c4c82fd42d13d31083",
+ State: "0xcb04aa2edbf13c7bb7e7bd9b621ced6832e0075e89147352eac3019a824ce847",
+ CurrentDutyDependentRoot: "",
+ PreviousDutyDependentRoot: "",
+ EpochTransition: false,
+ ExecutionOptimistic: false,
+ },
+ TestNotes: "An easy to process Phase 0 block",
+ SignedBeaconBlock: filepath.Join("ssz-data", "101", "signed-beacon-block.ssz"),
+ BeaconState: filepath.Join("ssz-data", "101", "beacon-state.ssz"),
+ CorrectEth1BlockHash: "0x8d3f027beef5cbd4f8b29fc831aba67a5d74768edca529f5596f07fd207865e1",
+ CorrectSignedBeaconBlockMhKey: "/blocks/QLVAEQRQPBQWEZJRME4TOMTFGUYTEMJYGJSDANDGGBSDIYJVMM4WGMRVMY4WKZJVG5RTEZJZMQYGMZRTMY2GGNDDHAZGMZBUGJSDCM3EGMYTAOBT",
+ CorrectBeaconStateMhKey: "/blocks/QLVAEQRQPBRWEMBUMFQTEZLEMJTDCM3DG5RGEN3FG5RGIOLCGYZDCY3FMQ3DQMZSMUYDANZVMU4DSMJUG4ZTKMTFMFRTGMBRHFQTQMRUMNSTQNBX",
+ },
+ "2375703-dummy": {
+ HeadMessage: beaconclient.Head{
+ Slot: "2375703",
+ Block: "c9fb337b62e2a0dae4f27ab49913132570f7f2cab3f23ad99f4d07508a8e648e",
+ State: "0299a145bcda2c8f5e7d2e068ee101861edbee2ec1db2d5e1d850b0d265aef5f",
+ CurrentDutyDependentRoot: "",
+ PreviousDutyDependentRoot: "",
+ EpochTransition: false,
+ ExecutionOptimistic: false,
+ },
+ TestNotes: "This is a dummy message that is used for reorgs",
+ MimicConfig: &MimicConfig{
+ ForkVersion: "altair",
+ },
+ SignedBeaconBlock: filepath.Join("ssz-data", "2375703", "signed-beacon-block.ssz"),
+ BeaconState: filepath.Join("ssz-data", "2375703", "beacon-state.ssz"),
+ },
+ "2375703-dummy-2": {
+ HeadMessage: beaconclient.Head{
+ Slot: "2375703",
+ Block: "c9fb337b62e2a0dae4f27ab49913132570f7f2cab3f23ad99f4d07508aaaaaaa",
+ State: "0299a145bcda2c8f5e7d2e068ee101861edbee2ec1db2d5e1d850b0d2bbbbbbb",
+ CurrentDutyDependentRoot: "",
+ PreviousDutyDependentRoot: "",
+ EpochTransition: false,
+ ExecutionOptimistic: false,
+ },
+ TestNotes: "This is a dummy message that is used for reorgs",
+ MimicConfig: &MimicConfig{
+ ForkVersion: "altair",
+ },
+ SignedBeaconBlock: filepath.Join("ssz-data", "2375703", "signed-beacon-block.ssz"),
+ BeaconState: filepath.Join("ssz-data", "2375703", "beacon-state.ssz"),
+ },
+ "2375703": {
+ HeadMessage: beaconclient.Head{
+ Slot: "2375703",
+ Block: "0x4392372c5f6e39499e31bf924388b5815639103149f0f54f8a453773b1802301",
+ State: "0xb6215b560273af63ec7e011572b60ec1ca0b0232f8ff44fcd4ed55c7526e964e",
+ CurrentDutyDependentRoot: "", PreviousDutyDependentRoot: "", EpochTransition: false, ExecutionOptimistic: false},
+ TestNotes: "An easy to process Altair Block",
+ SignedBeaconBlock: filepath.Join("ssz-data", "2375703", "signed-beacon-block.ssz"),
+ BeaconState: filepath.Join("ssz-data", "2375703", "beacon-state.ssz"),
+ CorrectEth1BlockHash: "0xd74b1c60423651624de6bb301ac25808951c167ba6ecdd9b2e79b4315aee8202",
+ CorrectParentRoot: "0x08736ddc20b77f65d1aa6301f7e6e856a820ff3ce6430ed2c3694ae35580e740",
+ CorrectSignedBeaconBlockMhKey: "/blocks/QLVAEQRQPA2DGOJSGM3TEYZVMY3GKMZZGQ4TSZJTGFRGMOJSGQZTQODCGU4DCNJWGM4TCMBTGE2DSZRQMY2TIZRYME2DKMZXG4ZWEMJYGAZDGMBR",
+ CorrectBeaconStateMhKey: "/blocks/QLVAEQRQPBRDMMRRGVRDKNRQGI3TGYLGGYZWKYZXMUYDCMJVG4ZGENRQMVRTCY3BGBRDAMRTGJTDQZTGGQ2GMY3EGRSWINJVMM3TKMRWMU4TMNDF",
+ },
+ "3797056": {
+ HeadMessage: beaconclient.Head{
+ Slot: "3797056",
+ Block: "",
+ State: "",
+ CurrentDutyDependentRoot: "", PreviousDutyDependentRoot: "", EpochTransition: false, ExecutionOptimistic: false},
+ TestNotes: "An easy to process Altair Block",
+ // The file below should not exist, this will trigger an error message and 404 response from the mock.
+ SignedBeaconBlock: filepath.Join("ssz-data", "3797056", "should-not-exist.txt"),
+ BeaconState: filepath.Join("ssz-data", "3797056", "beacon-state.ssz"),
+ },
+ }
+ TestConfig = Config{
+ protocol: protocol,
+ address: address,
+ port: port,
+ dummyParentRoot: dummyParentRoot,
+ dbHost: dbHost,
+ dbPort: dbPort,
+ dbName: dbName,
+ dbUser: dbUser,
+ dbPassword: dbPassword,
+ dbDriver: dbDriver,
+ knownGapsTableIncrement: knownGapsTableIncrement,
+ bcUniqueIdentifier: bcUniqueIdentifier,
+ checkDb: true,
+ }
+
+ BeaconNodeTester = TestBeaconNode{
+ TestEvents: TestEvents,
+ TestConfig: TestConfig,
+ }
)
type Message struct {
- HeadMessage beaconclient.Head // The head messsage that will be streamed to the BeaconClient
- TestNotes string // A small explanation of the purpose this structure plays in the testing landscape.
- MimicConfig *MimicConfig // A configuration of parameters that you are trying to
- SignedBeaconBlock string // The file path output of an SSZ encoded SignedBeaconBlock.
- BeaconState string // The file path output of an SSZ encoded BeaconState.
+ HeadMessage beaconclient.Head // The head messsage that will be streamed to the BeaconClient
+ TestNotes string // A small explanation of the purpose this structure plays in the testing landscape.
+ MimicConfig *MimicConfig // A configuration of parameters that you are trying to
+ SignedBeaconBlock string // The file path output of an SSZ encoded SignedBeaconBlock.
+ BeaconState string // The file path output of an SSZ encoded BeaconState.
+ CorrectSignedBeaconBlockMhKey string // The correct MhKey for the signedBeaconBlock
+ CorrectBeaconStateMhKey string // The correct MhKey beaconState
+ CorrectParentRoot string // The correct parent root
+ CorrectEth1BlockHash string // The correct eth1blockHash
}
// A structure that can be utilized to mimic and existing SSZ object but change it ever so slightly.
@@ -59,178 +261,7 @@ type MimicConfig struct {
ForkVersion string // Specify the fork version. This is needed as a workaround to create dummy SignedBeaconBlocks.
}
-var _ = Describe("Capturehead", func() {
-
- var (
- TestConfig Config
- BeaconNodeTester TestBeaconNode
- address string = "localhost"
- port int = 8080
- protocol string = "http"
- TestEvents map[string]Message
- dbHost string = "localhost"
- dbPort int = 8076
- dbName string = "vulcanize_testing"
- dbUser string = "vdbm"
- dbPassword string = "password"
- dbDriver string = "pgx"
- dummyParentRoot string = "46f98c08b54a71dfda4d56e29ec3952b8300cd8d6b67a9b6c562ae96a7a25a42"
- knownGapsTableIncrement int = 100000
- maxRetry int = 60
- )
-
- BeforeEach(func() {
- TestEvents = map[string]Message{
- "100-dummy": {
- HeadMessage: beaconclient.Head{
- Slot: "100",
- Block: "04955400371347e26f61d7a4bbda5b23fa0b25d5fc465160f2a92d52a63b919b",
- State: "36d5c9a129979b4502bd9a06e57a742810ecbc3fa55a0361c0723c92c1782bfa",
- CurrentDutyDependentRoot: "",
- PreviousDutyDependentRoot: "",
- EpochTransition: false,
- ExecutionOptimistic: false,
- },
- TestNotes: "A block that is supposed to replicate slot 100, but contains some dummy test information.",
- MimicConfig: &MimicConfig{
- ForkVersion: "phase0",
- },
- SignedBeaconBlock: filepath.Join("ssz-data", "100", "signed-beacon-block.ssz"),
- BeaconState: filepath.Join("ssz-data", "100", "beacon-state.ssz"),
- },
- "100-dummy-2": {
- HeadMessage: beaconclient.Head{
- Slot: "100",
- Block: "04955400371347e26f61d7a4bbda5b23fa0b25d5fc465160f2a9aaaaaaaaaaaa",
- State: "36d5c9a129979b4502bd9a06e57a742810ecbc3fa55a0361c072bbbbbbbbbbbb",
- CurrentDutyDependentRoot: "",
- PreviousDutyDependentRoot: "",
- EpochTransition: false,
- ExecutionOptimistic: false,
- },
- TestNotes: "A block that is supposed to replicate slot 100, but contains some dummy test information.",
- MimicConfig: &MimicConfig{
- ForkVersion: "phase0",
- },
- SignedBeaconBlock: filepath.Join("ssz-data", "100", "signed-beacon-block.ssz"),
- BeaconState: filepath.Join("ssz-data", "100", "beacon-state.ssz"),
- },
- "102-wrong-ssz-1": {
- HeadMessage: beaconclient.Head{
- Slot: "102",
- Block: "0x46f98c08b54a71dfda4d56e29ec3952b8300cd8d6b67a9b6c562ae96a7a25a42",
- State: "0x9b20b114c613c1aa462e02d590b3da902b0a1377e938ed0f94dd3491d763ef67",
- CurrentDutyDependentRoot: "",
- PreviousDutyDependentRoot: "",
- EpochTransition: false,
- ExecutionOptimistic: false,
- },
- TestNotes: "A bad block that returns the wrong ssz objects, used for testing incorrect SSZ decoding.",
- BeaconState: filepath.Join("ssz-data", "102", "signed-beacon-block.ssz"),
- SignedBeaconBlock: filepath.Join("ssz-data", "102", "beacon-state.ssz"),
- },
- "100": {
- HeadMessage: beaconclient.Head{
- Slot: "100",
- Block: "0x582187e97f7520bb69eea014c3834c964c45259372a0eaaea3f032013797996b",
- State: "0xf286a0379c0386a3c7be28d05d829f8eb7b280cc9ede15449af20ebcd06a7a56",
- CurrentDutyDependentRoot: "",
- PreviousDutyDependentRoot: "",
- EpochTransition: false,
- ExecutionOptimistic: false,
- },
- TestNotes: "An easy to process Phase 0 block",
- SignedBeaconBlock: filepath.Join("ssz-data", "100", "signed-beacon-block.ssz"),
- BeaconState: filepath.Join("ssz-data", "100", "beacon-state.ssz"),
- },
- "101": {
- HeadMessage: beaconclient.Head{
- Slot: "101",
- Block: "0xabe1a972e512182d04f0d4a5c9c25f9ee57c2e9d0ff3f4c4c82fd42d13d31083",
- State: "0xcb04aa2edbf13c7bb7e7bd9b621ced6832e0075e89147352eac3019a824ce847",
- CurrentDutyDependentRoot: "",
- PreviousDutyDependentRoot: "",
- EpochTransition: false,
- ExecutionOptimistic: false,
- },
- TestNotes: "An easy to process Phase 0 block",
- SignedBeaconBlock: filepath.Join("ssz-data", "101", "signed-beacon-block.ssz"),
- BeaconState: filepath.Join("ssz-data", "101", "beacon-state.ssz"),
- },
- "2375703-dummy": {
- HeadMessage: beaconclient.Head{
- Slot: "2375703",
- Block: "c9fb337b62e2a0dae4f27ab49913132570f7f2cab3f23ad99f4d07508a8e648e",
- State: "0299a145bcda2c8f5e7d2e068ee101861edbee2ec1db2d5e1d850b0d265aef5f",
- CurrentDutyDependentRoot: "",
- PreviousDutyDependentRoot: "",
- EpochTransition: false,
- ExecutionOptimistic: false,
- },
- TestNotes: "This is a dummy message that is used for reorgs",
- MimicConfig: &MimicConfig{
- ForkVersion: "altair",
- },
- SignedBeaconBlock: filepath.Join("ssz-data", "2375703", "signed-beacon-block.ssz"),
- BeaconState: filepath.Join("ssz-data", "2375703", "beacon-state.ssz"),
- },
- "2375703-dummy-2": {
- HeadMessage: beaconclient.Head{
- Slot: "2375703",
- Block: "c9fb337b62e2a0dae4f27ab49913132570f7f2cab3f23ad99f4d07508aaaaaaa",
- State: "0299a145bcda2c8f5e7d2e068ee101861edbee2ec1db2d5e1d850b0d2bbbbbbb",
- CurrentDutyDependentRoot: "",
- PreviousDutyDependentRoot: "",
- EpochTransition: false,
- ExecutionOptimistic: false,
- },
- TestNotes: "This is a dummy message that is used for reorgs",
- MimicConfig: &MimicConfig{
- ForkVersion: "altair",
- },
- SignedBeaconBlock: filepath.Join("ssz-data", "2375703", "signed-beacon-block.ssz"),
- BeaconState: filepath.Join("ssz-data", "2375703", "beacon-state.ssz"),
- },
- "2375703": {
- HeadMessage: beaconclient.Head{
- Slot: "2375703",
- Block: "0x4392372c5f6e39499e31bf924388b5815639103149f0f54f8a453773b1802301",
- State: "0xb6215b560273af63ec7e011572b60ec1ca0b0232f8ff44fcd4ed55c7526e964e",
- CurrentDutyDependentRoot: "", PreviousDutyDependentRoot: "", EpochTransition: false, ExecutionOptimistic: false},
- TestNotes: "An easy to process Altair Block",
- SignedBeaconBlock: filepath.Join("ssz-data", "2375703", "signed-beacon-block.ssz"),
- BeaconState: filepath.Join("ssz-data", "2375703", "beacon-state.ssz"),
- },
- "3797056": {
- HeadMessage: beaconclient.Head{
- Slot: "3797056",
- Block: "",
- State: "0xb6215b560273af63ec7e011572b60ec1ca0b0232f8ff44fcd4ed55c7526e964e",
- CurrentDutyDependentRoot: "", PreviousDutyDependentRoot: "", EpochTransition: false, ExecutionOptimistic: false},
- TestNotes: "An easy to process Altair Block",
- SignedBeaconBlock: filepath.Join("ssz-data", "2375703", "signed-beacon-block.ssz"),
- BeaconState: filepath.Join("ssz-data", "2375703", "beacon-state.ssz"),
- },
- }
- TestConfig = Config{
- protocol: protocol,
- address: address,
- port: port,
- dummyParentRoot: dummyParentRoot,
- dbHost: dbHost,
- dbPort: dbPort,
- dbName: dbName,
- dbUser: dbUser,
- dbPassword: dbPassword,
- dbDriver: dbDriver,
- knownGapsTableIncrement: knownGapsTableIncrement,
- }
-
- BeaconNodeTester = TestBeaconNode{
- TestEvents: TestEvents,
- TestConfig: TestConfig,
- }
- })
+var _ = Describe("Capturehead", Label("head"), func() {
Describe("Receiving New Head SSE messages", Label("unit", "behavioral"), func() {
Context("Correctly formatted Phase0 Block", func() {
@@ -239,8 +270,8 @@ var _ = Describe("Capturehead", func() {
BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
defer httpmock.DeactivateAndReset()
BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, maxRetry, 1, 0, 0)
- validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, "0x629ae1587895043076500f4f5dcb202a47c2fc95d5b5c548cb83bc97bd2dbfe1", "0x8d3f027beef5cbd4f8b29fc831aba67a5d74768edca529f5596f07fd207865e1", "/blocks/QHVAEQBQGQ4TKNJUGAYDGNZRGM2DOZJSGZTDMMLEG5QTIYTCMRQTKYRSGNTGCMDCGI2WINLGMM2DMNJRGYYGMMTBHEZGINJSME3DGYRZGE4WE")
- validateBeaconState(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, "/blocks/QHVAEQRQPBTDEOBWMEYDGNZZMMYDGOBWMEZWGN3CMUZDQZBQGVSDQMRZMY4GKYRXMIZDQMDDMM4WKZDFGE2TINBZMFTDEMDFMJRWIMBWME3WCNJW")
+ validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectParentRoot, BeaconNodeTester.TestEvents["100"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["100"].CorrectSignedBeaconBlockMhKey)
+ validateBeaconState(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectBeaconStateMhKey)
})
})
@@ -250,8 +281,8 @@ var _ = Describe("Capturehead", func() {
BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
defer httpmock.DeactivateAndReset()
BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0)
- validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, "0x83154c692b9cce50bdf56af5a933da0a020ed7ff809a6a8236301094c7f25276", "0xd74b1c60423651624de6bb301ac25808951c167ba6ecdd9b2e79b4315aee8202", "/blocks/QHVAEQRQPA2DGOJSGM3TEYZVMY3GKMZZGQ4TSZJTGFRGMOJSGQZTQODCGU4DCNJWGM4TCMBTGE2DSZRQMY2TIZRYME2DKMZXG4ZWEMJYGAZDGMBR")
- validateBeaconState(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, "/blocks/QHVAEQRQPBRDMMRRGVRDKNRQGI3TGYLGGYZWKYZXMUYDCMJVG4ZGENRQMVRTCY3BGBRDAMRTGJTDQZTGGQ2GMY3EGRSWINJVMM3TKMRWMU4TMNDF")
+ validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectParentRoot, BeaconNodeTester.TestEvents["2375703"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["2375703"].CorrectSignedBeaconBlockMhKey)
+ validateBeaconState(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectBeaconStateMhKey)
})
})
Context("Correctly formatted Altair Test Blocks", func() {
@@ -315,7 +346,7 @@ var _ = Describe("Capturehead", func() {
// })
//})
- Context("When the proper SSZ objects are not served", Label("now"), func() {
+ Context("When the proper SSZ objects are not served", func() {
It("Should return an error, and add the slot to the knownGaps table.", func() {
bc := setUpTest(BeaconNodeTester.TestConfig, "101")
BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
@@ -403,7 +434,7 @@ var _ = Describe("Capturehead", func() {
BeaconNodeTester.testMultipleReorgs(bc, TestEvents["100-dummy"].HeadMessage, TestEvents["100-dummy-2"].HeadMessage, TestEvents["100"].HeadMessage, 3, maxRetry)
})
})
- Context("Altair: Multiple reorgs have occurred on this slot", Label("new"), func() {
+ Context("Altair: Multiple reorgs have occurred on this slot", func() {
It("The previous blocks should be marked as 'forked', the new block should be the only one marked as 'proposed'.", func() {
bc := setUpTest(BeaconNodeTester.TestConfig, "2375702")
BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
@@ -426,6 +457,8 @@ type Config struct {
dbPassword string
dbDriver string
knownGapsTableIncrement int
+ bcUniqueIdentifier int
+ checkDb bool
}
//////////////////////////////////////////////////////
@@ -435,23 +468,25 @@ type Config struct {
// Must run before each test. We can't use the beforeEach because of the way
// Gingko treats race conditions.
func setUpTest(config Config, maxSlot string) *beaconclient.BeaconClient {
- bc := *beaconclient.CreateBeaconClient(context.Background(), config.protocol, config.address, config.port)
+ bc, err := beaconclient.CreateBeaconClient(context.Background(), config.protocol, config.address, config.port, config.knownGapsTableIncrement, config.bcUniqueIdentifier, config.checkDb)
+ Expect(err).ToNot(HaveOccurred())
db, err := postgres.SetupPostgresDb(config.dbHost, config.dbPort, config.dbName, config.dbUser, config.dbPassword, config.dbDriver)
Expect(err).ToNot(HaveOccurred())
// Drop all records from the DB.
- clearEthclDbTables(db)
+ clearEthBeaconDbTables(db)
- // Add an slot to the ethcl.slots table so it we can control how known_gaps are handled.
+ // Add a slot to the eth_beacon.slots table so we can control how known_gaps are handled.
writeSlot(db, maxSlot)
bc.Db = db
- return &bc
+ return bc
}
-// A helper function to validate the expected output from the ethcl.slots table.
+// A helper function to validate the expected output from the eth_beacon.slots table.
func validateSlot(bc *beaconclient.BeaconClient, headMessage beaconclient.Head, correctEpoch int, correctStatus string) {
epoch, dbSlot, blockRoot, stateRoot, status := queryDbSlotAndBlock(bc.Db, headMessage.Slot, headMessage.Block)
+ log.Info("validateSlot: ", headMessage)
baseSlot, err := strconv.Atoi(headMessage.Slot)
Expect(err).ToNot(HaveOccurred())
Expect(dbSlot).To(Equal(baseSlot))
@@ -461,27 +496,29 @@ func validateSlot(bc *beaconclient.BeaconClient, headMessage beaconclient.Head,
Expect(status).To(Equal(correctStatus))
}
-// A helper function to validate the expected output from the ethcl.signed_beacon_block table.
+// A helper function to validate the expected output from the eth_beacon.signed_block table.
func validateSignedBeaconBlock(bc *beaconclient.BeaconClient, headMessage beaconclient.Head, correctParentRoot string, correctEth1BlockHash string, correctMhKey string) {
dbSlot, blockRoot, parentRoot, eth1BlockHash, mhKey := queryDbSignedBeaconBlock(bc.Db, headMessage.Slot, headMessage.Block)
+ log.Info("validateSignedBeaconBlock: ", headMessage)
baseSlot, err := strconv.Atoi(headMessage.Slot)
Expect(err).ToNot(HaveOccurred())
Expect(dbSlot).To(Equal(baseSlot))
Expect(blockRoot).To(Equal(headMessage.Block))
- Expect(parentRoot, correctParentRoot)
- Expect(eth1BlockHash, correctEth1BlockHash)
- Expect(mhKey, correctMhKey)
+ Expect(parentRoot).To(Equal(correctParentRoot))
+ Expect(eth1BlockHash).To(Equal(correctEth1BlockHash))
+ Expect(mhKey).To(Equal(correctMhKey))
}
-// A helper function to validate the expected output from the ethcl.beacon_state table.
+// A helper function to validate the expected output from the eth_beacon.state table.
func validateBeaconState(bc *beaconclient.BeaconClient, headMessage beaconclient.Head, correctMhKey string) {
dbSlot, stateRoot, mhKey := queryDbBeaconState(bc.Db, headMessage.Slot, headMessage.State)
+ log.Info("validateBeaconState: ", headMessage)
baseSlot, err := strconv.Atoi(headMessage.Slot)
Expect(err).ToNot(HaveOccurred())
Expect(dbSlot).To(Equal(baseSlot))
Expect(stateRoot).To(Equal(headMessage.State))
- Expect(mhKey, correctMhKey)
+ Expect(mhKey).To(Equal(correctMhKey))
}
@@ -491,7 +528,7 @@ func sendHeadMessage(bc *beaconclient.BeaconClient, head beaconclient.Head, maxR
data, err := json.Marshal(head)
Expect(err).ToNot(HaveOccurred())
- startInserts := atomic.LoadUint64(&bc.Metrics.HeadTrackingInserts)
+ startInserts := atomic.LoadUint64(&bc.Metrics.SlotInserts)
bc.HeadTracking.MessagesCh <- &sse.Event{
ID: []byte{},
Data: data,
@@ -499,33 +536,35 @@ func sendHeadMessage(bc *beaconclient.BeaconClient, head beaconclient.Head, maxR
Retry: []byte{},
}
curRetry := 0
- for atomic.LoadUint64(&bc.Metrics.HeadTrackingInserts) != startInserts+expectedSuccessfulInserts {
+ for atomic.LoadUint64(&bc.Metrics.SlotInserts) != startInserts+expectedSuccessfulInserts {
time.Sleep(1 * time.Second)
curRetry = curRetry + 1
if curRetry == maxRetry {
log.WithFields(log.Fields{
- "startInsert": startInserts,
- "currentValue": atomic.LoadUint64(&bc.Metrics.HeadTrackingInserts),
+ "startInsert": startInserts,
+ "expectedSuccessfulInserts": expectedSuccessfulInserts,
+ "currentValue": atomic.LoadUint64(&bc.Metrics.SlotInserts),
}).Error("HeadTracking Insert wasn't incremented properly.")
Fail("Too many retries have occurred.")
}
}
}
-// A helper function to query the ethcl.slots table based on the slot and block_root
+// A helper function to query the eth_beacon.slots table based on the slot and block_root
func queryDbSlotAndBlock(db sql.Database, querySlot string, queryBlockRoot string) (int, int, string, string, string) {
- sqlStatement := `SELECT epoch, slot, block_root, state_root, status FROM ethcl.slots WHERE slot=$1 AND block_root=$2;`
+ sqlStatement := `SELECT epoch, slot, block_root, state_root, status FROM eth_beacon.slots WHERE slot=$1 AND block_root=$2;`
var epoch, slot int
var blockRoot, stateRoot, status string
- row := db.QueryRow(context.Background(), sqlStatement, querySlot, queryBlockRoot)
- err := row.Scan(&epoch, &slot, &blockRoot, &stateRoot, &status)
+ log.Debug("Starting to query the eth_beacon.slots table, ", querySlot, " ", queryBlockRoot)
+ err := db.QueryRow(context.Background(), sqlStatement, querySlot, queryBlockRoot).Scan(&epoch, &slot, &blockRoot, &stateRoot, &status)
Expect(err).ToNot(HaveOccurred())
+ log.Debug("Querying the eth_beacon.slots table complete")
return epoch, slot, blockRoot, stateRoot, status
}
-// A helper function to query the ethcl.signed_beacon_block table based on the slot and block_root.
+// A helper function to query the eth_beacon.signed_block table based on the slot and block_root.
func queryDbSignedBeaconBlock(db sql.Database, querySlot string, queryBlockRoot string) (int, string, string, string, string) {
- sqlStatement := `SELECT slot, block_root, parent_block_root, eth1_block_hash, mh_key FROM ethcl.signed_beacon_block WHERE slot=$1 AND block_root=$2;`
+ sqlStatement := `SELECT slot, block_root, parent_block_root, eth1_block_hash, mh_key FROM eth_beacon.signed_block WHERE slot=$1 AND block_root=$2;`
var slot int
var blockRoot, parent_block_root, eth1_block_hash, mh_key string
row := db.QueryRow(context.Background(), sqlStatement, querySlot, queryBlockRoot)
@@ -534,9 +573,9 @@ func queryDbSignedBeaconBlock(db sql.Database, querySlot string, queryBlockRoot
return slot, blockRoot, parent_block_root, eth1_block_hash, mh_key
}
-// A helper function to query the ethcl.signed_beacon_block table based on the slot and block_root.
+// A helper function to query the eth_beacon.signed_block table based on the slot and block_root.
func queryDbBeaconState(db sql.Database, querySlot string, queryStateRoot string) (int, string, string) {
- sqlStatement := `SELECT slot, state_root, mh_key FROM ethcl.beacon_state WHERE slot=$1 AND state_root=$2;`
+ sqlStatement := `SELECT slot, state_root, mh_key FROM eth_beacon.state WHERE slot=$1 AND state_root=$2;`
var slot int
var stateRoot, mh_key string
row := db.QueryRow(context.Background(), sqlStatement, querySlot, queryStateRoot)
@@ -548,7 +587,7 @@ func queryDbBeaconState(db sql.Database, querySlot string, queryStateRoot string
// Count the entries in the knownGaps table.
func countKnownGapsTable(db sql.Database) int {
var count int
- sqlStatement := "SELECT COUNT(*) FROM ethcl.known_gaps"
+ sqlStatement := "SELECT COUNT(*) FROM eth_beacon.known_gaps"
err := db.QueryRow(context.Background(), sqlStatement).Scan(&count)
Expect(err).ToNot(HaveOccurred())
return count
@@ -556,25 +595,24 @@ func countKnownGapsTable(db sql.Database) int {
// Return the start and end slot
func queryKnownGaps(db sql.Database, queryStartGap string, QueryEndGap string) (int, int) {
- sqlStatement := `SELECT start_slot, end_slot FROM ethcl.known_gaps WHERE start_slot=$1 AND end_slot=$2;`
+ sqlStatement := `SELECT start_slot, end_slot FROM eth_beacon.known_gaps WHERE start_slot=$1 AND end_slot=$2;`
var startGap, endGap int
row := db.QueryRow(context.Background(), sqlStatement, queryStartGap, QueryEndGap)
err := row.Scan(&startGap, &endGap)
Expect(err).ToNot(HaveOccurred())
return startGap, endGap
-
}
-// A function that will remove all entries from the ethcl tables for you.
-func clearEthclDbTables(db sql.Database) {
- deleteQueries := []string{"DELETE FROM ethcl.slots;", "DELETE FROM ethcl.signed_beacon_block;", "DELETE FROM ethcl.beacon_state;", "DELETE FROM ethcl.known_gaps;"}
+// A function that will remove all entries from the eth_beacon tables for you.
+func clearEthBeaconDbTables(db sql.Database) {
+ deleteQueries := []string{"DELETE FROM eth_beacon.slots;", "DELETE FROM eth_beacon.signed_block;", "DELETE FROM eth_beacon.state;", "DELETE FROM eth_beacon.known_gaps;", "DELETE FROM eth_beacon.historic_process;", "DELETE FROM public.blocks;"}
for _, queries := range deleteQueries {
_, err := db.Exec(context.Background(), queries)
Expect(err).ToNot(HaveOccurred())
}
}
-// Write an entry to the ethcl.slots table with just a slot number
+// Write an entry to the eth_beacon.slots table with just a slot number
func writeSlot(db sql.Database, slot string) {
_, err := db.Exec(context.Background(), beaconclient.UpsertSlotsStmt, "0", slot, "", "", "")
Expect(err).ToNot(HaveOccurred())
@@ -650,8 +688,7 @@ func (tbc TestBeaconNode) SetupBeaconNodeMock(TestEvents map[string]Message, pro
id := httpmock.MustGetSubmatch(req, 1)
dat, err := tbc.provideSsz(id, "state", dummyParentRoot)
if err != nil {
- Expect(err).NotTo(HaveOccurred())
- return httpmock.NewStringResponse(404, fmt.Sprintf("Unable to find file for %s", id)), err
+ return httpmock.NewStringResponse(404, fmt.Sprintf("Unable to find file for %s", id)), nil
}
return httpmock.NewBytesResponse(200, dat), nil
},
@@ -664,12 +701,37 @@ func (tbc TestBeaconNode) SetupBeaconNodeMock(TestEvents map[string]Message, pro
id := httpmock.MustGetSubmatch(req, 1)
dat, err := tbc.provideSsz(id, "block", dummyParentRoot)
if err != nil {
- Expect(err).NotTo(HaveOccurred())
- return httpmock.NewStringResponse(404, fmt.Sprintf("Unable to find file for %s", id)), err
+ return httpmock.NewStringResponse(404, fmt.Sprintf("Unable to find file for %s", id)), nil
}
return httpmock.NewBytesResponse(200, dat), nil
},
)
+ // Not needed but could be useful to have.
+ blockRootUrl := `=~^` + protocol + "://" + address + ":" + strconv.Itoa(port) + "/eth/v1/beacon/blocks/" + `([^/]+)` + "/root"
+ httpmock.RegisterResponder("GET", blockRootUrl,
+ func(req *http.Request) (*http.Response, error) {
+ // Get ID from request
+ slot := httpmock.MustGetSubmatch(req, 1)
+ dat, err := tbc.provideBlockRoot(slot)
+ if err != nil {
+ Expect(err).NotTo(HaveOccurred())
+ return httpmock.NewStringResponse(404, fmt.Sprintf("Unable to find block root for %s", slot)), err
+ }
+ return httpmock.NewBytesResponse(200, dat), nil
+ },
+ )
+}
+
+// Provide the Block root
+func (tbc TestBeaconNode) provideBlockRoot(slot string) ([]byte, error) {
+ for _, val := range tbc.TestEvents {
+ if val.HeadMessage.Slot == slot && val.MimicConfig == nil {
+ block, err := hex.DecodeString(val.HeadMessage.Block[2:])
+ Expect(err).ToNot(HaveOccurred())
+ return block, nil
+ }
+ }
+ return nil, fmt.Errorf("Unable to find the Blockroot in test object.")
}
// A function to mimic querying the state from the beacon node. We simply get the SSZ file are return it.
@@ -679,12 +741,12 @@ func (tbc TestBeaconNode) provideSsz(slotIdentifier string, sszIdentifier string
for _, val := range tbc.TestEvents {
if sszIdentifier == "state" {
- if val.HeadMessage.Slot == slotIdentifier || val.HeadMessage.State == slotIdentifier {
+ if (val.HeadMessage.Slot == slotIdentifier && val.MimicConfig == nil) || val.HeadMessage.State == slotIdentifier {
slotFile = val.BeaconState
Message = val
}
} else if sszIdentifier == "block" {
- if val.HeadMessage.Slot == slotIdentifier || val.HeadMessage.Block == slotIdentifier {
+ if (val.HeadMessage.Slot == slotIdentifier && val.MimicConfig == nil) || val.HeadMessage.Block == slotIdentifier {
slotFile = val.SignedBeaconBlock
Message = val
}
@@ -770,16 +832,16 @@ func (tbc TestBeaconNode) provideSsz(slotIdentifier string, sszIdentifier string
// Helper function to test three reorg messages. There are going to be many functions like this,
// Because we need to test the same logic for multiple phases.
func (tbc TestBeaconNode) testMultipleReorgs(bc *beaconclient.BeaconClient, firstHead beaconclient.Head, secondHead beaconclient.Head, thirdHead beaconclient.Head, epoch int, maxRetry int) {
- go bc.CaptureHead(tbc.TestConfig.knownGapsTableIncrement)
+ go bc.CaptureHead()
time.Sleep(1 * time.Second)
- log.Info("Sending Phase0 Messages to BeaconClient")
+ log.Info("Sending Messages to BeaconClient")
sendHeadMessage(bc, firstHead, maxRetry, 1)
sendHeadMessage(bc, secondHead, maxRetry, 1)
sendHeadMessage(bc, thirdHead, maxRetry, 1)
curRetry := 0
- for atomic.LoadUint64(&bc.Metrics.HeadTrackingReorgs) != 2 {
+ for atomic.LoadUint64(&bc.Metrics.ReorgInserts) != 2 {
time.Sleep(1 * time.Second)
curRetry = curRetry + 1
if curRetry == maxRetry {
@@ -810,7 +872,7 @@ func (tbc TestBeaconNode) testMultipleReorgs(bc *beaconclient.BeaconClient, firs
}
curRetry = 0
- for atomic.LoadUint64(&bc.Metrics.HeadTrackingReorgs) != 3 {
+ for atomic.LoadUint64(&bc.Metrics.ReorgInserts) != 3 {
time.Sleep(1 * time.Second)
curRetry = curRetry + 1
if curRetry == maxRetry {
@@ -818,7 +880,7 @@ func (tbc TestBeaconNode) testMultipleReorgs(bc *beaconclient.BeaconClient, firs
}
}
- if bc.Metrics.HeadTrackingKnownGaps != 0 {
+ if bc.Metrics.KnownGapsInserts != 0 {
Fail("We found gaps when processing a single block")
}
@@ -832,25 +894,25 @@ func (tbc TestBeaconNode) testMultipleReorgs(bc *beaconclient.BeaconClient, firs
// A test to validate a single block was processed correctly
func (tbc TestBeaconNode) testProcessBlock(bc *beaconclient.BeaconClient, head beaconclient.Head, epoch int, maxRetry int, expectedSuccessInsert uint64, expectedKnownGaps uint64, expectedReorgs uint64) {
- go bc.CaptureHead(tbc.TestConfig.knownGapsTableIncrement)
+ go bc.CaptureHead()
time.Sleep(1 * time.Second)
sendHeadMessage(bc, head, maxRetry, expectedSuccessInsert)
curRetry := 0
- for atomic.LoadUint64(&bc.Metrics.HeadTrackingKnownGaps) != expectedKnownGaps {
+ for atomic.LoadUint64(&bc.Metrics.KnownGapsInserts) != expectedKnownGaps {
time.Sleep(1 * time.Second)
curRetry = curRetry + 1
if curRetry == maxRetry {
- Fail(fmt.Sprintf("Wrong gap metrics, got: %d, wanted %d", bc.Metrics.HeadTrackingKnownGaps, expectedKnownGaps))
+ Fail(fmt.Sprintf("Wrong gap metrics, got: %d, wanted %d", bc.Metrics.KnownGapsInserts, expectedKnownGaps))
}
}
curRetry = 0
- for atomic.LoadUint64(&bc.Metrics.HeadTrackingReorgs) != expectedReorgs {
+ for atomic.LoadUint64(&bc.Metrics.ReorgInserts) != expectedReorgs {
time.Sleep(1 * time.Second)
curRetry = curRetry + 1
if curRetry == maxRetry {
- Fail(fmt.Sprintf("Wrong reorg metrics, got: %d, wanted %d", bc.Metrics.HeadTrackingKnownGaps, expectedKnownGaps))
+ Fail(fmt.Sprintf("Wrong reorg metrics, got: %d, wanted %d", bc.Metrics.KnownGapsInserts, expectedKnownGaps))
}
}
@@ -862,14 +924,14 @@ func (tbc TestBeaconNode) testProcessBlock(bc *beaconclient.BeaconClient, head b
// A test that ensures that if two HeadMessages occur for a single slot they are marked
// as proposed and forked correctly.
func (tbc TestBeaconNode) testMultipleHead(bc *beaconclient.BeaconClient, firstHead beaconclient.Head, secondHead beaconclient.Head, epoch int, maxRetry int) {
- go bc.CaptureHead(tbc.TestConfig.knownGapsTableIncrement)
+ go bc.CaptureHead()
time.Sleep(1 * time.Second)
sendHeadMessage(bc, firstHead, maxRetry, 1)
sendHeadMessage(bc, secondHead, maxRetry, 1)
curRetry := 0
- for atomic.LoadUint64(&bc.Metrics.HeadTrackingReorgs) != 1 {
+ for atomic.LoadUint64(&bc.Metrics.ReorgInserts) != 1 {
time.Sleep(1 * time.Second)
curRetry = curRetry + 1
if curRetry == maxRetry {
@@ -877,7 +939,7 @@ func (tbc TestBeaconNode) testMultipleHead(bc *beaconclient.BeaconClient, firstH
}
}
- if bc.Metrics.HeadTrackingKnownGaps != 0 {
+ if bc.Metrics.KnownGapsInserts != 0 {
Fail("We found gaps when processing a single block")
}
@@ -889,7 +951,8 @@ func (tbc TestBeaconNode) testMultipleHead(bc *beaconclient.BeaconClient, firstH
// A test that ensures that if two HeadMessages occur for a single slot they are marked
// as proposed and forked correctly.
func (tbc TestBeaconNode) testKnownGapsMessages(bc *beaconclient.BeaconClient, tableIncrement int, expectedEntries uint64, maxRetry int, msg ...beaconclient.Head) {
- go bc.CaptureHead(tableIncrement)
+ bc.KnownGapTableIncrement = tableIncrement
+ go bc.CaptureHead()
time.Sleep(1 * time.Second)
for _, headMsg := range msg {
@@ -897,7 +960,7 @@ func (tbc TestBeaconNode) testKnownGapsMessages(bc *beaconclient.BeaconClient, t
}
curRetry := 0
- for atomic.LoadUint64(&bc.Metrics.HeadTrackingKnownGaps) != expectedEntries {
+ for atomic.LoadUint64(&bc.Metrics.KnownGapsInserts) != expectedEntries {
time.Sleep(1 * time.Second)
curRetry = curRetry + 1
if curRetry == maxRetry {
@@ -909,7 +972,7 @@ func (tbc TestBeaconNode) testKnownGapsMessages(bc *beaconclient.BeaconClient, t
knownGapCount := countKnownGapsTable(bc.Db)
Expect(knownGapCount).To(Equal(int(expectedEntries)))
- if atomic.LoadUint64(&bc.Metrics.HeadTrackingReorgs) != 0 {
+ if atomic.LoadUint64(&bc.Metrics.ReorgInserts) != 0 {
Fail("We found reorgs when we didn't expect it")
}
}
diff --git a/pkg/beaconclient/capturehistoric.go b/pkg/beaconclient/capturehistoric.go
new file mode 100644
index 0000000..a6764db
--- /dev/null
+++ b/pkg/beaconclient/capturehistoric.go
@@ -0,0 +1,171 @@
+// VulcanizeDB
+// Copyright © 2022 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+// This file will call all the functions to start and stop the historical processing of the beacon chain.
+
+package beaconclient
+
+import (
+ "context"
+ "fmt"
+
+ log "github.com/sirupsen/logrus"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
+ "golang.org/x/sync/errgroup"
+)
+
+// This function will perform all the heavy lifting for processing historical slots of the chain.
+func (bc *BeaconClient) CaptureHistoric(ctx context.Context, maxWorkers int) []error {
+ log.Info("We are starting the historical processing service.")
+ bc.HistoricalProcess = HistoricProcessing{db: bc.Db, metrics: bc.Metrics, uniqueNodeIdentifier: bc.UniqueNodeIdentifier}
+ errs := handleBatchProcess(ctx, maxWorkers, bc.HistoricalProcess, bc.HistoricalProcess.db, bc.ServerEndpoint, bc.Metrics, bc.CheckDb)
+ log.Debug("Exiting Historical")
+ return errs
+}
+
+// This function will perform all the necessary clean up tasks for stopping historical processing.
+func (bc *BeaconClient) StopHistoric(cancel context.CancelFunc) error {
+ log.Info("We are stopping the historical processing service.")
+ err := bc.HistoricalProcess.releaseDbLocks(cancel)
+ if err != nil {
+ loghelper.LogError(err).WithField("uniqueIdentifier", bc.UniqueNodeIdentifier).Error("We were unable to remove the locks from the eth_beacon.historic_processing table. Manual Intervention is needed!")
+ }
+ return nil
+}
+
+// An interface to enforce any batch processing. Currently there are two use cases for this.
+//
+// 1. Historic Processing
+//
+// 2. Known Gaps Processing
+type BatchProcessing interface {
+ getSlotRange(context.Context, chan<- slotsToProcess) []error // Write the slots to process in a channel, return an error if you cant get the next slots to write.
+ handleProcessingErrors(context.Context, <-chan batchHistoricError) // Custom logic to handle errors.
+ removeTableEntry(context.Context, <-chan slotsToProcess) error // With the provided start and end slot, remove the entry from the database.
+ releaseDbLocks(context.CancelFunc) error // Update the checked_out column to false for whatever table is being updated.
+}
+
+/// ^^^
+// Might be better to remove the interface and create a single struct that historicalProcessing
+// and knownGapsProcessing can use. The struct would contain all the SQL strings that they need.
+// And the only difference in logic for processing would be within the error handling.
+// Which can be a function we pass into handleBatchProcess()
+
+// A struct to pass around indicating a table entry for slots to process.
+type slotsToProcess struct {
+ startSlot int // The start slot
+ endSlot int // The end slot
+}
+
+type batchHistoricError struct {
+	err        error  // The error that occurred when attempting to process a slot
+ errProcess string // The process that caused the error.
+ slot int // The slot which the error is for.
+}
+
+// Wrapper function for the BatchProcessing interface.
+// This function will take the structure that needs batch processing.
+// It follows a generic format.
+// Get new entries from any given table.
+// 1. Add it to the slotsCh.
+//
+// 2. Run the maximum specified workers to handle individual slots. We need a maximum because we don't want
+// To store too many SSZ objects in memory.
+//
+// 3. Process the slots and send the err to the ErrCh. Each structure can define how it wants its own errors handled.
+//
+// 4. Remove the slot entry from the DB.
+//
+// 5. Handle any errors.
+func handleBatchProcess(ctx context.Context, maxWorkers int, bp BatchProcessing, db sql.Database, serverEndpoint string, metrics *BeaconClientMetrics, checkDb bool) []error {
+ slotsCh := make(chan slotsToProcess)
+ workCh := make(chan int)
+ processedCh := make(chan slotsToProcess)
+ errCh := make(chan batchHistoricError)
+ finalErrCh := make(chan []error, 1)
+
+ // Start workers
+ for w := 1; w <= maxWorkers; w++ {
+ log.WithFields(log.Fields{"maxWorkers": maxWorkers}).Debug("Starting batch processing workers")
+ go processSlotRangeWorker(ctx, workCh, errCh, db, serverEndpoint, metrics, checkDb)
+ }
+
+ // Process all ranges and send each individual slot to the worker.
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case slots := <-slotsCh:
+ if slots.startSlot > slots.endSlot {
+ log.Error("We received a batch process request where the startSlot is greater than the end slot.")
+ errCh <- batchHistoricError{
+ err: fmt.Errorf("We received a startSlot where the start was greater than the end."),
+ errProcess: "RangeOrder",
+ slot: slots.startSlot,
+ }
+ errCh <- batchHistoricError{
+ err: fmt.Errorf("We received a endSlot where the start was greater than the end."),
+ errProcess: "RangeOrder",
+ slot: slots.endSlot,
+ }
+ } else if slots.startSlot == slots.endSlot {
+ log.WithField("slot", slots.startSlot).Debug("Added new slot to workCh")
+ workCh <- slots.startSlot
+ } else {
+ for i := slots.startSlot; i <= slots.endSlot; i++ {
+ workCh <- i
+ log.WithField("slot", i).Debug("Added new slot to workCh")
+ }
+ processedCh <- slots
+ }
+ }
+
+ }
+ }()
+
+	// Remove entries, end the application if a row cannot be removed.
+ go func() {
+ errG := new(errgroup.Group)
+ errG.Go(func() error {
+ return bp.removeTableEntry(ctx, processedCh)
+ })
+ if err := errG.Wait(); err != nil {
+ finalErrCh <- []error{err}
+ }
+ }()
+ // Process errors from slot processing.
+ go bp.handleProcessingErrors(ctx, errCh)
+
+ // Get slots from the DB.
+ go func() {
+ errs := bp.getSlotRange(ctx, slotsCh) // Periodically adds new entries....
+ if errs != nil {
+ finalErrCh <- errs
+ }
+ finalErrCh <- nil
+ log.Debug("We are stopping the processing of adding new entries")
+ }()
+ log.Debug("Waiting for shutdown signal from channel")
+ select {
+ case <-ctx.Done():
+ log.Debug("Received shutdown signal from channel")
+ return nil
+ case errs := <-finalErrCh:
+ log.Debug("Finishing the batchProcess")
+ return errs
+ }
+}
diff --git a/pkg/beaconclient/capturehistoric_test.go b/pkg/beaconclient/capturehistoric_test.go
new file mode 100644
index 0000000..5571a7e
--- /dev/null
+++ b/pkg/beaconclient/capturehistoric_test.go
@@ -0,0 +1,290 @@
+package beaconclient_test
+
+import (
+ "context"
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ "github.com/jarcoal/httpmock"
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ log "github.com/sirupsen/logrus"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+)
+
+var (
+ kgCheckCheckedOutStmt = `SELECT * FROM eth_beacon.known_gaps WHERE checked_out=true `
+ hpCheckCheckedOutStmt = `SELECT * FROM eth_beacon.historic_process WHERE checked_out=true `
+)
+
+var _ = Describe("Capturehistoric", func() {
+
+ Describe("Run the application in historic mode", Label("unit", "behavioral", "historical"), func() {
+ Context("Phase0 + Altairs: When we need to process a multiple blocks in a multiple entries in the eth_beacon.historic_process table.", Label("deb"), func() {
+ It("Successfully Process the Blocks", func() {
+ bc := setUpTest(BeaconNodeTester.TestConfig, "99")
+ BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
+ defer httpmock.DeactivateAndReset()
+ BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 101, 10)
+ BeaconNodeTester.runHistoricalProcess(bc, 2, 2, 0, 0, 0)
+ // Run two separate processes
+ BeaconNodeTester.writeEventToHistoricProcess(bc, 2375703, 2375703, 10)
+ BeaconNodeTester.runHistoricalProcess(bc, 2, 3, 0, 0, 0)
+
+ time.Sleep(2 * time.Second)
+ validatePopularBatchBlocks(bc)
+ })
+ })
+ Context("When the start block is greater than the endBlock", func() {
+ It("Should Add two entries to the knownGaps table", func() {
+ bc := setUpTest(BeaconNodeTester.TestConfig, "99")
+ BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
+ defer httpmock.DeactivateAndReset()
+ BeaconNodeTester.writeEventToHistoricProcess(bc, 101, 100, 10)
+ BeaconNodeTester.runHistoricalProcess(bc, 2, 0, 0, 2, 0)
+ })
+ })
+ Context("Processing the Genesis block", Label("genesis"), func() {
+ It("Should Process properly", func() {
+ bc := setUpTest(BeaconNodeTester.TestConfig, "100")
+ BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
+ defer httpmock.DeactivateAndReset()
+ BeaconNodeTester.writeEventToHistoricProcess(bc, 0, 0, 10)
+ BeaconNodeTester.runHistoricalProcess(bc, 2, 1, 0, 0, 0)
+ validateSlot(bc, BeaconNodeTester.TestEvents["0"].HeadMessage, 0, "proposed")
+ validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["0"].HeadMessage, BeaconNodeTester.TestEvents["0"].CorrectParentRoot, BeaconNodeTester.TestEvents["0"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["0"].CorrectSignedBeaconBlockMhKey)
+ validateBeaconState(bc, BeaconNodeTester.TestEvents["0"].HeadMessage, BeaconNodeTester.TestEvents["0"].CorrectBeaconStateMhKey)
+ })
+ })
+ Context("When there is a skipped slot", func() {
+ It("Should process the slot properly.", func() {
+ bc := setUpTest(BeaconNodeTester.TestConfig, "3797055")
+ BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
+ defer httpmock.DeactivateAndReset()
+ BeaconNodeTester.writeEventToHistoricProcess(bc, 3797056, 3797056, 10)
+ BeaconNodeTester.runHistoricalProcess(bc, 2, 1, 0, 0, 0)
+ validateSlot(bc, BeaconNodeTester.TestEvents["3797056"].HeadMessage, 118658, "skipped")
+ })
+ })
+ })
+ Describe("Running the Application to process Known Gaps", Label("unit", "behavioral", "knownGaps"), func() {
+ Context("Phase0 + Altairs: When we need to process a multiple blocks in a multiple entries in the eth_beacon.known_gaps table.", func() {
+ It("Successfully Process the Blocks", func() {
+ bc := setUpTest(BeaconNodeTester.TestConfig, "99")
+ BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
+ defer httpmock.DeactivateAndReset()
+ BeaconNodeTester.writeEventToKnownGaps(bc, 100, 101)
+ BeaconNodeTester.runKnownGapsProcess(bc, 2, 2, 0, 0, 0)
+ // Run two separate processes
+ BeaconNodeTester.writeEventToKnownGaps(bc, 2375703, 2375703)
+ BeaconNodeTester.runKnownGapsProcess(bc, 2, 3, 0, 0, 0)
+
+ time.Sleep(2 * time.Second)
+ validatePopularBatchBlocks(bc)
+ })
+ })
+ Context("When the start block is greater than the endBlock", func() {
+ It("Should Add two entries to the knownGaps table", func() {
+ bc := setUpTest(BeaconNodeTester.TestConfig, "104")
+ BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
+ defer httpmock.DeactivateAndReset()
+ BeaconNodeTester.writeEventToKnownGaps(bc, 101, 100)
+ BeaconNodeTester.runKnownGapsProcess(bc, 2, 2, 0, 2, 0)
+ })
+ })
+ Context("When theres a reprocessing error", Label("reprocessingError"), func() {
+ It("Should update the reprocessing error.", func() {
+ bc := setUpTest(BeaconNodeTester.TestConfig, "99")
+ BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
+ defer httpmock.DeactivateAndReset()
+ // We don't have an entry in the BeaconNodeTester for this slot
+ BeaconNodeTester.writeEventToHistoricProcess(bc, 105, 105, 10)
+ BeaconNodeTester.runHistoricalProcess(bc, 2, 0, 0, 1, 0)
+ BeaconNodeTester.runKnownGapsProcess(bc, 2, 0, 0, 1, 1)
+ })
+ })
+ })
+ Describe("Running the application in Historic, Head, and KnownGaps mode", Label("unit", "historical", "full"), func() {
+ Context("When it recieves a head, historic and known Gaps message (in order)", func() {
+ It("Should process them all successfully.", func() {
+ bc := setUpTest(BeaconNodeTester.TestConfig, "2375702")
+ BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
+ defer httpmock.DeactivateAndReset()
+ // Head
+ BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0)
+
+ // Historical
+ BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 100, 10)
+ BeaconNodeTester.runHistoricalProcess(bc, 2, 2, 0, 0, 0)
+
+ // Known Gaps
+ BeaconNodeTester.writeEventToKnownGaps(bc, 101, 101)
+ BeaconNodeTester.runKnownGapsProcess(bc, 2, 3, 0, 0, 0)
+
+ time.Sleep(2 * time.Second)
+ validatePopularBatchBlocks(bc)
+ })
+ })
+ Context("When it recieves a historic, head and known Gaps message (in order)", func() {
+ It("Should process them all successfully.", func() {
+ bc := setUpTest(BeaconNodeTester.TestConfig, "2375702")
+ BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
+ defer httpmock.DeactivateAndReset()
+ // Historical
+ BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 100, 10)
+ BeaconNodeTester.runHistoricalProcess(bc, 2, 1, 0, 0, 0)
+
+ // Head
+ BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0)
+
+ // Known Gaps
+ BeaconNodeTester.writeEventToKnownGaps(bc, 101, 101)
+ BeaconNodeTester.runKnownGapsProcess(bc, 2, 3, 0, 0, 0)
+
+ time.Sleep(2 * time.Second)
+ validatePopularBatchBlocks(bc)
+ })
+ })
+ Context("When it recieves a known Gaps, historic and head message (in order)", func() {
+ It("Should process them all successfully.", func() {
+ bc := setUpTest(BeaconNodeTester.TestConfig, "2375702")
+ BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
+ defer httpmock.DeactivateAndReset()
+ // Known Gaps
+ BeaconNodeTester.writeEventToKnownGaps(bc, 101, 101)
+ BeaconNodeTester.runKnownGapsProcess(bc, 2, 1, 0, 0, 0)
+
+ // Historical
+ BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 100, 10)
+ BeaconNodeTester.runHistoricalProcess(bc, 2, 2, 0, 0, 0)
+
+ // Head
+ BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0)
+
+ time.Sleep(2 * time.Second)
+ validatePopularBatchBlocks(bc)
+ })
+ })
+ })
+})
+
+ // writeEventToKnownGaps inserts one event into the eth_beacon.known_gaps table, failing the spec if the insert does not affect exactly one row.
+ func (tbc TestBeaconNode) writeEventToKnownGaps(bc *beaconclient.BeaconClient, startSlot, endSlot int) {
+ log.Debug("We are writing the necessary events to batch process")
+ insertKnownGapsStmt := `INSERT INTO eth_beacon.known_gaps (start_slot, end_slot)
+ VALUES ($1, $2);`
+ res, err := bc.Db.Exec(context.Background(), insertKnownGapsStmt, startSlot, endSlot)
+ Expect(err).ToNot(HaveOccurred())
+ rows, err := res.RowsAffected()
+ Expect(err).ToNot(HaveOccurred())
+ if rows != 1 {
+ Fail("We didnt write...")
+ }
+ }
+
+ // writeEventToHistoricProcess inserts one event into the eth_beacon.historic_process table, failing the spec if the insert does not affect exactly one row.
+ func (tbc TestBeaconNode) writeEventToHistoricProcess(bc *beaconclient.BeaconClient, startSlot, endSlot, priority int) {
+ log.Debug("We are writing the necessary events to batch process")
+ insertHistoricProcessingStmt := `INSERT INTO eth_beacon.historic_process (start_slot, end_slot, priority)
+ VALUES ($1, $2, $3);`
+ res, err := bc.Db.Exec(context.Background(), insertHistoricProcessingStmt, startSlot, endSlot, priority)
+ Expect(err).ToNot(HaveOccurred())
+ rows, err := res.RowsAffected()
+ Expect(err).ToNot(HaveOccurred())
+ if rows != 1 {
+ Fail("We didnt write...")
+ }
+ }
+
+ // Start CaptureHistoric in a goroutine, block until the expected metrics are observed, then stop it and verify no historic_process rows remain checked out.
+ func (tbc TestBeaconNode) runHistoricalProcess(bc *beaconclient.BeaconClient, maxWorkers int, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) {
+ ctx, cancel := context.WithCancel(context.Background())
+ go bc.CaptureHistoric(ctx, maxWorkers)
+ validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError)
+ log.Debug("Calling the stop function for historical processing..")
+ err := bc.StopHistoric(cancel)
+ time.Sleep(5 * time.Second)
+ Expect(err).ToNot(HaveOccurred())
+ validateAllRowsCheckedOut(bc.Db, hpCheckCheckedOutStmt)
+ }
+
+ // Start ProcessKnownGaps in a goroutine, block until the expected metrics are observed, then stop it and verify no known_gaps rows remain checked out.
+ func (tbc TestBeaconNode) runKnownGapsProcess(bc *beaconclient.BeaconClient, maxWorkers int, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) {
+ ctx, cancel := context.WithCancel(context.Background())
+ go bc.ProcessKnownGaps(ctx, maxWorkers)
+ validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError)
+ err := bc.StopKnownGapsProcessing(cancel)
+ time.Sleep(5 * time.Second)
+ Expect(err).ToNot(HaveOccurred())
+ validateAllRowsCheckedOut(bc.Db, kgCheckCheckedOutStmt)
+ }
+
+ // validateMetrics polls each metric counter (1s interval, up to maxRetry attempts) until it equals the expected value, failing the spec on timeout.
+ func validateMetrics(bc *beaconclient.BeaconClient, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) {
+ curRetry := 0
+ value := atomic.LoadUint64(&bc.Metrics.SlotInserts)
+ for value != expectedInserts {
+ time.Sleep(1 * time.Second)
+ curRetry = curRetry + 1
+ if curRetry == maxRetry {
+ Fail(fmt.Sprintf("Too many retries have occurred. The number of inserts expected %d, the number that actually occurred, %d", expectedInserts, atomic.LoadUint64(&bc.Metrics.SlotInserts)))
+ }
+ value = atomic.LoadUint64(&bc.Metrics.SlotInserts)
+ }
+ curRetry = 0
+ value = atomic.LoadUint64(&bc.Metrics.KnownGapsInserts)
+ for value != expectedKnownGaps {
+ time.Sleep(1 * time.Second)
+ curRetry = curRetry + 1
+ if curRetry == maxRetry {
+ Fail(fmt.Sprintf("Too many retries have occurred. The number of knownGaps expected %d, the number that actually occurred, %d", expectedKnownGaps, atomic.LoadUint64(&bc.Metrics.KnownGapsInserts)))
+ }
+ value = atomic.LoadUint64(&bc.Metrics.KnownGapsInserts)
+ }
+ curRetry = 0
+ value = atomic.LoadUint64(&bc.Metrics.KnownGapsReprocessError)
+ for value != expectedKnownGapsReprocessError {
+ time.Sleep(1 * time.Second)
+ curRetry = curRetry + 1
+ if curRetry == maxRetry {
+ Fail(fmt.Sprintf("Too many retries have occurred. The number of knownGapsReprocessingErrors expected %d, the number that actually occurred, %d", expectedKnownGapsReprocessError, value))
+ }
+ value = atomic.LoadUint64(&bc.Metrics.KnownGapsReprocessError)
+ log.Debug("bc.Metrics.KnownGapsReprocessError: ", value)
+ }
+ curRetry = 0
+ value = atomic.LoadUint64(&bc.Metrics.ReorgInserts)
+ for value != expectedReorgs {
+ time.Sleep(1 * time.Second)
+ curRetry = curRetry + 1
+ if curRetry == maxRetry {
+ Fail(fmt.Sprintf("Too many retries have occurred. The number of Reorgs expected %d, the number that actually occurred, %d", expectedReorgs, atomic.LoadUint64(&bc.Metrics.ReorgInserts)))
+ }
+ value = atomic.LoadUint64(&bc.Metrics.ReorgInserts)
+ }
+ }
+
+ // Validate the slots, signed blocks, and beacon states for the commonly used test slots 100, 101 and 2375703.
+ func validatePopularBatchBlocks(bc *beaconclient.BeaconClient) {
+ validateSlot(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, "proposed")
+ validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectParentRoot, BeaconNodeTester.TestEvents["100"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["100"].CorrectSignedBeaconBlockMhKey)
+ validateBeaconState(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectBeaconStateMhKey)
+
+ validateSlot(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, 3, "proposed")
+ validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, BeaconNodeTester.TestEvents["100"].HeadMessage.Block, BeaconNodeTester.TestEvents["101"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["101"].CorrectSignedBeaconBlockMhKey)
+ validateBeaconState(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, BeaconNodeTester.TestEvents["101"].CorrectBeaconStateMhKey)
+
+ validateSlot(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, "proposed")
+ validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectParentRoot, BeaconNodeTester.TestEvents["2375703"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["2375703"].CorrectSignedBeaconBlockMhKey)
+ validateBeaconState(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectBeaconStateMhKey)
+ }
+
+ // Assert that no rows are still checked out: checkStmt selects checked_out=true rows, and we expect it to affect zero rows.
+ func validateAllRowsCheckedOut(db sql.Database, checkStmt string) {
+ res, err := db.Exec(context.Background(), checkStmt)
+ Expect(err).ToNot(HaveOccurred())
+ rows, err := res.RowsAffected()
+ Expect(err).ToNot(HaveOccurred())
+ Expect(rows).To(Equal(int64(0)))
+ }
diff --git a/pkg/beaconclient/checkbeaconserverstatus.go b/pkg/beaconclient/checkbeaconserverstatus.go
new file mode 100644
index 0000000..55b3305
--- /dev/null
+++ b/pkg/beaconclient/checkbeaconserverstatus.go
@@ -0,0 +1,207 @@
+// VulcanizeDB
+// Copyright © 2022 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+package beaconclient
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync/atomic"
+
+ log "github.com/sirupsen/logrus"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
+)
+
+var (
+ MissingBeaconServerType error = fmt.Errorf("The beacon server type provided is not handled.")
+ LighthouseMissingSlots error = fmt.Errorf("Anchor is not nil. This means lighthouse has not backfilled all the slots from Genesis to head.")
+)
+
+// The sync response when checking if the node is synced.
+type Sync struct {
+ Data SyncData `json:"data"`
+}
+
+// The sync data
+type SyncData struct {
+ IsSync bool `json:"is_syncing"`
+ HeadSlot string `json:"head_slot"`
+ SyncDistance string `json:"sync_distance"`
+}
+
+ // CheckHeadSync reports whether the beacon node is still syncing to the head of chain; on query failure it returns true (assume syncing) along with the error.
+ //{"data":{"is_syncing":true,"head_slot":"62528","sync_distance":"3734299"}}
+ func (bc BeaconClient) CheckHeadSync() (bool, error) {
+ syncStatus, err := bc.QueryHeadSync()
+ if err != nil {
+ return true, err
+ }
+ return syncStatus.Data.IsSync, nil
+ }
+
+ func (bc BeaconClient) QueryHeadSync() (Sync, error) {
+ var syncStatus Sync
+ bcSync := bc.ServerEndpoint + BcSyncStatusEndpoint
+ resp, err := http.Get(bcSync)
+
+ if err != nil {
+ loghelper.LogEndpoint(bcSync).Error("Unable to check the sync status")
+ return syncStatus, err
+ }
+ // Close the response body on every return path, including the non-2xx error below.
+ defer resp.Body.Close()
+
+ if resp.StatusCode < 200 || resp.StatusCode > 299 {
+ loghelper.LogEndpoint(bcSync).WithFields(log.Fields{"returnCode": resp.StatusCode}).Error("Error when getting the sync status")
+ return syncStatus, fmt.Errorf("Querying the sync status returned a non 2xx status code, code provided: %d", resp.StatusCode)
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return syncStatus, err
+ }
+
+ if err := json.Unmarshal(body, &syncStatus); err != nil {
+ loghelper.LogEndpoint(bcSync).WithFields(log.Fields{
+ "rawMessage": string(body),
+ "err": err,
+ }).Error("Unable to unmarshal sync status")
+ return syncStatus, err
+ }
+ return syncStatus, nil
+ }
+
+// The response when checking the lighthouse nodes DB info: /lighthouse/database/info
+type LighthouseDatabaseInfo struct {
+ SchemaVersion int `json:"schema_version"`
+ Config LhDbConfig `json:"config"`
+ Split LhDbSplit `json:"split"`
+ Anchor LhDbAnchor `json:"anchor"`
+}
+
+// The config field within the DatabaseInfo response.
+type LhDbConfig struct {
+ SlotsPerRestorePoint int `json:"slots_per_restore_point"`
+ SlotsPerRestorePointSetExplicitly bool `json:"slots_per_restore_point_set_explicitly"`
+ BlockCacheSize int `json:"block_cache_size"`
+ CompactOnInit bool `json:"compact_on_init"`
+ CompactOnPrune bool `json:"compact_on_prune"`
+}
+
+// The split field within the DatabaseInfo response.
+type LhDbSplit struct {
+ Slot string `json:"slot"`
+ StateRoot string `json:"state_root"`
+}
+
+// The anchor field within the DatabaseInfo response.
+type LhDbAnchor struct {
+ AnchorSlot string `json:"anchor_slot"`
+ OldestBlockSlot string `json:"oldest_block_slot"`
+ OldestBlockParent string `json:"oldest_block_parent"`
+ StateUpperLimit string `json:"state_upper_limit"`
+ StateLowerLimit string `json:"state_lower_limit"`
+}
+
+ // queryHeadSlotInBeaconServer returns the beacon server's current head slot as reported by the sync status endpoint.
+ func (bc BeaconClient) queryHeadSlotInBeaconServer() (int, error) {
+ syncStatus, err := bc.QueryHeadSync()
+ if err != nil {
+ return 0, err
+ }
+ headSlot, err := strconv.Atoi(syncStatus.Data.HeadSlot)
+ if err != nil {
+ return 0, err
+ }
+ return headSlot, nil
+ }
+
+ // queryLighthouseDbInfo returns the lighthouse node's database info from the LhDbInfoEndpoint.
+ func (bc BeaconClient) queryLighthouseDbInfo() (LighthouseDatabaseInfo, error) {
+ var dbInfo LighthouseDatabaseInfo
+
+ lhDbInfo := bc.ServerEndpoint + LhDbInfoEndpoint
+ resp, err := http.Get(lhDbInfo)
+
+ if err != nil {
+ loghelper.LogEndpoint(lhDbInfo).Error("Unable to get the lighthouse database information")
+ return dbInfo, err
+ }
+ // Close the response body on every return path, including the non-2xx error below.
+ defer resp.Body.Close()
+ if resp.StatusCode < 200 || resp.StatusCode > 299 {
+ loghelper.LogEndpoint(lhDbInfo).WithFields(log.Fields{"returnCode": resp.StatusCode}).Error("Error when getting the lighthouse database information")
+ return dbInfo, fmt.Errorf("Querying the lighthouse database information returned a non 2xx status code, code provided: %d", resp.StatusCode)
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return dbInfo, err
+ }
+
+ if err := json.Unmarshal(body, &dbInfo); err != nil {
+ loghelper.LogEndpoint(lhDbInfo).WithFields(log.Fields{
+ "rawMessage": string(body),
+ "err": err,
+ }).Error("Unable to unmarshal the lighthouse database information")
+ return dbInfo, err
+ }
+ return dbInfo, nil
+ }
+
+ // GetLatestSlotInBeaconServer reports the newest slot the beacon server has available, such that
+ // every slot prior to it is also available. Only the "lighthouse" server type is currently handled.
+ func (bc BeaconClient) GetLatestSlotInBeaconServer(beaconServerType string) (int, error) {
+ switch strings.ToLower(beaconServerType) {
+ case "lighthouse":
+ headSlot, err := bc.queryHeadSlotInBeaconServer()
+ if err != nil {
+ return 0, err
+ }
+ lhDb, err := bc.queryLighthouseDbInfo()
+ if err != nil {
+ return 0, err
+ }
+ if lhDb.Anchor == (LhDbAnchor{}) {
+ //atomic.StoreInt64(&bc.LatestSlotInBeaconServer, int64(headSlot))
+ log.WithFields(log.Fields{
+ "headSlot": headSlot,
+ }).Info("Anchor is nil, the lighthouse client has all the nodes from genesis to head.")
+ return headSlot, nil
+ } else {
+ log.WithFields(log.Fields{
+ "lhDb.Anchor": lhDb.Anchor,
+ }).Info(LighthouseMissingSlots.Error())
+ log.Info("We will add a feature down the road to wait for anchor to be null, if its needed.")
+ return 0, LighthouseMissingSlots
+ }
+ default:
+ log.WithFields(log.Fields{"BeaconServerType": beaconServerType}).Error(MissingBeaconServerType.Error())
+ return 0, MissingBeaconServerType
+ }
+ }
+
+ // UpdateLatestSlotInBeaconServer atomically records the latest head slot reported by the beacon server.
+ func (bc BeaconClient) UpdateLatestSlotInBeaconServer(headSlot int64) {
+ curr := atomic.LoadInt64(&bc.LatestSlotInBeaconServer)
+ log.WithFields(log.Fields{
+ "Previous Latest Slot": curr,
+ "New Latest Slot": headSlot,
+ }).Debug("Swapping Head Slot")
+ atomic.StoreInt64(&bc.LatestSlotInBeaconServer, headSlot)
+ }
diff --git a/pkg/beaconclient/checksyncstatus.go b/pkg/beaconclient/checksyncstatus.go
deleted file mode 100644
index 3f88398..0000000
--- a/pkg/beaconclient/checksyncstatus.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// VulcanizeDB
-// Copyright © 2022 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-package beaconclient
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
-
- log "github.com/sirupsen/logrus"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
-)
-
-// The sync response
-type Sync struct {
- Data SyncData `json:"data"`
-}
-
-// The sync data
-type SyncData struct {
- IsSync bool `json:"is_syncing"`
- HeadSlot string `json:"head_slot"`
- SyncDistance string `json:"sync_distance"`
-}
-
-// This function will check to see if we are synced up with the head of chain.
-//{"data":{"is_syncing":true,"head_slot":"62528","sync_distance":"3734299"}}
-func (bc BeaconClient) CheckHeadSync() (bool, error) {
- bcSync := bc.ServerEndpoint + BcSyncStatusEndpoint
- resp, err := http.Get(bcSync)
-
- if err != nil {
- loghelper.LogEndpoint(bcSync).Error("Unable to check the sync status")
- return true, err
- }
-
- if resp.StatusCode < 200 || resp.StatusCode > 299 {
- loghelper.LogEndpoint(bcSync).WithFields(log.Fields{"returnCode": resp.StatusCode}).Error("Error when getting the sync status")
- return true, fmt.Errorf("Querying the sync status returned a non 2xx status code, code provided: %d", resp.StatusCode)
- }
-
- defer resp.Body.Close()
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return true, err
- }
-
- var syncStatus Sync
- if err := json.Unmarshal(body, &syncStatus); err != nil {
- loghelper.LogEndpoint(bcSync).WithFields(log.Fields{
- "rawMessage": string(body),
- "err": err,
- }).Error("Unable to unmarshal sync status")
- return true, err
- }
-
- return syncStatus.Data.IsSync, nil
-}
diff --git a/pkg/beaconclient/databasewrite.go b/pkg/beaconclient/databasewrite.go
index 84d160e..b8f8aaf 100644
--- a/pkg/beaconclient/databasewrite.go
+++ b/pkg/beaconclient/databasewrite.go
@@ -20,44 +20,63 @@ import (
"fmt"
"strconv"
+ "github.com/jackc/pgx/v4"
log "github.com/sirupsen/logrus"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
+ "golang.org/x/sync/errgroup"
)
var (
- // Statement to upsert to the ethcl.slots table.
+ // Statement to upsert to the eth_beacon.slots table.
UpsertSlotsStmt string = `
-INSERT INTO ethcl.slots (epoch, slot, block_root, state_root, status)
+INSERT INTO eth_beacon.slots (epoch, slot, block_root, state_root, status)
VALUES ($1, $2, $3, $4, $5) ON CONFLICT (slot, block_root) DO NOTHING`
- // Statement to upsert to the ethcl.signed_beacon_blocks table.
+ // Statement to upsert to the eth_beacon.signed_blocks table.
UpsertSignedBeaconBlockStmt string = `
-INSERT INTO ethcl.signed_beacon_block (slot, block_root, parent_block_root, eth1_block_hash, mh_key)
+INSERT INTO eth_beacon.signed_block (slot, block_root, parent_block_root, eth1_block_hash, mh_key)
VALUES ($1, $2, $3, $4, $5) ON CONFLICT (slot, block_root) DO NOTHING`
- // Statement to upsert to the ethcl.beacon_state table.
+ // Statement to upsert to the eth_beacon.state table.
UpsertBeaconState string = `
-INSERT INTO ethcl.beacon_state (slot, state_root, mh_key)
+INSERT INTO eth_beacon.state (slot, state_root, mh_key)
VALUES ($1, $2, $3) ON CONFLICT (slot, state_root) DO NOTHING`
// Statement to upsert to the public.blocks table.
UpsertBlocksStmt string = `
INSERT INTO public.blocks (key, data)
VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`
- UpdateForkedStmt string = `UPDATE ethcl.slots
+ UpdateForkedStmt string = `UPDATE eth_beacon.slots
SET status='forked'
WHERE slot=$1 AND block_root<>$2
RETURNING block_root;`
- UpdateProposedStmt string = `UPDATE ethcl.slots
+ UpdateProposedStmt string = `UPDATE eth_beacon.slots
SET status='proposed'
WHERE slot=$1 AND block_root=$2
RETURNING block_root;`
CheckProposedStmt string = `SELECT slot, block_root
- FROM ethcl.slots
+ FROM eth_beacon.slots
WHERE slot=$1 AND block_root=$2;`
+ // Check to see if the slot and block_root exist in eth_beacon.signed_block
+ CheckSignedBeaconBlockStmt string = `SELECT slot, block_root
+ FROM eth_beacon.signed_block
+ WHERE slot=$1 AND block_root=$2`
+ // Check to see if the slot and state_root exist in eth_beacon.state
+ CheckBeaconStateStmt string = `SELECT slot, state_root
+ FROM eth_beacon.state
+ WHERE slot=$1 AND state_root=$2`
+ // Used to get a single slot from the table if it exists
+ QueryBySlotStmt string = `SELECT slot
+ FROM eth_beacon.slots
+ WHERE slot=$1`
// Statement to insert known_gaps. We don't pass in timestamp, we let the server take care of that one.
UpsertKnownGapsStmt string = `
-INSERT INTO ethcl.known_gaps (start_slot, end_slot, checked_out, reprocessing_error, entry_error, entry_process)
+INSERT INTO eth_beacon.known_gaps (start_slot, end_slot, checked_out, reprocessing_error, entry_error, entry_process)
VALUES ($1, $2, $3, $4, $5, $6) on CONFLICT (start_slot, end_slot) DO NOTHING`
- QueryHighestSlotStmt string = "SELECT COALESCE(MAX(slot), 0) FROM ethcl.slots"
+ UpsertKnownGapsErrorStmt string = `
+ UPDATE eth_beacon.known_gaps
+ SET reprocessing_error=$3, priority=priority+1
+ WHERE start_slot=$1 AND end_slot=$2;`
+ // Get the highest slot if one exists
+ QueryHighestSlotStmt string = "SELECT COALESCE(MAX(slot), 0) FROM eth_beacon.slots"
)
// Put all functionality to prepare the write object
@@ -65,6 +84,8 @@ VALUES ($1, $2, $3, $4, $5, $6) on CONFLICT (start_slot, end_slot) DO NOTHING`
// Remove any of it from the processslot file.
type DatabaseWriter struct {
Db sql.Database
+ Tx sql.Tx
+ Ctx context.Context
Metrics *BeaconClientMetrics
DbSlots *DbSlots
DbSignedBeaconBlock *DbSignedBeaconBlock
@@ -75,14 +96,21 @@ type DatabaseWriter struct {
func CreateDatabaseWrite(db sql.Database, slot int, stateRoot string, blockRoot string, parentBlockRoot string,
eth1BlockHash string, status string, rawSignedBeaconBlock []byte, rawBeaconState []byte, metrics *BeaconClientMetrics) (*DatabaseWriter, error) {
+ ctx := context.Background()
+ tx, err := db.Begin(ctx)
+ if err != nil {
+ loghelper.LogError(err).Error("We are unable to Begin a SQL transaction")
+ }
dw := &DatabaseWriter{
Db: db,
+ Tx: tx,
+ Ctx: ctx,
rawBeaconState: rawBeaconState,
rawSignedBeaconBlock: rawSignedBeaconBlock,
Metrics: metrics,
}
dw.prepareSlotsModel(slot, stateRoot, blockRoot, status)
- err := dw.prepareSignedBeaconBlockModel(slot, blockRoot, parentBlockRoot, eth1BlockHash)
+ err = dw.prepareSignedBeaconBlockModel(slot, blockRoot, parentBlockRoot, eth1BlockHash)
if err != nil {
return nil, err
}
@@ -95,7 +123,7 @@ func CreateDatabaseWrite(db sql.Database, slot int, stateRoot string, blockRoot
// Write functions to write each all together...
// Should I do one atomic write?
-// Create the model for the ethcl.slots table
+// Create the model for the eth_beacon.slots table
func (dw *DatabaseWriter) prepareSlotsModel(slot int, stateRoot string, blockRoot string, status string) {
dw.DbSlots = &DbSlots{
Epoch: calculateEpoch(slot, bcSlotsPerEpoch),
@@ -108,7 +136,7 @@ func (dw *DatabaseWriter) prepareSlotsModel(slot int, stateRoot string, blockRoo
}
-// Create the model for the ethcl.signed_beacon_block table.
+// Create the model for the eth_beacon.signed_block table.
func (dw *DatabaseWriter) prepareSignedBeaconBlockModel(slot int, blockRoot string, parentBlockRoot string, eth1BlockHash string) error {
mhKey, err := MultihashKeyFromSSZRoot([]byte(dw.DbSlots.BlockRoot))
if err != nil {
@@ -125,7 +153,7 @@ func (dw *DatabaseWriter) prepareSignedBeaconBlockModel(slot int, blockRoot stri
return nil
}
-// Create the model for the ethcl.beacon_state table.
+// Create the model for the eth_beacon.state table.
func (dw *DatabaseWriter) prepareBeaconStateModel(slot int, stateRoot string) error {
mhKey, err := MultihashKeyFromSSZRoot([]byte(dw.DbSlots.StateRoot))
if err != nil {
@@ -140,49 +168,66 @@ func (dw *DatabaseWriter) prepareBeaconStateModel(slot int, stateRoot string) er
return nil
}
-// Write all the data for a given slot.
-func (dw *DatabaseWriter) writeFullSlot() error {
+// Add all the data for a given slot to a SQL transaction.
+// Originally it wrote to each table individually.
+func (dw *DatabaseWriter) transactFullSlot() error {
// If an error occurs, write to knownGaps table.
log.WithFields(log.Fields{
"slot": dw.DbSlots.Slot,
}).Debug("Starting to write to the DB.")
- err := dw.writeSlots()
+ err := dw.transactSlots()
if err != nil {
+ loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("We couldn't write to the eth_beacon.slots table...")
return err
}
+ log.Debug("We finished writing to the eth_beacon.slots table.")
if dw.DbSlots.Status != "skipped" {
- err = dw.writeSignedBeaconBlocks()
+ //errG, _ := errgroup.WithContext(context.Background())
+ //errG.Go(func() error {
+ // return dw.transactSignedBeaconBlocks()
+ //})
+ //errG.Go(func() error {
+ // return dw.transactBeaconState()
+ //})
+ //if err := errG.Wait(); err != nil {
+ // loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("We couldn't write to the eth_beacon block or state table...")
+ // return err
+ //}
+ // Might want to separate writing to public.blocks so we can do this concurrently...
+ err := dw.transactSignedBeaconBlocks()
if err != nil {
+ loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("We couldn't write to the eth_beacon block table...")
return err
}
- err = dw.writeBeaconState()
+ err = dw.transactBeaconState()
if err != nil {
+ loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("We couldn't write to the eth_beacon state table...")
return err
}
}
- dw.Metrics.IncrementHeadTrackingInserts(1)
+ dw.Metrics.IncrementSlotInserts(1)
return nil
}
-// Write the information for the generic slots table. For now this is only one function.
+// Add data for the eth_beacon.slots table to a transaction. For now this is only one function.
// But in the future if we need to incorporate any FK's or perform any actions to write to the
// slots table we can do it all here.
-func (dw *DatabaseWriter) writeSlots() error {
+func (dw *DatabaseWriter) transactSlots() error {
return dw.upsertSlots()
}
-// Upsert to the ethcl.slots table.
+// Upsert to the eth_beacon.slots table.
func (dw *DatabaseWriter) upsertSlots() error {
- _, err := dw.Db.Exec(context.Background(), UpsertSlotsStmt, dw.DbSlots.Epoch, dw.DbSlots.Slot, dw.DbSlots.BlockRoot, dw.DbSlots.StateRoot, dw.DbSlots.Status)
+ _, err := dw.Tx.Exec(dw.Ctx, UpsertSlotsStmt, dw.DbSlots.Epoch, dw.DbSlots.Slot, dw.DbSlots.BlockRoot, dw.DbSlots.StateRoot, dw.DbSlots.Status)
if err != nil {
- loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("Unable to write to the slot to the ethcl.slots table")
+ loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("Unable to write to the slot to the eth_beacon.slots table")
return err
}
return nil
}
-// Write the information for the signed_beacon_block.
-func (dw *DatabaseWriter) writeSignedBeaconBlocks() error {
+// Add the information for the signed_block to a transaction.
+func (dw *DatabaseWriter) transactSignedBeaconBlocks() error {
err := dw.upsertPublicBlocks(dw.DbSignedBeaconBlock.MhKey, dw.rawSignedBeaconBlock)
if err != nil {
return err
@@ -196,7 +241,7 @@ func (dw *DatabaseWriter) writeSignedBeaconBlocks() error {
// Upsert to public.blocks.
func (dw *DatabaseWriter) upsertPublicBlocks(key string, data []byte) error {
- _, err := dw.Db.Exec(context.Background(), UpsertBlocksStmt, key, data)
+ _, err := dw.Tx.Exec(dw.Ctx, UpsertBlocksStmt, key, data)
if err != nil {
loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("Unable to write to the slot to the public.blocks table")
return err
@@ -204,18 +249,18 @@ func (dw *DatabaseWriter) upsertPublicBlocks(key string, data []byte) error {
return nil
}
-// Upsert to the ethcl.signed_beacon_block table.
+// Upsert to the eth_beacon.signed_block table.
func (dw *DatabaseWriter) upsertSignedBeaconBlock() error {
- _, err := dw.Db.Exec(context.Background(), UpsertSignedBeaconBlockStmt, dw.DbSignedBeaconBlock.Slot, dw.DbSignedBeaconBlock.BlockRoot, dw.DbSignedBeaconBlock.ParentBlock, dw.DbSignedBeaconBlock.Eth1BlockHash, dw.DbSignedBeaconBlock.MhKey)
+ _, err := dw.Tx.Exec(dw.Ctx, UpsertSignedBeaconBlockStmt, dw.DbSignedBeaconBlock.Slot, dw.DbSignedBeaconBlock.BlockRoot, dw.DbSignedBeaconBlock.ParentBlock, dw.DbSignedBeaconBlock.Eth1BlockHash, dw.DbSignedBeaconBlock.MhKey)
if err != nil {
- loghelper.LogSlotError(dw.DbSlots.Slot, err).WithFields(log.Fields{"block_root": dw.DbSignedBeaconBlock.BlockRoot}).Error("Unable to write to the slot to the ethcl.signed_beacon_block table")
+ loghelper.LogSlotError(dw.DbSlots.Slot, err).WithFields(log.Fields{"block_root": dw.DbSignedBeaconBlock.BlockRoot}).Error("Unable to write to the slot to the eth_beacon.signed_block table")
return err
}
return nil
}
-// Write the information for the beacon_state.
-func (dw *DatabaseWriter) writeBeaconState() error {
+// Add the information for the state to a transaction.
+func (dw *DatabaseWriter) transactBeaconState() error {
err := dw.upsertPublicBlocks(dw.DbBeaconState.MhKey, dw.rawBeaconState)
if err != nil {
return err
@@ -227,33 +272,33 @@ func (dw *DatabaseWriter) writeBeaconState() error {
return nil
}
-// Upsert to the ethcl.beacon_state table.
+// Upsert to the eth_beacon.state table.
func (dw *DatabaseWriter) upsertBeaconState() error {
- _, err := dw.Db.Exec(context.Background(), UpsertBeaconState, dw.DbBeaconState.Slot, dw.DbBeaconState.StateRoot, dw.DbBeaconState.MhKey)
+ _, err := dw.Tx.Exec(dw.Ctx, UpsertBeaconState, dw.DbBeaconState.Slot, dw.DbBeaconState.StateRoot, dw.DbBeaconState.MhKey)
if err != nil {
- loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("Unable to write to the slot to the ethcl.beacon_state table")
+ loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("Unable to write to the slot to the eth_beacon.state table")
return err
}
return nil
}
-// Update a given slot to be marked as forked. Provide the slot and the latest latestBlockRoot.
+// Update a given slot to be marked as forked within a transaction. Provide the slot and the latestBlockRoot.
// We will mark all entries for the given slot that don't match the provided latestBlockRoot as forked.
-func writeReorgs(db sql.Database, slot string, latestBlockRoot string, metrics *BeaconClientMetrics) {
+func transactReorgs(tx sql.Tx, ctx context.Context, slot string, latestBlockRoot string, metrics *BeaconClientMetrics) {
slotNum, strErr := strconv.Atoi(slot)
if strErr != nil {
loghelper.LogReorgError(slot, latestBlockRoot, strErr).Error("We can't convert the slot to an int...")
}
- forkCount, err := updateForked(db, slot, latestBlockRoot)
+ forkCount, err := updateForked(tx, ctx, slot, latestBlockRoot)
if err != nil {
loghelper.LogReorgError(slot, latestBlockRoot, err).Error("We ran into some trouble while updating all forks.")
- writeKnownGaps(db, 1, slotNum, slotNum, err, "reorg", metrics)
+ transactKnownGaps(tx, ctx, 1, slotNum, slotNum, err, "reorg", metrics)
}
- proposedCount, err := updateProposed(db, slot, latestBlockRoot)
+ proposedCount, err := updateProposed(tx, ctx, slot, latestBlockRoot)
if err != nil {
loghelper.LogReorgError(slot, latestBlockRoot, err).Error("We ran into some trouble while trying to update the proposed slot.")
- writeKnownGaps(db, 1, slotNum, slotNum, err, "reorg", metrics)
+ transactKnownGaps(tx, ctx, 1, slotNum, slotNum, err, "reorg", metrics)
}
if forkCount > 0 {
@@ -274,31 +319,39 @@ func writeReorgs(db sql.Database, slot string, latestBlockRoot string, metrics *
loghelper.LogReorg(slot, latestBlockRoot).WithFields(log.Fields{
"proposedCount": proposedCount,
}).Error("Too many rows were marked as proposed!")
- writeKnownGaps(db, 1, slotNum, slotNum, err, "reorg", metrics)
+ transactKnownGaps(tx, ctx, 1, slotNum, slotNum, fmt.Errorf("Too many rows were marked as unproposed."), "reorg", metrics)
} else if proposedCount == 0 {
- var count int
- err := db.QueryRow(context.Background(), CheckProposedStmt, slot, latestBlockRoot).Scan(count)
- if err != nil {
- loghelper.LogReorgError(slot, latestBlockRoot, err).Error("Unable to query proposed rows after reorg.")
- writeKnownGaps(db, 1, slotNum, slotNum, err, "reorg", metrics)
- } else if count != 1 {
- loghelper.LogReorg(slot, latestBlockRoot).WithFields(log.Fields{
- "proposedCount": count,
- }).Warn("The proposed block was not marked as proposed...")
- writeKnownGaps(db, 1, slotNum, slotNum, err, "reorg", metrics)
- } else {
- loghelper.LogReorg(slot, latestBlockRoot).Info("Updated the row that should have been marked as proposed.")
- }
+ transactKnownGaps(tx, ctx, 1, slotNum, slotNum, fmt.Errorf("Unable to find properly proposed row in DB"), "reorg", metrics)
+ loghelper.LogReorg(slot, latestBlockRoot).Info("Updated the row that should have been marked as proposed.")
}
- metrics.IncrementHeadTrackingReorgs(1)
+ metrics.IncrementReorgsInsert(1)
+}
+
+// Wrapper function that will create a transaction and execute the function.
+func writeReorgs(db sql.Database, slot string, latestBlockRoot string, metrics *BeaconClientMetrics) {
+ ctx := context.Background()
+ tx, err := db.Begin(ctx)
+ if err != nil {
+ loghelper.LogReorgError(slot, latestBlockRoot, err).Fatal("Unable to create a new transaction for reorgs")
+ }
+ defer func() {
+ err := tx.Rollback(ctx)
+ if err != nil && err != pgx.ErrTxClosed {
+ loghelper.LogError(err).Error("We were unable to Rollback a transaction for reorgs")
+ }
+ }()
+ transactReorgs(tx, ctx, slot, latestBlockRoot, metrics)
+ if err = tx.Commit(ctx); err != nil {
+ loghelper.LogReorgError(slot, latestBlockRoot, err).Fatal("Unable to execute the transaction for reorgs")
+ }
}
// Update the slots table by marking the old slot's as forked.
-func updateForked(db sql.Database, slot string, latestBlockRoot string) (int64, error) {
- res, err := db.Exec(context.Background(), UpdateForkedStmt, slot, latestBlockRoot)
+func updateForked(tx sql.Tx, ctx context.Context, slot string, latestBlockRoot string) (int64, error) {
+ res, err := tx.Exec(ctx, UpdateForkedStmt, slot, latestBlockRoot)
if err != nil {
- loghelper.LogReorgError(slot, latestBlockRoot, err).Error("We are unable to update the ethcl.slots table with the forked slots")
+ loghelper.LogReorgError(slot, latestBlockRoot, err).Error("We are unable to update the eth_beacon.slots table with the forked slots")
return 0, err
}
count, err := res.RowsAffected()
@@ -309,10 +362,11 @@ func updateForked(db sql.Database, slot string, latestBlockRoot string) (int64,
return count, err
}
-func updateProposed(db sql.Database, slot string, latestBlockRoot string) (int64, error) {
- res, err := db.Exec(context.Background(), UpdateProposedStmt, slot, latestBlockRoot)
+// Mark a slot as proposed.
+func updateProposed(tx sql.Tx, ctx context.Context, slot string, latestBlockRoot string) (int64, error) {
+ res, err := tx.Exec(ctx, UpdateProposedStmt, slot, latestBlockRoot)
if err != nil {
- loghelper.LogReorgError(slot, latestBlockRoot, err).Error("We are unable to update the ethcl.slots table with the proposed slot.")
+ loghelper.LogReorgError(slot, latestBlockRoot, err).Error("We are unable to update the eth_beacon.slots table with the proposed slot.")
return 0, err
}
count, err := res.RowsAffected()
@@ -324,20 +378,26 @@ func updateProposed(db sql.Database, slot string, latestBlockRoot string) (int64
return count, err
}
-// A wrapper function to call upsertKnownGaps. This function will break down the range of known_gaos into
+// A wrapper function to call upsertKnownGaps. This function will break down the range of known_gaps into
// smaller chunks. For example, instead of having an entry of 1-101, if we increment the entries by 10 slots, we would
// have 10 entries as follows: 1-10, 11-20, etc...
-func writeKnownGaps(db sql.Database, tableIncrement int, startSlot int, endSlot int, entryError error, entryProcess string, metric *BeaconClientMetrics) {
+func transactKnownGaps(tx sql.Tx, ctx context.Context, tableIncrement int, startSlot int, endSlot int, entryError error, entryProcess string, metric *BeaconClientMetrics) {
+ var entryErrorMsg string
+ if entryError == nil {
+ entryErrorMsg = ""
+ } else {
+ entryErrorMsg = entryError.Error()
+ }
if endSlot-startSlot <= tableIncrement {
kgModel := DbKnownGaps{
StartSlot: strconv.Itoa(startSlot),
EndSlot: strconv.Itoa(endSlot),
CheckedOut: false,
ReprocessingError: "",
- EntryError: entryError.Error(),
+ EntryError: entryErrorMsg,
EntryProcess: entryProcess,
}
- upsertKnownGaps(db, kgModel, metric)
+ upsertKnownGaps(tx, ctx, kgModel, metric)
} else {
totalSlots := endSlot - startSlot
var chunks int
@@ -359,31 +419,50 @@ func writeKnownGaps(db sql.Database, tableIncrement int, startSlot int, endSlot
EndSlot: strconv.Itoa(tempEnd),
CheckedOut: false,
ReprocessingError: "",
- EntryError: entryError.Error(),
+ EntryError: entryErrorMsg,
EntryProcess: entryProcess,
}
- upsertKnownGaps(db, kgModel, metric)
+ upsertKnownGaps(tx, ctx, kgModel, metric)
}
}
-
}
-// A function to upsert a single entry to the ethcl.known_gaps table.
-func upsertKnownGaps(db sql.Database, knModel DbKnownGaps, metric *BeaconClientMetrics) {
- _, err := db.Exec(context.Background(), UpsertKnownGapsStmt, knModel.StartSlot, knModel.EndSlot,
+// Wrapper function, instead of adding the knownGaps entries to a transaction, it will
+// create the transaction and write it.
+func writeKnownGaps(db sql.Database, tableIncrement int, startSlot int, endSlot int, entryError error, entryProcess string, metric *BeaconClientMetrics) {
+ ctx := context.Background()
+ tx, err := db.Begin(ctx)
+ if err != nil {
+ loghelper.LogSlotRangeError(strconv.Itoa(startSlot), strconv.Itoa(endSlot), err).Fatal("Unable to create a new transaction for knownGaps")
+ }
+ defer func() {
+ err := tx.Rollback(ctx)
+ if err != nil && err != pgx.ErrTxClosed {
+ loghelper.LogError(err).Error("We were unable to Rollback a transaction for reorgs")
+ }
+ }()
+ transactKnownGaps(tx, ctx, tableIncrement, startSlot, endSlot, entryError, entryProcess, metric)
+ if err = tx.Commit(ctx); err != nil {
+ loghelper.LogSlotRangeError(strconv.Itoa(startSlot), strconv.Itoa(endSlot), err).Fatal("Unable to execute the transaction for knownGaps")
+ }
+}
+
+// A function to upsert a single entry to the eth_beacon.known_gaps table.
+func upsertKnownGaps(tx sql.Tx, ctx context.Context, knModel DbKnownGaps, metric *BeaconClientMetrics) {
+ _, err := tx.Exec(ctx, UpsertKnownGapsStmt, knModel.StartSlot, knModel.EndSlot,
knModel.CheckedOut, knModel.ReprocessingError, knModel.EntryError, knModel.EntryProcess)
if err != nil {
log.WithFields(log.Fields{
"err": err,
"startSlot": knModel.StartSlot,
"endSlot": knModel.EndSlot,
- }).Fatal("We are unable to write to the ethcl.known_gaps table!!! We will stop the application because of that.")
+ }).Fatal("We are unable to write to the eth_beacon.known_gaps table!!! We will stop the application because of that.")
}
log.WithFields(log.Fields{
"startSlot": knModel.StartSlot,
"endSlot": knModel.EndSlot,
- }).Warn("A new gap has been added to the ethcl.known_gaps table.")
- metric.IncrementHeadTrackingKnownGaps(1)
+ }).Warn("A new gap has been added to the eth_beacon.known_gaps table.")
+ metric.IncrementKnownGapsInserts(1)
}
// A function to write the gap between the highest slot in the DB and the first processed slot.
@@ -400,12 +479,121 @@ func writeStartUpGaps(db sql.Database, tableIncrement int, firstSlot int, metric
}).Fatal("Unable to get convert max block from DB to int. We must close the application or we might have undetected gaps.")
}
if maxSlot != firstSlot-1 {
- writeKnownGaps(db, tableIncrement, maxSlot+1, firstSlot-1, fmt.Errorf(""), "startup", metric)
+ if maxSlot < firstSlot-1 {
+ if maxSlot == 0 {
+ writeKnownGaps(db, tableIncrement, maxSlot, firstSlot-1, fmt.Errorf(""), "startup", metric)
+ } else {
+ writeKnownGaps(db, tableIncrement, maxSlot+1, firstSlot-1, fmt.Errorf(""), "startup", metric)
+ }
+ } else {
+ log.WithFields(log.Fields{
+ "maxSlot": maxSlot,
+ "firstSlot": firstSlot,
+ }).Warn("The maxSlot in the DB is greater than or equal to the first Slot we are processing.")
+ }
}
}
+// A function to update a knownGap range with a reprocessing error.
+func updateKnownGapErrors(db sql.Database, startSlot int, endSlot int, reprocessingErr error, metric *BeaconClientMetrics) error {
+ res, err := db.Exec(context.Background(), UpsertKnownGapsErrorStmt, startSlot, endSlot, reprocessingErr.Error())
+ if err != nil {
+ loghelper.LogSlotRangeError(strconv.Itoa(startSlot), strconv.Itoa(endSlot), err).Error("Unable to update reprocessing_error")
+ return err
+ }
+ row, err := res.RowsAffected()
+ if err != nil {
+ loghelper.LogSlotRangeError(strconv.Itoa(startSlot), strconv.Itoa(endSlot), err).Error("Unable to count rows affected when trying to update reprocessing_error.")
+ return err
+ }
+ if row != 1 {
+ loghelper.LogSlotRangeError(strconv.Itoa(startSlot), strconv.Itoa(endSlot), err).WithFields(log.Fields{
+ "rowCount": row,
+ }).Error("The rows affected by the upsert for reprocessing_error is not 1.")
+ metric.IncrementKnownGapsReprocessError(1)
+ return err
+ }
+ metric.IncrementKnownGapsReprocessError(1)
+ return nil
+}
+
// A quick helper function to calculate the epoch.
func calculateEpoch(slot int, slotPerEpoch int) string {
epoch := slot / slotPerEpoch
return strconv.Itoa(epoch)
}
+
+// A helper function to check to see if the slot is processed.
+func isSlotProcessed(db sql.Database, checkProcessStmt string, slot string) (bool, error) {
+ processRow, err := db.Exec(context.Background(), checkProcessStmt, slot)
+ if err != nil {
+ return false, err
+ }
+ row, err := processRow.RowsAffected()
+ if err != nil {
+ return false, err
+ }
+ if row > 0 {
+ return true, nil
+ }
+ return false, nil
+}
+
+// Check to see if this slot is in the DB. Check eth_beacon.slots, eth_beacon.signed_block
+// and eth_beacon.state. If the slot exists, return true
+func IsSlotInDb(ctx context.Context, db sql.Database, slot string, blockRoot string, stateRoot string) (bool, error) {
+ var (
+ isInBeaconState bool
+ isInSignedBeaconBlock bool
+ err error
+ )
+ errG, _ := errgroup.WithContext(context.Background())
+ errG.Go(func() error {
+ select {
+ case <-ctx.Done():
+ return nil
+ default:
+ isInBeaconState, err = checkSlotAndRoot(db, CheckBeaconStateStmt, slot, stateRoot)
+ if err != nil {
+ loghelper.LogError(err).Error("Unable to check if the slot and stateroot exist in eth_beacon.state")
+ }
+ return err
+ }
+ })
+ errG.Go(func() error {
+ select {
+ case <-ctx.Done():
+ return nil
+ default:
+ isInSignedBeaconBlock, err = checkSlotAndRoot(db, CheckSignedBeaconBlockStmt, slot, blockRoot)
+ if err != nil {
+ loghelper.LogError(err).Error("Unable to check if the slot and block_root exist in eth_beacon.signed_block")
+ }
+ return err
+ }
+ })
+ if err := errG.Wait(); err != nil {
+ return false, err
+ }
+ if isInBeaconState && isInSignedBeaconBlock {
+ return true, nil
+ }
+ return false, nil
+}
+
+// Provide a statement, slot, and root, and this function will check to see
+// if the slot and root exist in the table.
+func checkSlotAndRoot(db sql.Database, statement, slot, root string) (bool, error) {
+ processRow, err := db.Exec(context.Background(), statement, slot, root)
+ if err != nil {
+ return false, err
+ }
+ row, err := processRow.RowsAffected()
+ if err != nil {
+ return false, err
+ }
+ if row > 0 {
+ return true, nil
+ }
+ return false, nil
+}
diff --git a/pkg/beaconclient/healthcheck.go b/pkg/beaconclient/healthcheck.go
index fb0a0e5..57ced99 100644
--- a/pkg/beaconclient/healthcheck.go
+++ b/pkg/beaconclient/healthcheck.go
@@ -20,7 +20,7 @@ import (
"net/http"
log "github.com/sirupsen/logrus"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
)
// This function will ensure that we can connect to the beacon client.
diff --git a/pkg/beaconclient/healthcheck_test.go b/pkg/beaconclient/healthcheck_test.go
index dd60bef..c7a642a 100644
--- a/pkg/beaconclient/healthcheck_test.go
+++ b/pkg/beaconclient/healthcheck_test.go
@@ -20,18 +20,27 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
- beaconclient "github.com/vulcanize/ipld-ethcl-indexer/pkg/beaconclient"
+ beaconclient "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
)
var _ = Describe("Healthcheck", func() {
var (
- BC = beaconclient.CreateBeaconClient(context.Background(), "http", "localhost", 5052)
- errBc = beaconclient.CreateBeaconClient(context.Background(), "http", "blah-blah", 1010)
+ Bc *beaconclient.BeaconClient
+ errBc *beaconclient.BeaconClient
)
+
+ BeforeEach(func() {
+ var err error
+ Bc, err = beaconclient.CreateBeaconClient(context.Background(), "http", "localhost", 5052, 10, bcUniqueIdentifier, false)
+ Expect(err).ToNot(HaveOccurred())
+ errBc, err = beaconclient.CreateBeaconClient(context.Background(), "http", "blah-blah", 1010, 10, bcUniqueIdentifier, false)
+ Expect(err).ToNot(HaveOccurred())
+
+ })
Describe("Connecting to the lighthouse client", Label("integration"), func() {
Context("When the client is running", func() {
It("We should connect successfully", func() {
- err := BC.CheckBeaconClient()
+ err := Bc.CheckBeaconClient()
Expect(err).To(BeNil())
})
})
diff --git a/pkg/beaconclient/incomingsse.go b/pkg/beaconclient/incomingsse.go
index 42fee02..cdb4891 100644
--- a/pkg/beaconclient/incomingsse.go
+++ b/pkg/beaconclient/incomingsse.go
@@ -22,7 +22,7 @@ import (
"time"
log "github.com/sirupsen/logrus"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
"golang.org/x/sync/errgroup"
)
@@ -94,5 +94,5 @@ func processMsg[P ProcessedEvents](msg []byte, processCh chan<- *P, errorCh chan
func (bc *BeaconClient) captureEventTopic() {
log.Info("We are capturing all SSE events")
go handleIncomingSseEvent(bc.HeadTracking, bc.Metrics.IncrementHeadError)
- go handleIncomingSseEvent(bc.ReOrgTracking, bc.Metrics.IncrementHeadReorgError)
+ go handleIncomingSseEvent(bc.ReOrgTracking, bc.Metrics.IncrementReorgError)
}
diff --git a/pkg/beaconclient/metrics.go b/pkg/beaconclient/metrics.go
index a6a45c4..d704f1f 100644
--- a/pkg/beaconclient/metrics.go
+++ b/pkg/beaconclient/metrics.go
@@ -17,24 +17,107 @@ package beaconclient
import (
"sync/atomic"
+
+ "github.com/prometheus/client_golang/prometheus"
+ log "github.com/sirupsen/logrus"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
)
+// Create a metric struct and register each counter with prometheus.
+func CreateBeaconClientMetrics() (*BeaconClientMetrics, error) {
+ metrics := &BeaconClientMetrics{
+ SlotInserts: 0,
+ ReorgInserts: 0,
+ KnownGapsInserts: 0,
+ KnownGapsProcessed: 0,
+ KnownGapsReprocessError: 0,
+ HeadError: 0,
+ HeadReorgError: 0,
+ }
+ err := prometheusRegisterHelper("slot_inserts", "Keeps track of the number of slots we have inserted.", &metrics.SlotInserts)
+ if err != nil {
+ return nil, err
+ }
+ err = prometheusRegisterHelper("reorg_inserts", "Keeps track of the number of reorgs we have inserted.", &metrics.ReorgInserts)
+ if err != nil {
+ return nil, err
+ }
+ err = prometheusRegisterHelper("known_gaps_inserts", "Keeps track of the number of known gaps we have inserted.", &metrics.KnownGapsInserts)
+ if err != nil {
+ return nil, err
+ }
+ err = prometheusRegisterHelper("known_gaps_reprocess_error", "Keeps track of the number of known gaps that had errors when reprocessing, but the error was updated successfully.", &metrics.KnownGapsReprocessError)
+ if err != nil {
+ return nil, err
+ }
+ err = prometheusRegisterHelper("known_gaps_processed", "Keeps track of the number of known gaps we successfully processed.", &metrics.KnownGapsProcessed)
+ if err != nil {
+ return nil, err
+ }
+ err = prometheusRegisterHelper("head_error", "Keeps track of the number of errors we had processing head messages.", &metrics.HeadError)
+ if err != nil {
+ return nil, err
+ }
+ err = prometheusRegisterHelper("head_reorg_error", "Keeps track of the number of errors we had processing reorg messages.", &metrics.HeadReorgError)
+ if err != nil {
+ return nil, err
+ }
+ return metrics, nil
+}
+
+func prometheusRegisterHelper(name string, help string, varPointer *uint64) error {
+ err := prometheus.Register(prometheus.NewCounterFunc(
+ prometheus.CounterOpts{
+ Namespace: "beacon_client",
+ Subsystem: "",
+ Name: name,
+ Help: help,
+ ConstLabels: map[string]string{},
+ },
+ func() float64 {
+ return float64(atomic.LoadUint64(varPointer))
+ }))
+ if err != nil && err.Error() != "duplicate metrics collector registration attempted" {
+ loghelper.LogError(err).WithField("name", name).Error("Unable to register counter.")
+ return err
+ }
+ return nil
+}
+
+// A structure utilized for keeping track of various metrics. Currently, mostly used in testing.
+type BeaconClientMetrics struct {
+ SlotInserts uint64 // Number of head events we successfully wrote to the DB.
+ ReorgInserts uint64 // Number of reorg events we successfully wrote to the DB.
+ KnownGapsInserts uint64 // Number of known_gaps we successfully wrote to the DB.
+ KnownGapsProcessed uint64 // Number of knownGaps processed.
+ KnownGapsReprocessError uint64 // Number of knownGaps that were updated with an error.
+ HeadError uint64 // Number of errors that occurred when decoding the head message.
+ HeadReorgError uint64 // Number of errors that occurred when decoding the reorg message.
+}
+
// Wrapper function to increment inserts. If we want to use mutexes later we can easily update all
// occurrences here.
-func (m *BeaconClientMetrics) IncrementHeadTrackingInserts(inc uint64) {
- atomic.AddUint64(&m.HeadTrackingInserts, inc)
+func (m *BeaconClientMetrics) IncrementSlotInserts(inc uint64) {
+ log.Debug("Incrementing Slot Insert")
+ atomic.AddUint64(&m.SlotInserts, inc)
}
// Wrapper function to increment reorgs. If we want to use mutexes later we can easily update all
// occurrences here.
-func (m *BeaconClientMetrics) IncrementHeadTrackingReorgs(inc uint64) {
- atomic.AddUint64(&m.HeadTrackingReorgs, inc)
+func (m *BeaconClientMetrics) IncrementReorgsInsert(inc uint64) {
+ atomic.AddUint64(&m.ReorgInserts, inc)
}
// Wrapper function to increment known gaps. If we want to use mutexes later we can easily update all
// occurrences here.
-func (m *BeaconClientMetrics) IncrementHeadTrackingKnownGaps(inc uint64) {
- atomic.AddUint64(&m.HeadTrackingKnownGaps, inc)
+func (m *BeaconClientMetrics) IncrementKnownGapsInserts(inc uint64) {
+ atomic.AddUint64(&m.KnownGapsInserts, inc)
+}
+
+// Wrapper function to increment known gaps processed. If we want to use mutexes later we can easily update all
+// occurrences here.
+func (m *BeaconClientMetrics) IncrementKnownGapsProcessed(inc uint64) {
+ atomic.AddUint64(&m.KnownGapsProcessed, inc)
}
// Wrapper function to increment head errors. If we want to use mutexes later we can easily update all
@@ -45,6 +128,13 @@ func (m *BeaconClientMetrics) IncrementHeadError(inc uint64) {
// Wrapper function to increment reorg errors. If we want to use mutexes later we can easily update all
// occurrences here.
-func (m *BeaconClientMetrics) IncrementHeadReorgError(inc uint64) {
+func (m *BeaconClientMetrics) IncrementReorgError(inc uint64) {
atomic.AddUint64(&m.HeadReorgError, inc)
}
+
+// Wrapper function to increment the number of knownGaps that were updated with reprocessing errors.
+// If we want to use mutexes later we can easily update all occurrences here.
+func (m *BeaconClientMetrics) IncrementKnownGapsReprocessError(inc uint64) {
+ log.Debug("Incrementing Known Gap Reprocessing: ", &m.KnownGapsReprocessError)
+ atomic.AddUint64(&m.KnownGapsReprocessError, inc)
+}
diff --git a/pkg/beaconclient/models.go b/pkg/beaconclient/models.go
index a80f15c..b36faff 100644
--- a/pkg/beaconclient/models.go
+++ b/pkg/beaconclient/models.go
@@ -51,7 +51,7 @@ type ChainReorg struct {
ExecutionOptimistic bool `json:"execution_optimistic"`
}
-// A struct to capture whats being written to the ethcl.slots table.
+// A struct to capture whats being written to the eth-beacon.slots table.
type DbSlots struct {
Epoch string // The epoch.
Slot string // The slot.
@@ -60,7 +60,7 @@ type DbSlots struct {
Status string // The status, it can be proposed | forked | skipped.
}
-// A struct to capture whats being written to ethcl.signed_beacon_block table.
+// A struct to capture whats being written to eth-beacon.signed_block table.
type DbSignedBeaconBlock struct {
Slot string // The slot.
BlockRoot string // The block root
@@ -70,14 +70,14 @@ type DbSignedBeaconBlock struct {
}
-// A struct to capture whats being written to ethcl.beacon_state table.
+// A struct to capture whats being written to eth-beacon.state table.
type DbBeaconState struct {
Slot string // The slot.
StateRoot string // The state root
MhKey string // The ipld multihash key.
}
-// A structure to capture whats being written to the ethcl.known_gaps table.
+// A structure to capture whats being written to the eth-beacon.known_gaps table.
type DbKnownGaps struct {
StartSlot string // The start slot for known_gaps, inclusive.
EndSlot string // The end slot for known_gaps, inclusive.
diff --git a/pkg/beaconclient/multihash.go b/pkg/beaconclient/multihash.go
index a9918b7..875018b 100644
--- a/pkg/beaconclient/multihash.go
+++ b/pkg/beaconclient/multihash.go
@@ -20,10 +20,10 @@ import (
dshelp "github.com/ipfs/go-ipfs-ds-help"
"github.com/multiformats/go-multihash"
log "github.com/sirupsen/logrus"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
)
-const SSZ_SHA2_256_PREFIX uint64 = 0xb501
+const SSZ_SHA2_256_PREFIX uint64 = 0xb502
// MultihashKeyFromSSZRoot converts a SSZ-SHA2-256 root hash into a blockstore prefixed multihash key
func MultihashKeyFromSSZRoot(root []byte) (string, error) {
diff --git a/pkg/beaconclient/processevents.go b/pkg/beaconclient/processevents.go
index 15e4e16..8dd5520 100644
--- a/pkg/beaconclient/processevents.go
+++ b/pkg/beaconclient/processevents.go
@@ -23,9 +23,6 @@ import (
"strconv"
log "github.com/sirupsen/logrus"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
- "golang.org/x/sync/errgroup"
)
// This function will perform the necessary steps to handle a reorg.
@@ -56,26 +53,20 @@ func (bc *BeaconClient) handleHead() {
if errorSlots != 0 && bc.PreviousSlot != 0 {
log.WithFields(log.Fields{
"lastProcessedSlot": bc.PreviousSlot,
- "errorMessages": errorSlots,
+ "errorSlots": errorSlots,
}).Warn("We added slots to the knownGaps table because we got bad head messages.")
- writeKnownGaps(bc.Db, bc.KnownGapTableIncrement, bc.PreviousSlot, bcSlotsPerEpoch+errorSlots, fmt.Errorf("Bad Head Messages"), "headProcessing", bc.Metrics)
+ writeKnownGaps(bc.Db, bc.KnownGapTableIncrement, bc.PreviousSlot+1, slot, fmt.Errorf("Bad Head Messages"), "headProcessing", bc.Metrics)
+ errorSlots = 0
}
log.WithFields(log.Fields{"head": head}).Debug("We are going to start processing the slot.")
- go func(db sql.Database, serverAddress string, slot int, blockRoot string, stateRoot string, previousSlot int, previousBlockRoot string, metrics *BeaconClientMetrics, knownGapsTableIncrement int) {
- errG := new(errgroup.Group)
- errG.Go(func() error {
- err = processHeadSlot(db, serverAddress, slot, blockRoot, stateRoot, previousSlot, previousBlockRoot, metrics, knownGapsTableIncrement)
- if err != nil {
- return err
- }
- return nil
- })
- if err := errG.Wait(); err != nil {
- loghelper.LogSlotError(strconv.Itoa(slot), err).Error("Unable to process a slot")
- }
- }(bc.Db, bc.ServerEndpoint, slot, head.Block, head.State, bc.PreviousSlot, bc.PreviousBlockRoot, bc.Metrics, bc.KnownGapTableIncrement)
+ // Not used anywhere yet but might be useful to have.
+ if bc.PreviousSlot == 0 && bc.PreviousBlockRoot == "" {
+ bc.StartingSlot = slot
+ }
+
+ go processHeadSlot(bc.Db, bc.ServerEndpoint, slot, head.Block, head.State, bc.PreviousSlot, bc.PreviousBlockRoot, bc.Metrics, bc.KnownGapTableIncrement, bc.CheckDb)
log.WithFields(log.Fields{"head": head.Slot}).Debug("We finished calling processHeadSlot.")
@@ -83,5 +74,4 @@ func (bc *BeaconClient) handleHead() {
bc.PreviousSlot = slot
bc.PreviousBlockRoot = head.Block
}
-
}
diff --git a/pkg/beaconclient/processhistoric.go b/pkg/beaconclient/processhistoric.go
new file mode 100644
index 0000000..dcff8f4
--- /dev/null
+++ b/pkg/beaconclient/processhistoric.go
@@ -0,0 +1,266 @@
+// VulcanizeDB
+// Copyright © 2022 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+// This file contains all the code to process historic slots.
+
+package beaconclient
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "time"
+
+ "github.com/jackc/pgx/v4"
+ log "github.com/sirupsen/logrus"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
+)
+
+var (
+ // Get a single highest priority and non-checked out row from eth_beacon.historic_process
+ getHpEntryStmt string = `SELECT start_slot, end_slot FROM eth_beacon.historic_process
+ WHERE checked_out=false
+ ORDER BY priority ASC
+ LIMIT 1;`
+ // Used to periodically check to see if there is a new entry in the eth_beacon.historic_process table.
+ checkHpEntryStmt string = `SELECT * FROM eth_beacon.historic_process WHERE checked_out=false;`
+ // Used to checkout a row from the eth_beacon.historic_process table
+ lockHpEntryStmt string = `UPDATE eth_beacon.historic_process
+ SET checked_out=true, checked_out_by=$3
+ WHERE start_slot=$1 AND end_slot=$2;`
+ // Used to delete an entry from the eth_beacon.historic_process table
+ deleteHpEntryStmt string = `DELETE FROM eth_beacon.historic_process
+ WHERE start_slot=$1 AND end_slot=$2;`
+ // Used to update every single row that this node has checked out.
+ releaseHpLockStmt string = `UPDATE eth_beacon.historic_process
+ SET checked_out=false, checked_out_by=null
+ WHERE checked_out_by=$1`
+)
+
+type HistoricProcessing struct {
+ db sql.Database //db connection
+ metrics *BeaconClientMetrics // metrics for beaconclient
+ uniqueNodeIdentifier int // node unique identifier.
+}
+
+// Get a single row of historical slots from the table.
+func (hp HistoricProcessing) getSlotRange(ctx context.Context, slotCh chan<- slotsToProcess) []error {
+ return getBatchProcessRow(ctx, hp.db, getHpEntryStmt, checkHpEntryStmt, lockHpEntryStmt, slotCh, strconv.Itoa(hp.uniqueNodeIdentifier))
+}
+
+// Remove the table entry.
+func (hp HistoricProcessing) removeTableEntry(ctx context.Context, processCh <-chan slotsToProcess) error {
+ return removeRowPostProcess(ctx, hp.db, processCh, QueryBySlotStmt, deleteHpEntryStmt)
+}
+
+// Handle errors that arise while processing historic slots.
+func (hp HistoricProcessing) handleProcessingErrors(ctx context.Context, errMessages <-chan batchHistoricError) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case errMs := <-errMessages:
+ loghelper.LogSlotError(strconv.Itoa(errMs.slot), errMs.err)
+ writeKnownGaps(hp.db, 1, errMs.slot, errMs.slot, errMs.err, errMs.errProcess, hp.metrics)
+ }
+ }
+}
+
+// "un"-checkout the rows held by this DB in the eth_beacon.historical_process table.
+func (hp HistoricProcessing) releaseDbLocks(cancel context.CancelFunc) error {
+ cancel()
+ log.Debug("Updating all the entries to eth_beacon.historical processing")
+ log.Debug("Db: ", hp.db)
+ log.Debug("hp.uniqueNodeIdentifier ", hp.uniqueNodeIdentifier)
+ res, err := hp.db.Exec(context.Background(), releaseHpLockStmt, hp.uniqueNodeIdentifier)
+ if err != nil {
+ return fmt.Errorf("Unable to remove lock from eth_beacon.historical_processing table for node %d, error is %e", hp.uniqueNodeIdentifier, err)
+ }
+ log.Debug("Update all the entries to eth_beacon.historical processing")
+ rows, err := res.RowsAffected()
+ if err != nil {
+ return fmt.Errorf("Unable to calculated number of rows affected by releasing locks from eth_beacon.historical_processing table for node %d, error is %e", hp.uniqueNodeIdentifier, err)
+ }
+ log.WithField("rowCount", rows).Info("Released historicalProcess locks for specified rows.")
+ return nil
+}
+
+// Process the slot range.
+func processSlotRangeWorker(ctx context.Context, workCh <-chan int, errCh chan<- batchHistoricError, db sql.Database, serverAddress string, metrics *BeaconClientMetrics, checkDb bool) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case slot := <-workCh:
+ log.Debug("Handling slot: ", slot)
+ err, errProcess := handleHistoricSlot(ctx, db, serverAddress, slot, metrics, checkDb)
+ if err != nil {
+ errMs := batchHistoricError{
+ err: err,
+ errProcess: errProcess,
+ slot: slot,
+ }
+ errCh <- errMs
+ }
+ }
+ }
+}
+
+// A wrapper function that inserts the start_slot and end_slot from a single row into a channel.
+// It also locks the row by updating the checked_out column.
+// The statement for getting the start_slot and end_slot must be provided.
+// The statement for "locking" the row must also be provided.
+func getBatchProcessRow(ctx context.Context, db sql.Database, getStartEndSlotStmt string, checkNewRowsStmt string, checkOutRowStmt string, slotCh chan<- slotsToProcess, uniqueNodeIdentifier string) []error {
+ errCount := make([]error, 0)
+
+ // 5 is an arbitrary number. It allows us to retry a few times before
+ // ending the application.
+ prevErrCount := 0
+ for len(errCount) < 5 {
+ select {
+ case <-ctx.Done():
+ return errCount
+ default:
+ if len(errCount) != prevErrCount {
+ log.WithFields(log.Fields{
+ "errCount": errCount,
+ }).Error("New error entry added")
+ }
+ processRow, err := db.Exec(context.Background(), checkNewRowsStmt)
+ if err != nil {
+ errCount = append(errCount, err)
+ }
+ row, err := processRow.RowsAffected()
+ if err != nil {
+ errCount = append(errCount, err)
+ }
+ if row < 1 {
+ time.Sleep(1000 * time.Millisecond)
+ log.Debug("We are checking rows, be patient")
+ break
+ }
+ log.Debug("We found a new row")
+ dbCtx := context.Background()
+
+ // Setup TX
+ tx, err := db.Begin(dbCtx)
+ if err != nil {
+ loghelper.LogError(err).Error("We are unable to Begin a SQL transaction")
+ errCount = append(errCount, err)
+ break
+ }
+ defer func() {
+ err := tx.Rollback(dbCtx)
+ if err != nil && err != pgx.ErrTxClosed {
+ loghelper.LogError(err).Error("We were unable to Rollback a transaction")
+ errCount = append(errCount, err)
+ }
+ }()
+
+ // Query the DB for slots.
+ sp := slotsToProcess{}
+ err = tx.QueryRow(dbCtx, getStartEndSlotStmt).Scan(&sp.startSlot, &sp.endSlot)
+ if err != nil {
+ if err == pgx.ErrNoRows {
+ time.Sleep(100 * time.Millisecond)
+ break
+ }
+ loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), getStartEndSlotStmt, err).Error("Unable to get a row")
+ errCount = append(errCount, err)
+ break
+ }
+
+ // Checkout the Row
+ res, err := tx.Exec(dbCtx, checkOutRowStmt, sp.startSlot, sp.endSlot, uniqueNodeIdentifier)
+ if err != nil {
+ loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), checkOutRowStmt, err).Error("Unable to checkout the row")
+ errCount = append(errCount, err)
+ break
+ }
+ rows, err := res.RowsAffected()
+ if err != nil {
+ loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), checkOutRowStmt, fmt.Errorf("Unable to determine the rows affected when trying to checkout a row."))
+ errCount = append(errCount, err)
+ break
+ }
+ if rows > 1 {
+ loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), checkOutRowStmt, err).WithFields(log.Fields{
+ "rowsReturn": rows,
+ }).Error("We locked too many rows.....")
+ errCount = append(errCount, err)
+ break
+ }
+ if rows == 0 {
+ loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), checkOutRowStmt, err).WithFields(log.Fields{
+ "rowsReturn": rows,
+ }).Error("We did not lock a single row.")
+ errCount = append(errCount, err)
+ break
+ }
+ err = tx.Commit(dbCtx)
+ if err != nil {
+ loghelper.LogSlotRangeError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), err).Error("Unable commit transactions.")
+ errCount = append(errCount, err)
+ break
+ }
+ log.WithField("slots", sp).Debug("Added a new slots to be processed")
+ slotCh <- sp
+ }
+ }
+ log.WithFields(log.Fields{
+ "ErrCount": errCount,
+ }).Error("The ErrCounter")
+ return errCount
+}
+
+// After a row has been processed it should be removed from its appropriate table.
+func removeRowPostProcess(ctx context.Context, db sql.Database, processCh <-chan slotsToProcess, checkProcessedStmt, removeStmt string) error {
+ errCh := make(chan error)
+ for {
+ select {
+ case <-ctx.Done():
+ return nil
+ case slots := <-processCh:
+ // Make sure the start and end slot exist in the slots table.
+ go func() {
+ finishedProcess := false
+ for !finishedProcess {
+ isStartProcess, err := isSlotProcessed(db, checkProcessedStmt, strconv.Itoa(slots.startSlot))
+ if err != nil {
+ errCh <- err
+ }
+ isEndProcess, err := isSlotProcessed(db, checkProcessedStmt, strconv.Itoa(slots.endSlot))
+ if err != nil {
+ errCh <- err
+ }
+ if isStartProcess && isEndProcess {
+ finishedProcess = true
+ }
+ }
+
+ _, err := db.Exec(context.Background(), removeStmt, strconv.Itoa(slots.startSlot), strconv.Itoa(slots.endSlot))
+ if err != nil {
+ errCh <- err
+ }
+
+ }()
+ if len(errCh) != 0 {
+ return <-errCh
+ }
+ }
+ }
+}
diff --git a/pkg/beaconclient/processknowngaps.go b/pkg/beaconclient/processknowngaps.go
new file mode 100644
index 0000000..c54d37c
--- /dev/null
+++ b/pkg/beaconclient/processknowngaps.go
@@ -0,0 +1,139 @@
+// VulcanizeDB
+// Copyright © 2022 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+// This file contains all the code to process known gaps.
+
+package beaconclient
+
+import (
+ "context"
+ "strconv"
+
+ log "github.com/sirupsen/logrus"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
+)
+
+var (
+ // Get a single non-checked out row from eth_beacon.known_gaps.
+ getKgEntryStmt string = `SELECT start_slot, end_slot FROM eth_beacon.known_gaps
+ WHERE checked_out=false
+ ORDER BY priority ASC
+ LIMIT 1;`
+ // Used to periodically check to see if there is a new entry in the eth_beacon.known_gaps table.
+ checkKgEntryStmt string = `SELECT * FROM eth_beacon.known_gaps WHERE checked_out=false;`
+ // Used to checkout a row from the eth_beacon.known_gaps table
+ lockKgEntryStmt string = `UPDATE eth_beacon.known_gaps
+ SET checked_out=true, checked_out_by=$3
+ WHERE start_slot=$1 AND end_slot=$2;`
+ // Used to delete an entry from the knownGaps table
+ deleteKgEntryStmt string = `DELETE FROM eth_beacon.known_gaps
+ WHERE start_slot=$1 AND end_slot=$2;`
+ // Used to check to see if a single slot exists in the known_gaps table.
+ checkKgSingleSlotStmt string = `SELECT start_slot, end_slot FROM eth_beacon.known_gaps
+ WHERE start_slot=$1 AND end_slot=$2;`
+ // Used to update every single row that this node has checked out.
+ releaseKgLockStmt string = `UPDATE eth_beacon.known_gaps
+ SET checked_out=false, checked_out_by=null
+ WHERE checked_out_by=$1`
+)
+
+type KnownGapsProcessing struct {
+ db sql.Database //db connection
+ metrics *BeaconClientMetrics // metrics for beaconclient
+ uniqueNodeIdentifier int // node unique identifier.
+}
+
+// This function will perform all the heavy lifting for processing known gaps.
+func (bc *BeaconClient) ProcessKnownGaps(ctx context.Context, maxWorkers int) []error {
+ log.Info("We are starting the known gaps processing service.")
+ bc.KnownGapsProcess = KnownGapsProcessing{db: bc.Db, uniqueNodeIdentifier: bc.UniqueNodeIdentifier, metrics: bc.Metrics}
+ errs := handleBatchProcess(ctx, maxWorkers, bc.KnownGapsProcess, bc.KnownGapsProcess.db, bc.ServerEndpoint, bc.Metrics, bc.CheckDb)
+ log.Debug("Exiting known gaps processing service")
+ return errs
+}
+
+// This function will perform all the necessary clean up tasks for stopping known gaps processing.
+func (bc *BeaconClient) StopKnownGapsProcessing(cancel context.CancelFunc) error {
+ log.Info("We are stopping the known gaps processing service.")
+ err := bc.KnownGapsProcess.releaseDbLocks(cancel)
+ if err != nil {
+ loghelper.LogError(err).WithField("uniqueIdentifier", bc.UniqueNodeIdentifier).Error("We were unable to remove the locks from the eth_beacon.known_gaps table. Manual Intervention is needed!")
+ }
+ return nil
+}
+
+// Get a single row of known gaps from the table.
+func (kgp KnownGapsProcessing) getSlotRange(ctx context.Context, slotCh chan<- slotsToProcess) []error {
+ return getBatchProcessRow(ctx, kgp.db, getKgEntryStmt, checkKgEntryStmt, lockKgEntryStmt, slotCh, strconv.Itoa(kgp.uniqueNodeIdentifier))
+}
+
+// Remove the table entry.
+func (kgp KnownGapsProcessing) removeTableEntry(ctx context.Context, processCh <-chan slotsToProcess) error {
+ return removeRowPostProcess(ctx, kgp.db, processCh, QueryBySlotStmt, deleteKgEntryStmt)
+}
+
+// Handle errors that arise while processing known gaps.
+func (kgp KnownGapsProcessing) handleProcessingErrors(ctx context.Context, errMessages <-chan batchHistoricError) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case errMs := <-errMessages:
+ // Check to see if this if this entry already exists.
+ res, err := kgp.db.Exec(context.Background(), checkKgSingleSlotStmt, errMs.slot, errMs.slot)
+ if err != nil {
+ loghelper.LogSlotError(strconv.Itoa(errMs.slot), err).Error("Unable to see if this slot is in the eth_beacon.known_gaps table")
+ }
+
+ rows, err := res.RowsAffected()
+ if err != nil {
+ loghelper.LogSlotError(strconv.Itoa(errMs.slot), err).WithFields(log.Fields{
+ "queryStatement": checkKgSingleSlotStmt,
+ }).Error("Unable to get the number of rows affected by this statement.")
+ }
+
+ if rows > 0 {
+ loghelper.LogSlotError(strconv.Itoa(errMs.slot), errMs.err).Error("We received an error when processing a knownGap")
+ err = updateKnownGapErrors(kgp.db, errMs.slot, errMs.slot, errMs.err, kgp.metrics)
+ if err != nil {
+ loghelper.LogSlotError(strconv.Itoa(errMs.slot), err).Error("Error processing known gap")
+ }
+ } else {
+ writeKnownGaps(kgp.db, 1, errMs.slot, errMs.slot, errMs.err, errMs.errProcess, kgp.metrics)
+ }
+ }
+ }
+
+}
+
+// Update the checked_out column for the uniqueNodeIdentifier.
+func (kgp KnownGapsProcessing) releaseDbLocks(cancel context.CancelFunc) error {
+ cancel()
+ log.Debug("Updating all the entries to eth_beacon.known_gaps")
+ log.Debug("Db: ", kgp.db)
+ log.Debug("kgp.uniqueNodeIdentifier ", kgp.uniqueNodeIdentifier)
+ res, err := kgp.db.Exec(context.Background(), releaseKgLockStmt, kgp.uniqueNodeIdentifier)
+ if err != nil {
+ return err
+ }
+ rows, err := res.RowsAffected()
+ if err != nil {
+ return err
+ }
+ log.WithField("rowCount", rows).Info("Released knownGaps locks for specified rows.")
+ return nil
+}
diff --git a/pkg/beaconclient/processslot.go b/pkg/beaconclient/processslot.go
index 4e60109..3fb9465 100644
--- a/pkg/beaconclient/processslot.go
+++ b/pkg/beaconclient/processslot.go
@@ -26,6 +26,7 @@ import (
"strconv"
"strings"
+ "github.com/jackc/pgx/v4"
si "github.com/prysmaticlabs/prysm/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
dt "github.com/prysmaticlabs/prysm/encoding/ssz/detect"
@@ -35,17 +36,13 @@ import (
state "github.com/prysmaticlabs/prysm/beacon-chain/state"
log "github.com/sirupsen/logrus"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
"golang.org/x/sync/errgroup"
)
var (
- SlotUnmarshalError = func(obj string) string {
- return fmt.Sprintf("Unable to properly unmarshal the Slot field in the %s.", obj)
- }
ParentRootUnmarshalError = "Unable to properly unmarshal the ParentRoot field in the SignedBeaconBlock."
- MissingIdentifiedError = "Can't query state without a set slot or block_root"
MissingEth1Data = "Can't get the Eth1 block_hash"
VersionedUnmarshalerError = "Unable to create a versioned unmarshaler"
)
@@ -73,97 +70,136 @@ type ProcessSlot struct {
// DB Write objects
DbSlotsModel *DbSlots // The model being written to the slots table.
- DbSignedBeaconBlockModel *DbSignedBeaconBlock // The model being written to the signed_beacon_block table.
- DbBeaconState *DbBeaconState // The model being written to the beacon_state table.
+ DbSignedBeaconBlockModel *DbSignedBeaconBlock // The model being written to the signed_block table.
+ DbBeaconState *DbBeaconState // The model being written to the state table.
}
// This function will do all the work to process the slot and write it to the DB.
-func processFullSlot(db sql.Database, serverAddress string, slot int, blockRoot string, stateRoot string, previousSlot int, previousBlockRoot string, headOrHistoric string, metrics *BeaconClientMetrics, knownGapsTableIncrement int) error {
- ps := &ProcessSlot{
- Slot: slot,
- BlockRoot: blockRoot,
- StateRoot: stateRoot,
- HeadOrHistoric: headOrHistoric,
- Db: db,
- Metrics: metrics,
- }
-
- g, _ := errgroup.WithContext(context.Background())
- vUnmarshalerCh := make(chan *dt.VersionedUnmarshaler, 1)
-
- // Get the BeaconState.
- g.Go(func() error {
- err := ps.getBeaconState(serverAddress, vUnmarshalerCh)
- if err != nil {
- return err
+// It will return the error and error process. The error process is used for providing rich detail to the
+// known_gaps table.
+func processFullSlot(ctx context.Context, db sql.Database, serverAddress string, slot int, blockRoot string, stateRoot string, previousSlot int, previousBlockRoot string, headOrHistoric string, metrics *BeaconClientMetrics, knownGapsTableIncrement int, checkDb bool) (error, string) {
+ select {
+ case <-ctx.Done():
+ return nil, ""
+ default:
+ ps := &ProcessSlot{
+ Slot: slot,
+ BlockRoot: blockRoot,
+ StateRoot: stateRoot,
+ HeadOrHistoric: headOrHistoric,
+ Db: db,
+ Metrics: metrics,
}
- return nil
- })
- // Get the SignedBeaconBlock.
- g.Go(func() error {
- err := ps.getSignedBeaconBlock(serverAddress, vUnmarshalerCh)
- if err != nil {
- return err
+ g, _ := errgroup.WithContext(context.Background())
+ vUnmarshalerCh := make(chan *dt.VersionedUnmarshaler, 1)
+
+ // Get the BeaconState.
+ g.Go(func() error {
+ select {
+ case <-ctx.Done():
+ return nil
+ default:
+ err := ps.getBeaconState(serverAddress, vUnmarshalerCh)
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+ })
+
+ // Get the SignedBeaconBlock.
+ g.Go(func() error {
+ select {
+ case <-ctx.Done():
+ return nil
+ default:
+ err := ps.getSignedBeaconBlock(serverAddress, vUnmarshalerCh)
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+ })
+
+ if err := g.Wait(); err != nil {
+ return err, "processSlot"
}
- return nil
- })
- if err := g.Wait(); err != nil {
- writeKnownGaps(ps.Db, 1, ps.Slot, ps.Slot, err, "processSlot", ps.Metrics)
- return err
- }
+ finalBlockRoot, finalStateRoot, finalEth1BlockHash, err := ps.provideFinalHash()
+ if err != nil {
+ return err, "CalculateBlockRoot"
+ }
+ if checkDb {
+ inDb, err := IsSlotInDb(ctx, ps.Db, strconv.Itoa(ps.Slot), finalBlockRoot, finalStateRoot)
+ if err != nil {
+ return err, "checkDb"
+ }
+ if inDb {
+ log.WithField("slot", slot).Info("Slot already in the DB.")
+ return nil, ""
+ }
+ }
- if ps.HeadOrHistoric == "head" && previousSlot == 0 && previousBlockRoot == "" {
- writeStartUpGaps(db, knownGapsTableIncrement, ps.Slot, ps.Metrics)
- }
+ // Get this object ready to write
+ dw, err := ps.createWriteObjects(finalBlockRoot, finalStateRoot, finalEth1BlockHash)
+ if err != nil {
+ return err, "blockRoot"
+ }
+ // Write the object to the DB.
+ defer func() {
+ err := dw.Tx.Rollback(dw.Ctx)
+ if err != nil && err != pgx.ErrTxClosed {
+ loghelper.LogError(err).Error("We were unable to Rollback a transaction")
+ }
+ }()
+ err = dw.transactFullSlot()
+ if err != nil {
+ return err, "processSlot"
+ }
- // Get this object ready to write
- blockRootEndpoint := serverAddress + BcBlockRootEndpoint(strconv.Itoa(ps.Slot))
- dw, err := ps.createWriteObjects(blockRootEndpoint)
- if err != nil {
- writeKnownGaps(ps.Db, 1, ps.Slot, ps.Slot, err, "blockRoot", ps.Metrics)
- return err
- }
- // Write the object to the DB.
- err = dw.writeFullSlot()
- if err != nil {
- writeKnownGaps(ps.Db, 1, ps.Slot, ps.Slot, err, "processSlot", ps.Metrics)
- return err
- }
+ // Handle any reorgs or skipped slots.
+ headOrHistoric = strings.ToLower(headOrHistoric)
+ if headOrHistoric != "head" && headOrHistoric != "historic" {
+ return fmt.Errorf("headOrHistoric must be either historic or head!"), ""
+ }
+ if ps.HeadOrHistoric == "head" && previousSlot != 0 && previousBlockRoot != "" && ps.Status != "skipped" {
+ ps.checkPreviousSlot(dw.Tx, dw.Ctx, previousSlot, previousBlockRoot, knownGapsTableIncrement)
+ }
- // Handle any reorgs or skipped slots.
- headOrHistoric = strings.ToLower(headOrHistoric)
- if headOrHistoric != "head" && headOrHistoric != "historic" {
- return fmt.Errorf("headOrHistoric must be either historic or head!")
+ // Commit the transaction
+ if err = dw.Tx.Commit(dw.Ctx); err != nil {
+ return err, "transactionCommit"
+ }
+
+ return nil, ""
}
- if ps.HeadOrHistoric == "head" && previousSlot != 0 && previousBlockRoot != "" && ps.Status != "skipped" {
- ps.checkPreviousSlot(previousSlot, previousBlockRoot, knownGapsTableIncrement)
- }
- return nil
}
// Handle a slot that is at head. A wrapper function for calling `handleFullSlot`.
-func processHeadSlot(db sql.Database, serverAddress string, slot int, blockRoot string, stateRoot string, previousSlot int, previousBlockRoot string, metrics *BeaconClientMetrics, knownGapsTableIncrement int) error {
- return processFullSlot(db, serverAddress, slot, blockRoot, stateRoot, previousSlot, previousBlockRoot, "head", metrics, knownGapsTableIncrement)
+func processHeadSlot(db sql.Database, serverAddress string, slot int, blockRoot string, stateRoot string, previousSlot int, previousBlockRoot string, metrics *BeaconClientMetrics, knownGapsTableIncrement int, checkDb bool) {
+ // Get the knownGaps at startUp.
+ if previousSlot == 0 && previousBlockRoot == "" {
+ writeStartUpGaps(db, knownGapsTableIncrement, slot, metrics)
+ }
+ err, errReason := processFullSlot(context.Background(), db, serverAddress, slot, blockRoot, stateRoot, previousSlot, previousBlockRoot, "head", metrics, knownGapsTableIncrement, checkDb)
+ if err != nil {
+ writeKnownGaps(db, knownGapsTableIncrement, slot, slot, err, errReason, metrics)
+ }
}
// Handle a historic slot. A wrapper function for calling `handleFullSlot`.
-// Commented because of the linter...... LOL
-//func handleHistoricSlot(db sql.Database, serverAddress string, slot int) error {
-// return handleFullSlot(db, serverAddress, slot, "", "", 0, "", "historic")
-//}
+func handleHistoricSlot(ctx context.Context, db sql.Database, serverAddress string, slot int, metrics *BeaconClientMetrics, checkDb bool) (error, string) {
+ return processFullSlot(ctx, db, serverAddress, slot, "", "", 0, "", "historic", metrics, 1, checkDb)
+}
// Update the SszSignedBeaconBlock and FullSignedBeaconBlock object with their respective values.
func (ps *ProcessSlot) getSignedBeaconBlock(serverAddress string, vmCh <-chan *dt.VersionedUnmarshaler) error {
var blockIdentifier string // Used to query the block
if ps.BlockRoot != "" {
blockIdentifier = ps.BlockRoot
- } else if ps.Slot != 0 {
- blockIdentifier = strconv.Itoa(ps.Slot)
} else {
- log.Error(MissingIdentifiedError)
- return fmt.Errorf(MissingIdentifiedError)
+ blockIdentifier = strconv.Itoa(ps.Slot)
}
blockEndpoint := serverAddress + BcBlockQueryEndpoint + blockIdentifier
var err error
@@ -189,15 +225,8 @@ func (ps *ProcessSlot) getSignedBeaconBlock(serverAddress string, vmCh <-chan *d
ps.FullSignedBeaconBlock, err = vm.UnmarshalBeaconBlock(ps.SszSignedBeaconBlock)
if err != nil {
- loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error("We are getting an error message when unmarshalling the SignedBeaconBlock.")
- if ps.FullSignedBeaconBlock.Block().Slot() == 0 {
- loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error(SlotUnmarshalError("SignedBeaconBlock"))
- return fmt.Errorf(SlotUnmarshalError("SignedBeaconBlock"))
- } else if ps.FullSignedBeaconBlock.Block().ParentRoot() == nil {
- loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error(ParentRootUnmarshalError)
- return fmt.Errorf(ParentRootUnmarshalError)
- }
- log.Warn("We received a processing error: ", err)
+ loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Warn("Unable to process the slots SignedBeaconBlock")
+ return nil
}
ps.ParentBlockRoot = "0x" + hex.EncodeToString(ps.FullSignedBeaconBlock.Block().ParentRoot())
return nil
@@ -208,11 +237,8 @@ func (ps *ProcessSlot) getBeaconState(serverEndpoint string, vmCh chan<- *dt.Ver
var stateIdentifier string // Used to query the state
if ps.StateRoot != "" {
stateIdentifier = ps.StateRoot
- } else if ps.Slot != 0 {
- stateIdentifier = strconv.Itoa(ps.Slot)
} else {
- log.Error(MissingIdentifiedError)
- return fmt.Errorf(MissingIdentifiedError)
+ stateIdentifier = strconv.Itoa(ps.Slot)
}
stateEndpoint := serverEndpoint + BcStateQueryEndpoint + stateIdentifier
ps.SszBeaconState, _, _ = querySsz(stateEndpoint, strconv.Itoa(ps.Slot))
@@ -226,53 +252,68 @@ func (ps *ProcessSlot) getBeaconState(serverEndpoint string, vmCh chan<- *dt.Ver
vmCh <- versionedUnmarshaler
ps.FullBeaconState, err = versionedUnmarshaler.UnmarshalBeaconState(ps.SszBeaconState)
if err != nil {
- loghelper.LogError(err).Error("We are getting an error message when unmarshalling the BeaconState")
- if ps.FullBeaconState.Slot() == 0 {
- loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error(SlotUnmarshalError("BeaconState"))
- return fmt.Errorf(SlotUnmarshalError("BeaconState"))
- } else if hex.EncodeToString(ps.FullBeaconState.Eth1Data().BlockHash) == "" {
- loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error(MissingEth1Data)
- return fmt.Errorf(MissingEth1Data)
- }
+ loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error("Unable to process the slots BeaconState")
+ return err
}
return nil
}
// Check to make sure that the previous block we processed is the parent of the current block.
-func (ps *ProcessSlot) checkPreviousSlot(previousSlot int, previousBlockRoot string, knownGapsTableIncrement int) {
+func (ps *ProcessSlot) checkPreviousSlot(tx sql.Tx, ctx context.Context, previousSlot int, previousBlockRoot string, knownGapsTableIncrement int) {
parentRoot := "0x" + hex.EncodeToString(ps.FullSignedBeaconBlock.Block().ParentRoot())
if previousSlot == int(ps.FullBeaconState.Slot()) {
log.WithFields(log.Fields{
- "slot": ps.FullBeaconState.Slot,
+ "slot": ps.FullBeaconState.Slot(),
"fork": true,
}).Warn("A fork occurred! The previous slot and current slot match.")
- writeReorgs(ps.Db, strconv.Itoa(ps.Slot), ps.BlockRoot, ps.Metrics)
+ transactReorgs(tx, ctx, strconv.Itoa(ps.Slot), ps.BlockRoot, ps.Metrics)
+ } else if previousSlot > int(ps.FullBeaconState.Slot()) {
+ log.WithFields(log.Fields{
+ "previousSlot": previousSlot,
+ "curSlot": int(ps.FullBeaconState.Slot()),
+ }).Warn("We noticed the previous slot is greater than the current slot.")
} else if previousSlot+1 != int(ps.FullBeaconState.Slot()) {
log.WithFields(log.Fields{
"previousSlot": previousSlot,
"currentSlot": ps.FullBeaconState.Slot(),
}).Error("We skipped a few slots.")
- writeKnownGaps(ps.Db, knownGapsTableIncrement, previousSlot+1, int(ps.FullBeaconState.Slot())-1, fmt.Errorf("Gaps during head processing"), "headGaps", ps.Metrics)
+ transactKnownGaps(tx, ctx, knownGapsTableIncrement, previousSlot+1, int(ps.FullBeaconState.Slot())-1, fmt.Errorf("Gaps during head processing"), "headGaps", ps.Metrics)
} else if previousBlockRoot != parentRoot {
log.WithFields(log.Fields{
"previousBlockRoot": previousBlockRoot,
"currentBlockParent": parentRoot,
}).Error("The previousBlockRoot does not match the current blocks parent, an unprocessed fork might have occurred.")
- writeReorgs(ps.Db, strconv.Itoa(previousSlot), parentRoot, ps.Metrics)
+ transactReorgs(tx, ctx, strconv.Itoa(previousSlot), parentRoot, ps.Metrics)
} else {
log.Debug("Previous Slot and Current Slot are one distance from each other.")
}
}
// Transforms all the raw data into DB models that can be written to the DB.
-func (ps *ProcessSlot) createWriteObjects(blockRootEndpoint string) (*DatabaseWriter, error) {
+func (ps *ProcessSlot) createWriteObjects(blockRoot, stateRoot, eth1BlockHash string) (*DatabaseWriter, error) {
+ var status string
+ if ps.Status != "" {
+ status = ps.Status
+ } else {
+ status = "proposed"
+ }
+
+ dw, err := CreateDatabaseWrite(ps.Db, ps.Slot, stateRoot, blockRoot, ps.ParentBlockRoot, eth1BlockHash, status, ps.SszSignedBeaconBlock, ps.SszBeaconState, ps.Metrics)
+ if err != nil {
+ return dw, err
+ }
+
+ return dw, nil
+}
+
+// This function will return the final blockRoot, stateRoot, and eth1BlockHash that will be
+// used to write to a DB
+func (ps *ProcessSlot) provideFinalHash() (string, string, string, error) {
var (
stateRoot string
blockRoot string
- status string
eth1BlockHash string
)
-
if ps.Status == "skipped" {
stateRoot = ""
blockRoot = ""
@@ -289,24 +330,15 @@ func (ps *ProcessSlot) createWriteObjects(blockRootEndpoint string) (*DatabaseWr
blockRoot = ps.BlockRoot
} else {
var err error
- blockRoot, err = queryBlockRoot(blockRootEndpoint, strconv.Itoa(ps.Slot))
+ rawBlockRoot, err := ps.FullSignedBeaconBlock.Block().HashTreeRoot()
+ //blockRoot, err = queryBlockRoot(blockRootEndpoint, strconv.Itoa(ps.Slot))
if err != nil {
- return nil, err
+ return "", "", "", err
}
+ blockRoot = "0x" + hex.EncodeToString(rawBlockRoot[:])
+ log.WithFields(log.Fields{"blockRoot": blockRoot}).Debug("Block Root from ssz")
}
eth1BlockHash = "0x" + hex.EncodeToString(ps.FullSignedBeaconBlock.Block().Body().Eth1Data().BlockHash)
}
-
- if ps.Status != "" {
- status = ps.Status
- } else {
- status = "proposed"
- }
-
- dw, err := CreateDatabaseWrite(ps.Db, ps.Slot, stateRoot, blockRoot, ps.ParentBlockRoot, eth1BlockHash, status, ps.SszSignedBeaconBlock, ps.SszBeaconState, ps.Metrics)
- if err != nil {
- return dw, err
- }
-
- return dw, nil
+ return blockRoot, stateRoot, eth1BlockHash, nil
}
diff --git a/pkg/beaconclient/queryserver.go b/pkg/beaconclient/queryserver.go
index b863c7b..5294335 100644
--- a/pkg/beaconclient/queryserver.go
+++ b/pkg/beaconclient/queryserver.go
@@ -24,7 +24,7 @@ import (
"net/http"
log "github.com/sirupsen/logrus"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
)
// A helper function to query endpoints that utilize slots.
diff --git a/pkg/beaconclient/systemvalidation_test.go b/pkg/beaconclient/systemvalidation_test.go
new file mode 100644
index 0000000..7aabd41
--- /dev/null
+++ b/pkg/beaconclient/systemvalidation_test.go
@@ -0,0 +1,69 @@
+package beaconclient_test
+
+import (
+ "os"
+ "strconv"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ //. "github.com/onsi/gomega"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
+)
+
+var (
+ prodConfig = Config{
+ protocol: os.Getenv("bc_protocol"),
+ address: os.Getenv("bc_address"),
+ port: getEnvInt(os.Getenv("bc_port")),
+ dbHost: os.Getenv("db_host"),
+ dbPort: getEnvInt(os.Getenv("db_port")),
+ dbName: os.Getenv("db_name"),
+ dbUser: os.Getenv("db_user"),
+ dbPassword: os.Getenv("db_password"),
+ dbDriver: os.Getenv("db_driver"),
+ knownGapsTableIncrement: 100000000,
+ bcUniqueIdentifier: 100,
+ checkDb: false,
+ }
+)
+var _ = Describe("Systemvalidation", Label("system"), func() {
+ Describe("Run the application against a running lighthouse node", func() {
+ Context("When we receive head messages", func() {
+ It("We should process the messages successfully", func() {
+ bc := setUpTest(prodConfig, "10000000000")
+ processProdHeadBlocks(bc, 3, 0, 0, 0)
+ })
+ })
+ Context("When we have historical and knownGaps slots to process", Label("system-batch"), func() {
+ It("Should process them successfully", func() {
+ bc := setUpTest(prodConfig, "10000000000")
+ //known Gaps
+ BeaconNodeTester.writeEventToKnownGaps(bc, 100, 101)
+ BeaconNodeTester.runKnownGapsProcess(bc, 2, 2, 0, 0, 0)
+
+ // Historical
+ BeaconNodeTester.writeEventToHistoricProcess(bc, 2375703, 2375703, 10)
+ BeaconNodeTester.runHistoricalProcess(bc, 2, 3, 0, 0, 0)
+
+ time.Sleep(2 * time.Second)
+ validatePopularBatchBlocks(bc)
+ })
+ })
+ })
+})
+
+// Wrapper function to get int env variables.
+func getEnvInt(envVar string) int {
+ val, err := strconv.Atoi(envVar)
+ if err != nil {
+ return 0
+ }
+ return val
+}
+
+// Start head tracking and wait for the expected results.
+func processProdHeadBlocks(bc *beaconclient.BeaconClient, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) {
+ go bc.CaptureHead()
+ time.Sleep(1 * time.Second)
+ validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError)
+}
diff --git a/pkg/database/sql/postgres/database.go b/pkg/database/sql/postgres/database.go
index c8451f8..942d8f7 100644
--- a/pkg/database/sql/postgres/database.go
+++ b/pkg/database/sql/postgres/database.go
@@ -20,8 +20,8 @@ import (
"fmt"
log "github.com/sirupsen/logrus"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
)
var _ sql.Database = &DB{}
@@ -49,7 +49,7 @@ func SetupPostgresDb(dbHostname string, dbPort int, dbName string, dbUsername st
"driver_name_provided": driverName,
}).Error("Can't resolve driver type")
}
- log.Info("Using Driver:", DbDriver)
+ log.Info("Using Driver: ", DbDriver)
postgresConfig := Config{
Hostname: dbHostname,
diff --git a/pkg/database/sql/postgres/pgx.go b/pkg/database/sql/postgres/pgx.go
index 0845915..9ad6c69 100644
--- a/pkg/database/sql/postgres/pgx.go
+++ b/pkg/database/sql/postgres/pgx.go
@@ -23,7 +23,7 @@ import (
"github.com/jackc/pgconn"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/pgxpool"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
)
// pgxDriver driver, implements sql.Driver
diff --git a/pkg/database/sql/postgres/pgx_test.go b/pkg/database/sql/postgres/pgx_test.go
index f601abe..43266f3 100644
--- a/pkg/database/sql/postgres/pgx_test.go
+++ b/pkg/database/sql/postgres/pgx_test.go
@@ -23,9 +23,9 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql/postgres"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/testhelpers"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql/postgres"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/testhelpers"
)
var _ = Describe("Pgx", func() {
diff --git a/pkg/gracefulshutdown/gracefulshutdown.go b/pkg/gracefulshutdown/gracefulshutdown.go
index b8e7066..29fe19e 100644
--- a/pkg/gracefulshutdown/gracefulshutdown.go
+++ b/pkg/gracefulshutdown/gracefulshutdown.go
@@ -25,7 +25,7 @@ import (
"time"
log "github.com/sirupsen/logrus"
- "github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
+ "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
)
// operation is a clean up function on shutting down
diff --git a/pkg/loghelper/logerror.go b/pkg/loghelper/logerror.go
index 94a5069..f6dce63 100644
--- a/pkg/loghelper/logerror.go
+++ b/pkg/loghelper/logerror.go
@@ -27,9 +27,26 @@ func LogError(err error) *log.Entry {
})
}
+// A simple helper function to log slot and error.
func LogSlotError(slot string, err error) *log.Entry {
return log.WithFields(log.Fields{
"err": err,
"slot": slot,
})
}
+
+func LogSlotRangeError(startSlot string, endSlot string, err error) *log.Entry {
+ return log.WithFields(log.Fields{
+ "err": err,
+ "startSlot": startSlot,
+ "endSlot": endSlot,
+ })
+}
+func LogSlotRangeStatementError(startSlot string, endSlot string, statement string, err error) *log.Entry {
+ return log.WithFields(log.Fields{
+ "err": err,
+ "startSlot": startSlot,
+ "endSlot": endSlot,
+ "SqlStatement": statement,
+ })
+}