Testing for Batch Processing (#56)

* Set starting slot and improve error gap capturing

* Set starting slot and improve error gap capturing

* Tests + Significant Refactor

The code for historical processing has been significantly refactored to use a context to signal a shutdown (a minimal sketch of this pattern follows the commit list below).

Many tests have also been added for historical and knownGaps processing.

* Update MhKeys in test

* Update correct values

* Update Max Retry

Genesis is not working as expected.

* Ensure we release locks properly

* Add ordered testing

* Include system tests

* Update workflow calls

* Add secrets

* Add required secrets

* update path

* Try using the absolute path

* Remove volumes at the end.

* Update system-tests.yml

* Update system-tests.yml

* Update test err

* Update and test the shutdown

* rename ethcl --> eth-beacon

* Try forcing /bin/bash for docker-compose

* Update system-tests.yml

* Update system-tests.yml

* Update system-tests.yml

* Update system-tests.yml

* Update system-tests.yml

* Update system-tests.yml

* Use single-quoted cron schedule

* Don't run generic tests on schedule
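
Below is a minimal, illustrative sketch of the context-based shutdown pattern described in the refactor note above. It is not the indexer's actual code: `processSlots`, the ticker interval, and the three-second delay are placeholders; only the overall shape (a cancellable context passed into a worker run under `errgroup`, as the diffs below do for `CaptureHistoric` and `ProcessKnownGaps`) reflects the change.

```go
package main

import (
	"context"
	"log"
	"time"

	"golang.org/x/sync/errgroup"
)

// processSlots stands in for a long-running worker such as historic or
// known-gaps processing. It exits when the supplied context is cancelled.
func processSlots(ctx context.Context) error {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			log.Println("shutdown signalled, worker exiting")
			return ctx.Err()
		case <-ticker.C:
			// Process the next batch of slots here.
		}
	}
}

func main() {
	// The cancel function plays the role of the Stop* calls in the diffs below:
	// invoking it tells the worker to wind down.
	workerCtx, cancelWorker := context.WithCancel(context.Background())
	errG, _ := errgroup.WithContext(context.Background())
	errG.Go(func() error { return processSlots(workerCtx) })

	// Simulate a shutdown request arriving a few seconds later.
	time.AfterFunc(3*time.Second, cancelWorker)

	if err := errG.Wait(); err != nil && err != context.Canceled {
		log.Fatal(err)
	}
	log.Println("worker stopped cleanly")
}
```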
Abdul Rabbani 2022-06-09 17:32:46 -04:00 committed by GitHub
parent d674df1ef0
commit 24fc6358d6
47 changed files with 2908 additions and 628 deletions


@ -5,7 +5,7 @@ on:
stack-orchestrator-ref: stack-orchestrator-ref:
required: false required: false
type: string type: string
ipld-ethcl-db-ref: ipld-eth-beacon-db-ref:
required: false required: false
type: string type: string
ssz-data-ref: ssz-data-ref:
@ -16,8 +16,8 @@ on:
required: true required: true
env: env:
stack-orchestrator-ref: ${{ inputs.stack-orchestrator-ref || 'develop' }} stack-orchestrator-ref: ${{ inputs.stack-orchestrator-ref || '3048a224100ceb122d6da71328bf3803dff72a01' }}
ipld-ethcl-db-ref: ${{ inputs.ipld-ethcl-db-ref || 'feature/historic-processing' }} ipld-eth-beacon-db-ref: ${{ inputs.ipld-eth-beacon-db-ref || '3dfe416302d553f8240f6051c08a7899b0e39e12' }}
ssz-data-ref: ${{ inputs.ssz-data-ref || 'main' }} ssz-data-ref: ${{ inputs.ssz-data-ref || 'main' }}
GOPATH: /tmp/go GOPATH: /tmp/go
jobs: jobs:
@ -27,7 +27,7 @@ jobs:
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
with: with:
path: "./ipld-ethcl-indexer" path: "./ipld-eth-beacon-indexer"
- uses: actions/checkout@v3 - uses: actions/checkout@v3
with: with:
@ -38,36 +38,36 @@ jobs:
- uses: actions/checkout@v3 - uses: actions/checkout@v3
with: with:
ref: ${{ env.ipld-ethcl-db-ref }} ref: ${{ env.ipld-eth-beacon-db-ref }}
repository: vulcanize/ipld-ethcl-db repository: vulcanize/ipld-eth-beacon-db
path: "./ipld-ethcl-db/" path: "./ipld-eth-beacon-db/"
ssh-key: ${{secrets.GHA_KEY}} ssh-key: ${{secrets.GHA_KEY}}
fetch-depth: 0 fetch-depth: 0
- name: Create config file - name: Create config file
run: | run: |
echo vulcanize_ipld_ethcl_db=$GITHUB_WORKSPACE/ipld-ethcl-db/ > ./config.sh echo vulcanize_ipld_eth_beacon_db=$GITHUB_WORKSPACE/ipld-eth-beacon-db/ > ./config.sh
echo vulcanize_ipld_ethcl_indexer=$GITHUB_WORKSPACE/ipld-ethcl-indexer >> ./config.sh echo vulcanize_ipld_eth_beacon_indexer=$GITHUB_WORKSPACE/ipld-eth-beacon-indexer >> ./config.sh
echo ethcl_capture_mode=boot >> ./config.sh echo eth_beacon_capture_mode=boot >> ./config.sh
echo ethcl_skip_sync=true >> ./config.sh echo eth_beacon_skip_sync=true >> ./config.sh
echo ethcl_known_gap_increment=1000000 >> ./config.sh echo eth_beacon_known_gap_increment=1000000 >> ./config.sh
cat ./config.sh cat ./config.sh
- name: Run docker compose - name: Run docker compose
run: | run: |
docker-compose \ docker-compose \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ethcl-db.yml" \ -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-lighthouse.yml" \ -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-lighthouse.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-ethcl-indexer.yml" \ -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-indexer.yml" \
--env-file ./config.sh \ --env-file ./config.sh \
up -d --build up -d --build
- name: Check to make sure HEALTH file is present - name: Check to make sure HEALTH file is present
shell: bash shell: bash
run: | run: |
until $(docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-ethcl-indexer.yml" cp ipld-ethcl-indexer:/root/HEALTH ./HEALTH) ; do sleep 10; done until $(docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-indexer.yml" cp ipld-eth-beacon-indexer:/root/HEALTH ./HEALTH) ; do sleep 10; done
cat ./HEALTH cat ./HEALTH
if [[ "$(cat ./HEALTH)" -eq "0" ]]; then echo "Application boot successful" && (exit 0); else docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-ethcl-indexer.yml" cp ipld-ethcl-indexer:/root/ipld-ethcl-indexer.log . && cat ipld-ethcl-indexer.log && (exit 1); fi if [[ "$(cat ./HEALTH)" -eq "0" ]]; then echo "Application boot successful" && (exit 0); else docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-indexer.yml" cp ipld-eth-beacon-indexer:/root/ipld-eth-beacon-indexer.log . && cat ipld-eth-beacon-indexer.log && (exit 1); fi
unit-test: unit-test:
name: Run Unit Tests name: Run Unit Tests
@ -79,7 +79,7 @@ jobs:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
with: with:
path: "./ipld-ethcl-indexer" path: "./ipld-eth-beacon-indexer"
- uses: actions/checkout@v3 - uses: actions/checkout@v3
with: with:
@ -90,9 +90,9 @@ jobs:
- uses: actions/checkout@v3 - uses: actions/checkout@v3
with: with:
ref: ${{ env.ipld-ethcl-db-ref }} ref: ${{ env.ipld-eth-beacon-db-ref }}
repository: vulcanize/ipld-ethcl-db repository: vulcanize/ipld-eth-beacon-db
path: "./ipld-ethcl-db/" path: "./ipld-eth-beacon-db/"
ssh-key: ${{ secrets.GHA_KEY }} ssh-key: ${{ secrets.GHA_KEY }}
fetch-depth: 0 fetch-depth: 0
@ -100,25 +100,25 @@ jobs:
with: with:
ref: ${{ env.ssz-data-ref }} ref: ${{ env.ssz-data-ref }}
repository: vulcanize/ssz-data repository: vulcanize/ssz-data
path: "./ipld-ethcl-indexer/pkg/beaconclient/ssz-data" path: "./ipld-eth-beacon-indexer/pkg/beaconclient/ssz-data"
fetch-depth: 0 fetch-depth: 0
- name: Create config file - name: Create config file
run: | run: |
echo vulcanize_ipld_ethcl_db=$GITHUB_WORKSPACE/ipld-ethcl-db/ > ./config.sh echo vulcanize_ipld_eth_beacon_db=$GITHUB_WORKSPACE/ipld-eth-beacon-db/ > ./config.sh
echo vulcanize_ipld_ethcl_indexer=$GITHUB_WORKSPACE/ipld-ethcl-indexer >> ./config.sh echo vulcanize_ipld_eth_beacon_indexer=$GITHUB_WORKSPACE/ipld-eth-beacon-indexer >> ./config.sh
cat ./config.sh cat ./config.sh
- name: Run docker compose - name: Run docker compose
run: | run: |
docker-compose \ docker-compose \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ethcl-db.yml" \ -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml" \
--env-file ./config.sh \ --env-file ./config.sh \
up -d --build up -d --build
- uses: actions/setup-go@v3 - uses: actions/setup-go@v3
with: with:
go-version: ">=1.17.0" go-version: ">=1.18.0"
check-latest: true check-latest: true
- name: Install packages - name: Install packages
@ -128,7 +128,7 @@ jobs:
- name: Run the tests using Make - name: Run the tests using Make
run: | run: |
cd ipld-ethcl-indexer cd ipld-eth-beacon-indexer
make unit-test-ci make unit-test-ci
integration-test: integration-test:
@ -140,7 +140,7 @@ jobs:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
with: with:
path: "./ipld-ethcl-indexer" path: "./ipld-eth-beacon-indexer"
- uses: actions/checkout@v3 - uses: actions/checkout@v3
with: with:
@ -151,30 +151,30 @@ jobs:
- uses: actions/checkout@v3 - uses: actions/checkout@v3
with: with:
ref: ${{ env.ipld-ethcl-db-ref }} ref: ${{ env.ipld-eth-beacon-db-ref }}
repository: vulcanize/ipld-ethcl-db repository: vulcanize/ipld-eth-beacon-db
path: "./ipld-ethcl-db/" path: "./ipld-eth-beacon-db/"
ssh-key: ${{secrets.GHA_KEY}} ssh-key: ${{secrets.GHA_KEY}}
fetch-depth: 0 fetch-depth: 0
- name: Create config file - name: Create config file
run: | run: |
echo vulcanize_ipld_ethcl_db=$GITHUB_WORKSPACE/ipld-ethcl-db/ > ./config.sh echo vulcanize_ipld_eth_beacon_db=$GITHUB_WORKSPACE/ipld-eth-beacon-db/ > ./config.sh
echo vulcanize_ipld_ethcl_indexer=$GITHUB_WORKSPACE/ipld-ethcl-indexer >> ./config.sh echo vulcanize_ipld_eth_beacon_indexer=$GITHUB_WORKSPACE/ipld-eth-beacon-indexer >> ./config.sh
echo ethcl_capture_mode=boot >> ./config.sh echo eth_beacon_capture_mode=boot >> ./config.sh
cat ./config.sh cat ./config.sh
- name: Run docker compose - name: Run docker compose
run: | run: |
docker-compose \ docker-compose \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ethcl-db.yml" \ -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-lighthouse.yml" \ -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-lighthouse.yml" \
--env-file ./config.sh \ --env-file ./config.sh \
up -d --build up -d --build
- uses: actions/setup-go@v3 - uses: actions/setup-go@v3
with: with:
go-version: ">=1.17.0" go-version: ">=1.18.0"
check-latest: true check-latest: true
- name: Install packages - name: Install packages
@ -184,7 +184,7 @@ jobs:
- name: Run the tests using Make - name: Run the tests using Make
run: | run: |
cd ipld-ethcl-indexer cd ipld-eth-beacon-indexer
make integration-test-ci make integration-test-ci
golangci: golangci:
@ -192,7 +192,7 @@ jobs:
steps: steps:
- uses: actions/setup-go@v3 - uses: actions/setup-go@v3
with: with:
go-version: ">=1.17.0" go-version: ">=1.18.0"
- uses: actions/checkout@v3 - uses: actions/checkout@v3
- name: golangci-lint - name: golangci-lint
uses: golangci/golangci-lint-action@v3 uses: golangci/golangci-lint-action@v3


@ -7,8 +7,8 @@ on:
description: "The branch, commit or sha from stack-orchestrator to checkout" description: "The branch, commit or sha from stack-orchestrator to checkout"
required: false required: false
default: "main" default: "main"
ipld-ethcl-db-ref: ipld-eth-beacon-db-ref:
description: "The branch, commit or sha from ipld-ethcl-db to checkout" description: "The branch, commit or sha from ipld-eth-beacon-db to checkout"
required: false required: false
default: "main" default: "main"
ssz-data-ref: ssz-data-ref:
@ -24,13 +24,24 @@ on:
- ".github/workflows/on-pr.yml" - ".github/workflows/on-pr.yml"
- ".github/workflows/tests.yml" - ".github/workflows/tests.yml"
- "**" - "**"
schedule:
- cron: '0 13 * * *' # Must be single quotes!!
jobs: jobs:
trigger-tests: trigger-tests:
uses: ./.github/workflows/tests.yml if: github.event_name != 'schedule'
uses: ./.github/workflows/generic-testing.yml
with: with:
stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref }} stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref }}
ipld-ethcl-db-ref: ${{ github.event.inputs.ipld-ethcl-db-ref }} ipld-eth-beacon-db-ref: ${{ github.event.inputs.ipld-eth-beacon-db-ref }}
ssz-data-ref: ${{ github.event.inputs.ssz-data-ref }} ssz-data-ref: ${{ github.event.inputs.ssz-data-ref }}
secrets: secrets:
GHA_KEY: ${{secrets.GHA_KEY}} GHA_KEY: ${{secrets.GHA_KEY}}
system-testing:
uses: ./.github/workflows/system-tests.yml
with:
stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref }}
ipld-eth-beacon-db-ref: ${{ github.event.inputs.ipld-eth-beacon-db-ref }}
secrets:
GHA_KEY: ${{secrets.GHA_KEY}}
BC_ADDRESS: ${{secrets.BC_ADDRESS}}


@ -4,17 +4,27 @@ on:
types: [published, edited] types: [published, edited]
jobs: jobs:
trigger-tests: trigger-tests:
uses: ./.github/workflows/tests.yml uses: ./.github/workflows/generic-testing.yml
with: with:
stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref }} stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref }}
ipld-ethcl-db-ref: ${{ github.event.inputs.ipld-ethcl-db-ref }} ipld-eth-beacon-db-ref: ${{ github.event.inputs.ipld-eth-beacon-db-ref }}
ssz-data-ref: ${{ github.event.inputs.ssz-data-ref }} ssz-data-ref: ${{ github.event.inputs.ssz-data-ref }}
secrets: secrets:
GHA_KEY: ${{secrets.GHA_KEY}} GHA_KEY: ${{secrets.GHA_KEY}}
system-testing:
uses: ./.github/workflows/system-tests.yml
with:
stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref }}
ipld-eth-beacon-db-ref: ${{ github.event.inputs.ipld-eth-beacon-db-ref }}
secrets:
GHA_KEY: ${{secrets.GHA_KEY}}
BC_ADDRESS: ${{secrets.BC_ADDRESS}}
build: build:
name: Run docker build name: Run docker build
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: trigger-tests needs:
- trigger-tests
- system-testing
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
- name: Get the version - name: Get the version
@ -23,11 +33,11 @@ jobs:
- name: Run docker build - name: Run docker build
run: make docker-build run: make docker-build
- name: Tag docker image - name: Tag docker image
run: docker tag vulcanize/ipld-ethcl-indexer docker.pkg.github.com/vulcanize/ipld-ethcl-indexer/ipld-ethcl-indexer:${{steps.vars.outputs.sha}} run: docker tag vulcanize/ipld-eth-beacon-indexer docker.pkg.github.com/vulcanize/ipld-eth-beacon-indexer/ipld-eth-beacon-indexer:${{steps.vars.outputs.sha}}
- name: Docker Login - name: Docker Login
run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
- name: Docker Push - name: Docker Push
run: docker push docker.pkg.github.com/vulcanize/ipld-ethcl-indexer/ipld-ethcl-indexer:${{steps.vars.outputs.sha}} run: docker push docker.pkg.github.com/vulcanize/ipld-eth-beacon-indexer/ipld-eth-beacon-indexer:${{steps.vars.outputs.sha}}
push_to_registries: push_to_registries:
name: Push Docker image to Docker Hub name: Push Docker image to Docker Hub
runs-on: ubuntu-latest runs-on: ubuntu-latest
@ -41,10 +51,10 @@ jobs:
- name: Docker Login to Github Registry - name: Docker Login to Github Registry
run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
- name: Docker Pull - name: Docker Pull
run: docker pull docker.pkg.github.com/vulcanize/ipld-ethcl-indexer/ipld-ethcl-indexer:${{steps.vars.outputs.sha}} run: docker pull docker.pkg.github.com/vulcanize/ipld-eth-beacon-indexer/ipld-eth-beacon-indexer:${{steps.vars.outputs.sha}}
- name: Docker Login to Docker Registry - name: Docker Login to Docker Registry
run: echo ${{ secrets.VULCANIZEJENKINS_PAT }} | docker login -u vulcanizejenkins --password-stdin run: echo ${{ secrets.VULCANIZEJENKINS_PAT }} | docker login -u vulcanizejenkins --password-stdin
- name: Tag docker image - name: Tag docker image
run: docker tag docker.pkg.github.com/vulcanize/ipld-ethcl-indexer/ipld-ethcl-indexer:${{steps.vars.outputs.sha}} vulcanize/ipld-ethcl-indexer:${{steps.vars.outputs.tag}} run: docker tag docker.pkg.github.com/vulcanize/ipld-eth-beacon-indexer/ipld-eth-beacon-indexer:${{steps.vars.outputs.sha}} vulcanize/ipld-eth-beacon-indexer:${{steps.vars.outputs.tag}}
- name: Docker Push to Docker Hub - name: Docker Push to Docker Hub
run: docker push vulcanize/ipld-ethcl-indexer:${{steps.vars.outputs.tag}} run: docker push vulcanize/ipld-eth-beacon-indexer:${{steps.vars.outputs.tag}}

.github/workflows/system-tests.yml (new file, 94 lines)

@ -0,0 +1,94 @@
name: System Testing for the stack.
on:
workflow_call:
inputs:
stack-orchestrator-ref:
required: false
type: string
ipld-eth-beacon-db-ref:
required: false
type: string
secrets:
GHA_KEY:
required: true
BC_ADDRESS:
required: true
env:
stack-orchestrator-ref: ${{ inputs.stack-orchestrator-ref || '3048a224100ceb122d6da71328bf3803dff72a01' }}
ipld-eth-beacon-db-ref: ${{ inputs.ipld-eth-beacon-db-ref || '3dfe416302d553f8240f6051c08a7899b0e39e12' }}
GOPATH: /tmp/go
bc_protocol: "http"
bc_address: ${{secrets.BC_ADDRESS}}
bc_port: 5052
db_host: localhost
db_port: 8076
db_name: vulcanize_testing
db_user: vdbm
db_password: password
db_driver: "pgx"
jobs:
system-testing:
runs-on: self-hosted
steps:
- name: Create GOPATH
run: mkdir -p /tmp/go
- uses: actions/checkout@v2
with:
path: "./ipld-eth-beacon-indexer"
- uses: actions/checkout@v3
with:
ref: ${{ env.stack-orchestrator-ref }}
path: "./stack-orchestrator/"
repository: vulcanize/stack-orchestrator
fetch-depth: 0
- uses: actions/checkout@v3
with:
ref: ${{ env.ipld-eth-beacon-db-ref }}
repository: vulcanize/ipld-eth-beacon-db
path: "./ipld-eth-beacon-db/"
ssh-key: ${{secrets.GHA_KEY}}
fetch-depth: 0
- name: Create config file
run: |
echo vulcanize_ipld_eth_beacon_db=$(pwd)/ipld-eth-beacon-db > ./config.sh
cat ./config.sh
- name: Run docker compose
id: compose
shell: bash
run: |
ls "./stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml"
whoami
/usr/local/bin/docker-compose \
-f "./stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml" \
--env-file ./config.sh \
up -d --build
- uses: actions/setup-go@v3
with:
go-version: ">=1.18.0"
check-latest: true
- name: Install packages
run: |
go install github.com/onsi/ginkgo/v2/ginkgo@latest
which ginkgo
- name: Run the tests using Make
run: |
cd ipld-eth-beacon-indexer
make system-test-ci
- name: Clean up the docker containers
if: steps.compose.outcome == 'success'
shell: bash
run: |
/usr/local/bin/docker-compose \
-f "./stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml" \
--env-file ./config.sh \
down -v


@ -1,6 +1,6 @@
FROM golang:1.18-alpine as builder FROM golang:1.18-alpine as builder
WORKDIR /go/src/github.com/vulcanize/ipld-ethcl-indexer WORKDIR /go/src/github.com/vulcanize/ipld-eth-beacon-indexer
RUN apk --no-cache add ca-certificates make git g++ linux-headers RUN apk --no-cache add ca-certificates make git g++ linux-headers
ENV GO111MODULE=on ENV GO111MODULE=on
@ -9,12 +9,12 @@ COPY go.sum .
RUN go mod tidy; go mod download RUN go mod tidy; go mod download
COPY . . COPY . .
RUN GCO_ENABLED=0 GOOS=linux go build -race -a -installsuffix cgo -ldflags '-extldflags "-static"' -o ipld-ethcl-indexer . RUN GCO_ENABLED=0 GOOS=linux go build -race -a -installsuffix cgo -ldflags '-extldflags "-static"' -o ipld-eth-beacon-indexer .
RUN chmod +x ipld-ethcl-indexer RUN chmod +x ipld-eth-beacon-indexer
FROM frolvlad/alpine-bash:latest FROM frolvlad/alpine-bash:latest
RUN apk --no-cache add ca-certificates RUN apk --no-cache add ca-certificates
WORKDIR /root/ WORKDIR /root/
COPY --from=builder /go/src/github.com/vulcanize/ipld-ethcl-indexer/ipld-ethcl-indexer /root/ipld-ethcl-indexer COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-beacon-indexer/ipld-eth-beacon-indexer /root/ipld-eth-beacon-indexer
ADD entrypoint.sh . ADD entrypoint.sh .
ENTRYPOINT ["./entrypoint.sh"] ENTRYPOINT ["./entrypoint.sh"]


@ -80,6 +80,24 @@ unit-test-ci:
--cover --coverprofile=cover.profile \ --cover --coverprofile=cover.profile \
--trace --json-report=report.json --trace --json-report=report.json
.PHONY: system-test-ci
system-test-ci:
go vet ./...
go fmt ./...
$(GINKGO) -r --label-filter system \
--randomize-all --randomize-suites \
--fail-on-pending --keep-going \
--cover --coverprofile=cover.profile \
--trace --json-report=report.json
.PHONY: system-test-local
system-test-local:
go vet ./...
go fmt ./...
$(GINKGO) -r --label-filter system \
--randomize-all --randomize-suites \
--fail-on-pending --keep-going \
--trace
.PHONY: build .PHONY: build
build: build:
@ -89,4 +107,4 @@ build:
## Build docker image ## Build docker image
.PHONY: docker-build .PHONY: docker-build
docker-build: docker-build:
docker build -t vulcanize/ipld-ethcl-indexer . docker build -t vulcanize/ipld-eth-beacon-indexer .


@ -1,4 +1,4 @@
- [ipld-ethcl-indexer](#ipld-ethcl-indexer) - [ipld-eth-beacon-indexer](#ipld-eth-beacon-indexer)
- [Running the Application](#running-the-application) - [Running the Application](#running-the-application)
- [Development Patterns](#development-patterns) - [Development Patterns](#development-patterns)
- [Logging](#logging) - [Logging](#logging)
@ -8,7 +8,7 @@
<small><i><a href='http://ecotrust-canada.github.io/markdown-toc/'>Table of contents generated with markdown-toc</a></i></small> <small><i><a href='http://ecotrust-canada.github.io/markdown-toc/'>Table of contents generated with markdown-toc</a></i></small>
# ipld-ethcl-indexer # ipld-eth-beacon-indexer
This application will capture all the `BeaconState`'s and `SignedBeaconBlock`'s from the consensus chain on Ethereum. This application is going to connect to the lighthouse client, but hypothetically speaking, it should be interchangeable with any eth2 beacon node. This application will capture all the `BeaconState`'s and `SignedBeaconBlock`'s from the consensus chain on Ethereum. This application is going to connect to the lighthouse client, but hypothetically speaking, it should be interchangeable with any eth2 beacon node.
@ -22,12 +22,12 @@ To run the application, do as follows:
1. Setup the prerequisite applications. 1. Setup the prerequisite applications.
a. Run a beacon client (such as lighthouse). a. Run a beacon client (such as lighthouse).
b. Run a postgres DB for ethcl. b. Run a postgres DB for eth-beacon.
c. You can utilize the `stack-orchestrator` [repository](https://github.com/vulcanize/stack-orchestrato). c. You can utilize the `stack-orchestrator` [repository](https://github.com/vulcanize/stack-orchestrato).
``` ```
./wrapper.sh -e skip \ ./wrapper.sh -e skip \
-d ../docker/local/docker-compose-ethcl-db.yml \ -d ../docker/local/docker-compose-eth-beacon-db.yml \
-d ../docker/latest/docker-compose-lighthouse.yml \ -d ../docker/latest/docker-compose-lighthouse.yml \
-v remove \ -v remove \
-p ../local-config.sh -p ../local-config.sh
@ -37,7 +37,7 @@ To run the application, do as follows:
2. Run the start up command. 2. Run the start up command.
``` ```
go run -race main.go capture historic --config ./example.ipld-ethcl-indexer-config.json go run -race main.go capture historic --config ./example.ipld-eth-beacon-indexer-config.json
``` ```
## Running Tests ## Running Tests


@ -51,4 +51,4 @@ This package contains useful functions for logging.
## `internal/shutdown` ## `internal/shutdown`
This package is used to shutdown the `ipld-ethcl-indexer`. It calls the `pkg/gracefulshutdown` package. This package is used to shutdown the `ipld-eth-beacon-indexer`. It calls the `pkg/gracefulshutdown` package.


@ -24,9 +24,9 @@ import (
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/viper" "github.com/spf13/viper"
"github.com/vulcanize/ipld-ethcl-indexer/internal/boot" "github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
"github.com/vulcanize/ipld-ethcl-indexer/internal/shutdown" "github.com/vulcanize/ipld-eth-beacon-indexer/internal/shutdown"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
) )
// bootCmd represents the boot command // bootCmd represents the boot command
@ -62,9 +62,9 @@ func bootApp() {
err = shutdown.ShutdownBoot(ctx, notifierCh, maxWaitSecondsShutdown, Db, Bc) err = shutdown.ShutdownBoot(ctx, notifierCh, maxWaitSecondsShutdown, Db, Bc)
if err != nil { if err != nil {
loghelper.LogError(err).Error("Ungracefully Shutdown ipld-ethcl-indexer!") loghelper.LogError(err).Error("Ungracefully Shutdown ipld-eth-beacon-indexer!")
} else { } else {
log.Info("Gracefully shutdown ipld-ethcl-indexer") log.Info("Gracefully shutdown ipld-eth-beacon-indexer")
} }
} }


@ -94,7 +94,7 @@ func init() {
captureCmd.PersistentFlags().StringVarP(&bcConnectionProtocol, "bc.connectionProtocol", "", "http", "protocol for connecting to the beacon node.") captureCmd.PersistentFlags().StringVarP(&bcConnectionProtocol, "bc.connectionProtocol", "", "http", "protocol for connecting to the beacon node.")
captureCmd.PersistentFlags().IntVarP(&bcBootRetryInterval, "bc.bootRetryInterval", "", 30, "The amount of time to wait between retries while booting the application") captureCmd.PersistentFlags().IntVarP(&bcBootRetryInterval, "bc.bootRetryInterval", "", 30, "The amount of time to wait between retries while booting the application")
captureCmd.PersistentFlags().IntVarP(&bcBootMaxRetry, "bc.bootMaxRetry", "", 5, "The amount of time to wait between retries while booting the application") captureCmd.PersistentFlags().IntVarP(&bcBootMaxRetry, "bc.bootMaxRetry", "", 5, "The amount of time to wait between retries while booting the application")
captureCmd.PersistentFlags().IntVarP(&bcMaxHistoricProcessWorker, "bc.maxHistoricProcessWorker", "", 30, "The number of workers that should be actively processing slots from the ethcl.historic_process table. Be careful of system memory.") captureCmd.PersistentFlags().IntVarP(&bcMaxHistoricProcessWorker, "bc.maxHistoricProcessWorker", "", 30, "The number of workers that should be actively processing slots from the eth-beacon.historic_process table. Be careful of system memory.")
captureCmd.PersistentFlags().IntVarP(&bcUniqueNodeIdentifier, "bc.uniqueNodeIdentifier", "", 0, "The unique identifier of this application. Each application connecting to the DB should have a unique identifier.") captureCmd.PersistentFlags().IntVarP(&bcUniqueNodeIdentifier, "bc.uniqueNodeIdentifier", "", 0, "The unique identifier of this application. Each application connecting to the DB should have a unique identifier.")
captureCmd.PersistentFlags().BoolVarP(&bcCheckDb, "bc.checkDb", "", true, "Should we check to see if the slot exists in the DB before writing it?") captureCmd.PersistentFlags().BoolVarP(&bcCheckDb, "bc.checkDb", "", true, "Should we check to see if the slot exists in the DB before writing it?")
// err = captureCmd.MarkPersistentFlagRequired("bc.address") // err = captureCmd.MarkPersistentFlagRequired("bc.address")
@ -103,9 +103,9 @@ func init() {
// exitErr(err) // exitErr(err)
//// Known Gaps specific //// Known Gaps specific
captureCmd.PersistentFlags().BoolVarP(&kgProcessGaps, "kg.processKnownGaps", "", true, "Should we process the slots within the ethcl.known_gaps table.") captureCmd.PersistentFlags().BoolVarP(&kgProcessGaps, "kg.processKnownGaps", "", true, "Should we process the slots within the eth-beacon.known_gaps table.")
captureCmd.PersistentFlags().IntVarP(&kgTableIncrement, "kg.increment", "", 10000, "The max slots within a single entry to the known_gaps table.") captureCmd.PersistentFlags().IntVarP(&kgTableIncrement, "kg.increment", "", 10000, "The max slots within a single entry to the known_gaps table.")
captureCmd.PersistentFlags().IntVarP(&kgMaxWorker, "kg.maxKnownGapsWorker", "", 30, "The number of workers that should be actively processing slots from the ethcl.known_gaps table. Be careful of system memory.") captureCmd.PersistentFlags().IntVarP(&kgMaxWorker, "kg.maxKnownGapsWorker", "", 30, "The number of workers that should be actively processing slots from the eth-beacon.known_gaps table. Be careful of system memory.")
// Prometheus Specific // Prometheus Specific
captureCmd.PersistentFlags().BoolVarP(&pmMetrics, "pm.metrics", "", true, "Should we capture prometheus metrics.") captureCmd.PersistentFlags().BoolVarP(&pmMetrics, "pm.metrics", "", true, "Should we capture prometheus metrics.")

cmd/full.go (new file, 118 lines)

@ -0,0 +1,118 @@
// VulcanizeDB
// Copyright © 2022 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"fmt"
"strconv"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
"github.com/vulcanize/ipld-eth-beacon-indexer/internal/shutdown"
"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
"golang.org/x/sync/errgroup"
)
// fullCmd represents the full command
var fullCmd = &cobra.Command{
Use: "full",
Short: "Capture all components of the application (head and historical)",
Long: `Capture all components of the application (head and historical`,
Run: func(cmd *cobra.Command, args []string) {
startFullProcessing()
},
}
func init() {
captureCmd.AddCommand(fullCmd)
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// fullCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// fullCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
// Start the application to track at head and historical processing.
func startFullProcessing() {
// Boot the application
log.Info("Starting the application in head tracking mode.")
ctx := context.Background()
Bc, Db, err := boot.BootApplicationWithRetry(ctx, viper.GetString("db.address"), viper.GetInt("db.port"), viper.GetString("db.name"), viper.GetString("db.username"), viper.GetString("db.password"), viper.GetString("db.driver"),
viper.GetString("bc.address"), viper.GetInt("bc.port"), viper.GetString("bc.connectionProtocol"), viper.GetString("bc.type"), viper.GetInt("bc.bootRetryInterval"), viper.GetInt("bc.bootMaxRetry"),
viper.GetInt("kg.increment"), "head", viper.GetBool("t.skipSync"), viper.GetInt("bc.uniqueNodeIdentifier"), viper.GetBool("bc.checkDb"))
if err != nil {
StopApplicationPreBoot(err, Db)
}
if viper.GetBool("pm.metrics") {
addr := viper.GetString("pm.address") + ":" + strconv.Itoa(viper.GetInt("pm.port"))
serveProm(addr)
}
log.Info("The Beacon Client has booted successfully!")
// Capture head blocks
go Bc.CaptureHead()
hpContext, hpCancel := context.WithCancel(context.Background())
errG, _ := errgroup.WithContext(context.Background())
errG.Go(func() error {
errs := Bc.CaptureHistoric(hpContext, viper.GetInt("bc.maxHistoricProcessWorker"))
if len(errs) != 0 {
if len(errs) != 0 {
log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing historic events")
return fmt.Errorf("Application ended because there were too many error when attempting to process historic")
}
}
return nil
})
kgCtx, KgCancel := context.WithCancel(context.Background())
if viper.GetBool("kg.processKnownGaps") {
go func() {
errG := new(errgroup.Group)
errG.Go(func() error {
errs := Bc.ProcessKnownGaps(kgCtx, viper.GetInt("kg.maxKnownGapsWorker"))
if len(errs) != 0 {
log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps")
return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps")
}
return nil
})
if err := errG.Wait(); err != nil {
loghelper.LogError(err).Error("Error with knownGaps processing")
}
}()
}
// Shutdown when the time is right.
err = shutdown.ShutdownFull(ctx, KgCancel, hpCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc)
if err != nil {
loghelper.LogError(err).Error("Ungracefully Shutdown ipld-eth-beacon-indexer!")
} else {
log.Info("Gracefully shutdown ipld-eth-beacon-indexer")
}
}


@ -26,9 +26,9 @@ import (
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/viper" "github.com/spf13/viper"
"github.com/vulcanize/ipld-ethcl-indexer/internal/boot" "github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
"github.com/vulcanize/ipld-ethcl-indexer/internal/shutdown" "github.com/vulcanize/ipld-eth-beacon-indexer/internal/shutdown"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
@ -63,11 +63,12 @@ func startHeadTracking() {
log.Info("The Beacon Client has booted successfully!") log.Info("The Beacon Client has booted successfully!")
// Capture head blocks // Capture head blocks
go Bc.CaptureHead() go Bc.CaptureHead()
kgCtx, KgCancel := context.WithCancel(context.Background())
if viper.GetBool("kg.processKnownGaps") { if viper.GetBool("kg.processKnownGaps") {
go func() { go func() {
errG := new(errgroup.Group) errG := new(errgroup.Group)
errG.Go(func() error { errG.Go(func() error {
errs := Bc.ProcessKnownGaps(viper.GetInt("kg.maxKnownGapsWorker")) errs := Bc.ProcessKnownGaps(kgCtx, viper.GetInt("kg.maxKnownGapsWorker"))
if len(errs) != 0 { if len(errs) != 0 {
log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps") log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps")
return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps") return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps")
@ -81,11 +82,11 @@ func startHeadTracking() {
} }
// Shutdown when the time is right. // Shutdown when the time is right.
err = shutdown.ShutdownHeadTracking(ctx, notifierCh, maxWaitSecondsShutdown, Db, Bc) err = shutdown.ShutdownHeadTracking(ctx, KgCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc)
if err != nil { if err != nil {
loghelper.LogError(err).Error("Ungracefully Shutdown ipld-ethcl-indexer!") loghelper.LogError(err).Error("Ungracefully Shutdown ipld-eth-beacon-indexer!")
} else { } else {
log.Info("Gracefully shutdown ipld-ethcl-indexer") log.Info("Gracefully shutdown ipld-eth-beacon-indexer")
} }
} }


@ -25,10 +25,10 @@ import (
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/viper" "github.com/spf13/viper"
"github.com/vulcanize/ipld-ethcl-indexer/internal/boot" "github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
"github.com/vulcanize/ipld-ethcl-indexer/internal/shutdown" "github.com/vulcanize/ipld-eth-beacon-indexer/internal/shutdown"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
@ -60,10 +60,11 @@ func startHistoricProcessing() {
serveProm(addr) serveProm(addr)
} }
errG, _ := errgroup.WithContext(context.Background()) hpContext, hpCancel := context.WithCancel(context.Background())
errG, _ := errgroup.WithContext(context.Background())
errG.Go(func() error { errG.Go(func() error {
errs := Bc.CaptureHistoric(viper.GetInt("bc.maxHistoricProcessWorker")) errs := Bc.CaptureHistoric(hpContext, viper.GetInt("bc.maxHistoricProcessWorker"))
if len(errs) != 0 { if len(errs) != 0 {
if len(errs) != 0 { if len(errs) != 0 {
log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing historic events") log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing historic events")
@ -73,11 +74,12 @@ func startHistoricProcessing() {
return nil return nil
}) })
kgContext, kgCancel := context.WithCancel(context.Background())
if viper.GetBool("kg.processKnownGaps") { if viper.GetBool("kg.processKnownGaps") {
go func() { go func() {
errG := new(errgroup.Group) errG := new(errgroup.Group)
errG.Go(func() error { errG.Go(func() error {
errs := Bc.ProcessKnownGaps(viper.GetInt("kg.maxKnownGapsWorker")) errs := Bc.ProcessKnownGaps(kgContext, viper.GetInt("kg.maxKnownGapsWorker"))
if len(errs) != 0 { if len(errs) != 0 {
log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps") log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps")
return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps") return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps")
@ -91,11 +93,11 @@ func startHistoricProcessing() {
} }
// Shutdown when the time is right. // Shutdown when the time is right.
err = shutdown.ShutdownHistoricProcessing(ctx, notifierCh, maxWaitSecondsShutdown, Db, Bc) err = shutdown.ShutdownHistoricProcessing(ctx, kgCancel, hpCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc)
if err != nil { if err != nil {
loghelper.LogError(err).Error("Ungracefully Shutdown ipld-ethcl-indexer!") loghelper.LogError(err).Error("Ungracefully Shutdown ipld-eth-beacon-indexer!")
} else { } else {
log.Info("Gracefully shutdown ipld-ethcl-indexer") log.Info("Gracefully shutdown ipld-eth-beacon-indexer")
} }
} }


@ -32,7 +32,7 @@ var (
// rootCmd represents the base command when called without any subcommands // rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{ var rootCmd = &cobra.Command{
Use: "ipld-ethcl-indexer", Use: "ipld-eth-beacon-indexer",
Short: "This application will keep track of all BeaconState's and SignedBeaconBlock's on the Beacon Chain.", Short: "This application will keep track of all BeaconState's and SignedBeaconBlock's on the Beacon Chain.",
Long: `This is an application that will capture the BeaconState's and SignedBeaconBlock's on the Beacon Chain. Long: `This is an application that will capture the BeaconState's and SignedBeaconBlock's on the Beacon Chain.
It can either do this will keeping track of head, or backfilling historic data.`, It can either do this will keeping track of head, or backfilling historic data.`,
@ -127,9 +127,9 @@ func init() {
// will be global for your application. // will be global for your application.
// Optional Flags // Optional Flags
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.ipld-ethcl-indexer.yaml)") rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.ipld-eth-beacon-indexer.yaml)")
rootCmd.PersistentFlags().String("log.level", log.InfoLevel.String(), "log level (trace, debug, info, warn, error, fatal, panic)") rootCmd.PersistentFlags().String("log.level", log.InfoLevel.String(), "log level (trace, debug, info, warn, error, fatal, panic)")
rootCmd.PersistentFlags().String("log.file", "ipld-ethcl-indexer.log", "file path for logging") rootCmd.PersistentFlags().String("log.file", "ipld-eth-beacon-indexer.log", "file path for logging")
rootCmd.PersistentFlags().Bool("log.output", true, "Should we log to STDOUT") rootCmd.PersistentFlags().Bool("log.output", true, "Should we log to STDOUT")
rootCmd.PersistentFlags().String("log.format", "json", "json or text") rootCmd.PersistentFlags().String("log.format", "json", "json or text")
@ -160,10 +160,10 @@ func initConfig() {
home, err := os.UserHomeDir() home, err := os.UserHomeDir()
cobra.CheckErr(err) cobra.CheckErr(err)
// Search config in home directory with name ".ipld-ethcl-indexer" (without extension). // Search config in home directory with name ".ipld-eth-beacon-indexer" (without extension).
viper.AddConfigPath(home) viper.AddConfigPath(home)
viper.SetConfigType("yaml") viper.SetConfigType("yaml")
viper.SetConfigName(".ipld-ethcl-indexer") viper.SetConfigName(".ipld-eth-beacon-indexer")
} }
viper.AutomaticEnv() // read in environment variables that match viper.AutomaticEnv() // read in environment variables that match


@ -20,7 +20,7 @@ import (
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/spf13/cobra" "github.com/spf13/cobra"
v "github.com/vulcanize/ipld-ethcl-indexer/pkg/version" v "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/version"
) )
var ( var (
@ -47,7 +47,7 @@ to quickly create a Cobra application.`,
Patch: Patch, Patch: Patch,
Meta: Meta, Meta: Meta,
} }
log.Infof("ipld-ethcl-indexer version: %s", version.GetVersionWithMeta()) log.Infof("ipld-eth-beacon-indexer version: %s", version.GetVersionWithMeta())
fmt.Println(version.GetVersionWithMeta()) fmt.Println(version.GetVersionWithMeta())
}, },
} }


@ -1,9 +1,9 @@
#!/bin/bash #!/bin/bash
sleep 10 sleep 10
echo "Starting ipld-ethcl-indexer" echo "Starting ipld-eth-beacon-indexer"
echo /root/ipld-ethcl-indexer capture ${CAPTURE_MODE} --db.address $DB_ADDRESS \ echo /root/ipld-eth-beacon-indexer capture ${CAPTURE_MODE} --db.address $DB_ADDRESS \
--db.password $DB_PASSWORD \ --db.password $DB_PASSWORD \
--db.port $DB_PORT \ --db.port $DB_PORT \
--db.username $DB_USER \ --db.username $DB_USER \
@ -15,7 +15,7 @@ echo /root/ipld-ethcl-indexer capture ${CAPTURE_MODE} --db.address $DB_ADDRESS \
--t.skipSync=$SKIP_SYNC \ --t.skipSync=$SKIP_SYNC \
--kg.increment $KNOWN_GAP_INCREMENT --kg.increment $KNOWN_GAP_INCREMENT
/root/ipld-ethcl-indexer capture ${CAPTURE_MODE} --db.address $DB_ADDRESS \ /root/ipld-eth-beacon-indexer capture ${CAPTURE_MODE} --db.address $DB_ADDRESS \
--db.password $DB_PASSWORD \ --db.password $DB_PASSWORD \
--db.port $DB_PORT \ --db.port $DB_PORT \
--db.username $DB_USER \ --db.username $DB_USER \
@ -30,10 +30,10 @@ echo /root/ipld-ethcl-indexer capture ${CAPTURE_MODE} --db.address $DB_ADDRESS \
rv=$? rv=$?
if [ $rv != 0 ]; then if [ $rv != 0 ]; then
echo "ipld-ethcl-indexer startup failed" echo "ipld-eth-beacon-indexer startup failed"
echo 1 > /root/HEALTH echo 1 > /root/HEALTH
else else
echo "ipld-ethcl-indexer startup succeeded" echo "ipld-eth-beacon-indexer startup succeeded"
echo 0 > /root/HEALTH echo 0 > /root/HEALTH
fi fi


@ -8,7 +8,7 @@
"driver": "PGX" "driver": "PGX"
}, },
"bc": { "bc": {
"address": "localhost", "address": "10.203.8.51",
"port": 5052, "port": 5052,
"type": "lighthouse", "type": "lighthouse",
"bootRetryInterval": 30, "bootRetryInterval": 30,
@ -24,7 +24,7 @@
"log": { "log": {
"level": "debug", "level": "debug",
"output": true, "output": true,
"file": "./ipld-ethcl-indexer.log", "file": "./ipld-eth-beacon-indexer.log",
"format": "json" "format": "json"
}, },
"kg": { "kg": {

go.mod (2 lines changed)

@ -1,4 +1,4 @@
module github.com/vulcanize/ipld-ethcl-indexer module github.com/vulcanize/ipld-eth-beacon-indexer
go 1.18 go 1.18


@ -22,9 +22,9 @@ import (
"time" "time"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/beaconclient" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql/postgres" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql/postgres"
) )
var ( var (


@ -20,7 +20,7 @@ import (
. "github.com/onsi/ginkgo/v2" . "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/ipld-ethcl-indexer/internal/boot" "github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
) )
var _ = Describe("Boot", func() { var _ = Describe("Boot", func() {


@ -20,10 +20,10 @@ import (
"os" "os"
"time" "time"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/beaconclient" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/gracefulshutdown" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/gracefulshutdown"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
) )
// Shutdown all the internal services for the application. // Shutdown all the internal services for the application.
@ -40,7 +40,7 @@ func ShutdownServices(ctx context.Context, notifierCh chan os.Signal, waitTime t
} }
// Wrapper function for shutting down the head tracking process. // Wrapper function for shutting down the head tracking process.
func ShutdownHeadTracking(ctx context.Context, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error { func ShutdownHeadTracking(ctx context.Context, kgCancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error {
return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{ return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{
// Combining DB shutdown with BC because BC needs DB open to cleanly shutdown. // Combining DB shutdown with BC because BC needs DB open to cleanly shutdown.
"beaconClient": func(ctx context.Context) error { "beaconClient": func(ctx context.Context) error {
@ -50,7 +50,7 @@ func ShutdownHeadTracking(ctx context.Context, notifierCh chan os.Signal, waitTi
loghelper.LogError(err).Error("Unable to trigger shutdown of head tracking") loghelper.LogError(err).Error("Unable to trigger shutdown of head tracking")
} }
if BC.KnownGapsProcess != (beaconclient.KnownGapsProcessing{}) { if BC.KnownGapsProcess != (beaconclient.KnownGapsProcessing{}) {
err = BC.StopKnownGapsProcessing() err = BC.StopKnownGapsProcessing(kgCancel)
if err != nil { if err != nil {
loghelper.LogError(err).Error("Unable to stop processing known gaps") loghelper.LogError(err).Error("Unable to stop processing known gaps")
} }
@ -61,17 +61,17 @@ func ShutdownHeadTracking(ctx context.Context, notifierCh chan os.Signal, waitTi
} }
// Wrapper function for shutting down the head tracking process. // Wrapper function for shutting down the head tracking process.
func ShutdownHistoricProcessing(ctx context.Context, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error { func ShutdownHistoricProcessing(ctx context.Context, kgCancel, hpCancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error {
return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{ return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{
// Combining DB shutdown with BC because BC needs DB open to cleanly shutdown. // Combining DB shutdown with BC because BC needs DB open to cleanly shutdown.
"beaconClient": func(ctx context.Context) error { "beaconClient": func(ctx context.Context) error {
defer DB.Close() defer DB.Close()
err := BC.StopHistoric() err := BC.StopHistoric(hpCancel)
if err != nil { if err != nil {
loghelper.LogError(err).Error("Unable to stop processing historic") loghelper.LogError(err).Error("Unable to stop processing historic")
} }
if BC.KnownGapsProcess != (beaconclient.KnownGapsProcessing{}) { if BC.KnownGapsProcess != (beaconclient.KnownGapsProcessing{}) {
err = BC.StopKnownGapsProcessing() err = BC.StopKnownGapsProcessing(kgCancel)
if err != nil { if err != nil {
loghelper.LogError(err).Error("Unable to stop processing known gaps") loghelper.LogError(err).Error("Unable to stop processing known gaps")
} }
@ -81,6 +81,33 @@ func ShutdownHistoricProcessing(ctx context.Context, notifierCh chan os.Signal,
}) })
} }
// Shutdown the head and historical processing
func ShutdownFull(ctx context.Context, kgCancel, hpCancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error {
return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{
// Combining DB shutdown with BC because BC needs DB open to cleanly shutdown.
"beaconClient": func(ctx context.Context) error {
defer DB.Close()
err := BC.StopHistoric(hpCancel)
if err != nil {
loghelper.LogError(err).Error("Unable to stop processing historic")
}
if BC.KnownGapsProcess != (beaconclient.KnownGapsProcessing{}) {
err = BC.StopKnownGapsProcessing(kgCancel)
if err != nil {
loghelper.LogError(err).Error("Unable to stop processing known gaps")
}
}
err = BC.StopHeadTracking()
if err != nil {
loghelper.LogError(err).Error("Unable to trigger shutdown of head tracking")
}
return err
},
})
}
// Wrapper function for shutting down the application in boot mode. // Wrapper function for shutting down the application in boot mode.
func ShutdownBoot(ctx context.Context, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error { func ShutdownBoot(ctx context.Context, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error {
return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{ return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{


@ -28,11 +28,11 @@ import (
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/r3labs/sse" "github.com/r3labs/sse"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-ethcl-indexer/internal/boot" "github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
"github.com/vulcanize/ipld-ethcl-indexer/internal/shutdown" "github.com/vulcanize/ipld-eth-beacon-indexer/internal/shutdown"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/beaconclient" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/gracefulshutdown" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/gracefulshutdown"
) )
var ( var (
@ -72,8 +72,9 @@ var _ = Describe("Shutdown", func() {
Context("When Channels are empty,", func() { Context("When Channels are empty,", func() {
It("Should Shutdown Successfully.", func() { It("Should Shutdown Successfully.", func() {
go func() { go func() {
_, cancel := context.WithCancel(context.Background())
log.Debug("Starting shutdown chan") log.Debug("Starting shutdown chan")
err = shutdown.ShutdownHeadTracking(ctx, notifierCh, maxWaitSecondsShutdown, DB, BC) err = shutdown.ShutdownHeadTracking(ctx, cancel, notifierCh, maxWaitSecondsShutdown, DB, BC)
log.Debug("We have completed the shutdown...") log.Debug("We have completed the shutdown...")
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
}() }()
@ -84,8 +85,9 @@ var _ = Describe("Shutdown", func() {
shutdownCh := make(chan bool) shutdownCh := make(chan bool)
//log.SetLevel(log.DebugLevel) //log.SetLevel(log.DebugLevel)
go func() { go func() {
_, cancel := context.WithCancel(context.Background())
log.Debug("Starting shutdown chan") log.Debug("Starting shutdown chan")
err = shutdown.ShutdownHeadTracking(ctx, notifierCh, maxWaitSecondsShutdown, DB, BC) err = shutdown.ShutdownHeadTracking(ctx, cancel, notifierCh, maxWaitSecondsShutdown, DB, BC)
log.Debug("We have completed the shutdown...") log.Debug("We have completed the shutdown...")
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
shutdownCh <- true shutdownCh <- true
@ -118,7 +120,8 @@ var _ = Describe("Shutdown", func() {
//log.SetLevel(log.DebugLevel) //log.SetLevel(log.DebugLevel)
go func() { go func() {
log.Debug("Starting shutdown chan") log.Debug("Starting shutdown chan")
err = shutdown.ShutdownHeadTracking(ctx, notifierCh, maxWaitSecondsShutdown, DB, BC) _, cancel := context.WithCancel(context.Background())
err = shutdown.ShutdownHeadTracking(ctx, cancel, notifierCh, maxWaitSecondsShutdown, DB, BC)
log.Debug("We have completed the shutdown...") log.Debug("We have completed the shutdown...")
Expect(err).To(MatchError(gracefulshutdown.TimeoutErr(maxWaitSecondsShutdown.String()))) Expect(err).To(MatchError(gracefulshutdown.TimeoutErr(maxWaitSecondsShutdown.String())))
shutdownCh <- true shutdownCh <- true

ipld-eth-beacon-indexer.log (new file, 1,611 lines; diff suppressed because it is too large)


@ -19,7 +19,7 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>
*/ */
package main package main
import "github.com/vulcanize/ipld-ethcl-indexer/cmd" import "github.com/vulcanize/ipld-eth-beacon-indexer/cmd"
func main() { func main() {
cmd.Execute() cmd.Execute()


@ -22,7 +22,7 @@ import (
"github.com/r3labs/sse" "github.com/r3labs/sse"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
) )
// TODO: Use prysms config values instead of hardcoding them here. // TODO: Use prysms config values instead of hardcoding them here.
@ -59,7 +59,6 @@ type BeaconClient struct {
StartingSlot int // If we're performing head tracking. What is the first slot we processed. StartingSlot int // If we're performing head tracking. What is the first slot we processed.
PreviousSlot int // Whats the previous slot we processed PreviousSlot int // Whats the previous slot we processed
PreviousBlockRoot string // Whats the previous block root, used to check the next blocks parent. PreviousBlockRoot string // Whats the previous block root, used to check the next blocks parent.
CheckKnownGaps bool // Should we check for gaps at start up.
HeadTracking *SseEvents[Head] // Track the head block HeadTracking *SseEvents[Head] // Track the head block
ReOrgTracking *SseEvents[ChainReorg] // Track all Reorgs ReOrgTracking *SseEvents[ChainReorg] // Track all Reorgs
//FinalizationTracking *SseEvents[FinalizedCheckpoint] // Track all finalization checkpoints //FinalizationTracking *SseEvents[FinalizedCheckpoint] // Track all finalization checkpoints
@ -70,7 +69,7 @@ type BeaconClient struct {
// This value is lazily updated. Therefore at times it will be outdated. // This value is lazily updated. Therefore at times it will be outdated.
LatestSlotInBeaconServer int64 LatestSlotInBeaconServer int64
PerformHistoricalProcessing bool // Should we perform historical processing? PerformHistoricalProcessing bool // Should we perform historical processing?
HistoricalProcess historicProcessing // object keeping track of historical processing HistoricalProcess HistoricProcessing // object keeping track of historical processing
} }
// A struct to keep track of relevant the head event topic. // A struct to keep track of relevant the head event topic.


@ -21,7 +21,7 @@ import (
"time" "time"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
) )
// This function will perform all the heavy lifting for tracking the head of the chain. // This function will perform all the heavy lifting for tracking the head of the chain.


@ -39,9 +39,9 @@ import (
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/beaconclient" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql/postgres" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql/postgres"
) )
var ( var (
@ -57,9 +57,26 @@ var (
bcUniqueIdentifier int = 100 bcUniqueIdentifier int = 100
dummyParentRoot string = "46f98c08b54a71dfda4d56e29ec3952b8300cd8d6b67a9b6c562ae96a7a25a42" dummyParentRoot string = "46f98c08b54a71dfda4d56e29ec3952b8300cd8d6b67a9b6c562ae96a7a25a42"
knownGapsTableIncrement int = 100000 knownGapsTableIncrement int = 100000
maxRetry int = 120 maxRetry int = 160
TestEvents = map[string]Message{ TestEvents = map[string]Message{
"0": {
HeadMessage: beaconclient.Head{
Slot: "0",
Block: "0x4d611d5b93fdab69013a7f0a2f961caca0c853f87cfe9595fe50038163079360",
State: "0x7e76880eb67bbdc86250aa578958e9d0675e64e714337855204fb5abaaf82c2b",
CurrentDutyDependentRoot: "",
PreviousDutyDependentRoot: "",
EpochTransition: false,
ExecutionOptimistic: false,
},
SignedBeaconBlock: filepath.Join("ssz-data", "0", "signed-beacon-block.ssz"),
BeaconState: filepath.Join("ssz-data", "0", "beacon-state.ssz"),
CorrectSignedBeaconBlockMhKey: "/blocks/QLVAEQRQPA2GINRRGFSDKYRZGNTGIYLCGY4TAMJTME3WMMDBGJTDSNRRMNQWGYJQMM4DKM3GHA3WGZTFHE2TSNLGMU2TAMBTHAYTMMZQG44TGNRQ",
CorrectBeaconStateMhKey: "/blocks/QLVAEQRQPA3WKNZWHA4DAZLCGY3WEYTEMM4DMMRVGBQWCNJXHA4TKODFHFSDANRXGVSTMNDFG4YTIMZTG44DKNJSGA2GMYRVMFRGCYLGHAZGGMTC",
CorrectParentRoot: "0x0000000000000000000000000000000000000000000000000000000000000000",
CorrectEth1BlockHash: "0x0000000000000000000000000000000000000000000000000000000000000000",
},
"100-dummy": { "100-dummy": {
HeadMessage: beaconclient.Head{ HeadMessage: beaconclient.Head{
Slot: "100", Slot: "100",
@ -118,9 +135,13 @@ var (
EpochTransition: false, EpochTransition: false,
ExecutionOptimistic: false, ExecutionOptimistic: false,
}, },
TestNotes: "An easy to process Phase 0 block", TestNotes: "An easy to process Phase 0 block",
SignedBeaconBlock: filepath.Join("ssz-data", "100", "signed-beacon-block.ssz"), SignedBeaconBlock: filepath.Join("ssz-data", "100", "signed-beacon-block.ssz"),
BeaconState: filepath.Join("ssz-data", "100", "beacon-state.ssz"), BeaconState: filepath.Join("ssz-data", "100", "beacon-state.ssz"),
CorrectSignedBeaconBlockMhKey: "/blocks/QLVAEQRQPA2TQMRRHA3WKOJXMY3TKMRQMJRDMOLFMVQTAMJUMMZTQMZUMM4TMNDDGQ2TENJZGM3TEYJQMVQWCZLBGNTDAMZSGAYTGNZZG44TSNTC",
CorrectBeaconStateMhKey: "/blocks/QLVAEQRQPBTDEOBWMEYDGNZZMMYDGOBWMEZWGN3CMUZDQZBQGVSDQMRZMY4GKYRXMIZDQMDDMM4WKZDFGE2TINBZMFTDEMDFMJRWIMBWME3WCNJW",
CorrectParentRoot: "0x629ae1587895043076500f4f5dcb202a47c2fc95d5b5c548cb83bc97bd2dbfe1",
CorrectEth1BlockHash: "0x8d3f027beef5cbd4f8b29fc831aba67a5d74768edca529f5596f07fd207865e1",
}, },
"101": { "101": {
HeadMessage: beaconclient.Head{ HeadMessage: beaconclient.Head{
@ -132,9 +153,12 @@ var (
EpochTransition: false, EpochTransition: false,
ExecutionOptimistic: false, ExecutionOptimistic: false,
}, },
TestNotes: "An easy to process Phase 0 block", TestNotes: "An easy to process Phase 0 block",
SignedBeaconBlock: filepath.Join("ssz-data", "101", "signed-beacon-block.ssz"), SignedBeaconBlock: filepath.Join("ssz-data", "101", "signed-beacon-block.ssz"),
BeaconState: filepath.Join("ssz-data", "101", "beacon-state.ssz"), BeaconState: filepath.Join("ssz-data", "101", "beacon-state.ssz"),
CorrectEth1BlockHash: "0x8d3f027beef5cbd4f8b29fc831aba67a5d74768edca529f5596f07fd207865e1",
CorrectSignedBeaconBlockMhKey: "/blocks/QLVAEQRQPBQWEZJRME4TOMTFGUYTEMJYGJSDANDGGBSDIYJVMM4WGMRVMY4WKZJVG5RTEZJZMQYGMZRTMY2GGNDDHAZGMZBUGJSDCM3EGMYTAOBT",
CorrectBeaconStateMhKey: "/blocks/QLVAEQRQPBRWEMBUMFQTEZLEMJTDCM3DG5RGEN3FG5RGIOLCGYZDCY3FMQ3DQMZSMUYDANZVMU4DSMJUG4ZTKMTFMFRTGMBRHFQTQMRUMNSTQNBX",
}, },
"2375703-dummy": { "2375703-dummy": {
HeadMessage: beaconclient.Head{ HeadMessage: beaconclient.Head{
@ -176,19 +200,24 @@ var (
Block: "0x4392372c5f6e39499e31bf924388b5815639103149f0f54f8a453773b1802301", Block: "0x4392372c5f6e39499e31bf924388b5815639103149f0f54f8a453773b1802301",
State: "0xb6215b560273af63ec7e011572b60ec1ca0b0232f8ff44fcd4ed55c7526e964e", State: "0xb6215b560273af63ec7e011572b60ec1ca0b0232f8ff44fcd4ed55c7526e964e",
CurrentDutyDependentRoot: "", PreviousDutyDependentRoot: "", EpochTransition: false, ExecutionOptimistic: false}, CurrentDutyDependentRoot: "", PreviousDutyDependentRoot: "", EpochTransition: false, ExecutionOptimistic: false},
TestNotes: "An easy to process Altair Block", TestNotes: "An easy to process Altair Block",
SignedBeaconBlock: filepath.Join("ssz-data", "2375703", "signed-beacon-block.ssz"), SignedBeaconBlock: filepath.Join("ssz-data", "2375703", "signed-beacon-block.ssz"),
BeaconState: filepath.Join("ssz-data", "2375703", "beacon-state.ssz"), BeaconState: filepath.Join("ssz-data", "2375703", "beacon-state.ssz"),
CorrectEth1BlockHash: "0xd74b1c60423651624de6bb301ac25808951c167ba6ecdd9b2e79b4315aee8202",
CorrectParentRoot: "0x08736ddc20b77f65d1aa6301f7e6e856a820ff3ce6430ed2c3694ae35580e740",
CorrectSignedBeaconBlockMhKey: "/blocks/QLVAEQRQPA2DGOJSGM3TEYZVMY3GKMZZGQ4TSZJTGFRGMOJSGQZTQODCGU4DCNJWGM4TCMBTGE2DSZRQMY2TIZRYME2DKMZXG4ZWEMJYGAZDGMBR",
CorrectBeaconStateMhKey: "/blocks/QLVAEQRQPBRDMMRRGVRDKNRQGI3TGYLGGYZWKYZXMUYDCMJVG4ZGENRQMVRTCY3BGBRDAMRTGJTDQZTGGQ2GMY3EGRSWINJVMM3TKMRWMU4TMNDF",
}, },
"3797056": { "3797056": {
HeadMessage: beaconclient.Head{ HeadMessage: beaconclient.Head{
Slot: "3797056", Slot: "3797056",
Block: "", Block: "",
State: "0xb6215b560273af63ec7e011572b60ec1ca0b0232f8ff44fcd4ed55c7526e964e", State: "",
CurrentDutyDependentRoot: "", PreviousDutyDependentRoot: "", EpochTransition: false, ExecutionOptimistic: false}, CurrentDutyDependentRoot: "", PreviousDutyDependentRoot: "", EpochTransition: false, ExecutionOptimistic: false},
TestNotes: "An easy to process Altair Block", TestNotes: "An easy to process Altair Block",
SignedBeaconBlock: filepath.Join("ssz-data", "2375703", "signed-beacon-block.ssz"), // The file below should not exist, this will trigger an error message and 404 response from the mock.
BeaconState: filepath.Join("ssz-data", "2375703", "beacon-state.ssz"), SignedBeaconBlock: filepath.Join("ssz-data", "3797056", "should-not-exist.txt"),
BeaconState: filepath.Join("ssz-data", "3797056", "beacon-state.ssz"),
}, },
} }
TestConfig = Config{ TestConfig = Config{
@ -214,11 +243,15 @@ var (
) )
type Message struct { type Message struct {
HeadMessage beaconclient.Head // The head message that will be streamed to the BeaconClient TestNotes string // A small explanation of the role this structure plays in the testing landscape.
TestNotes string // A small explanation of the role this structure plays in the testing landscape. MimicConfig *MimicConfig // A configuration of parameters that you are trying to mimic.
MimicConfig *MimicConfig // A configuration of parameters that you are trying to mimic. SignedBeaconBlock string // The file path output of an SSZ encoded SignedBeaconBlock.
SignedBeaconBlock string // The file path output of an SSZ encoded SignedBeaconBlock. SignedBeaconBlock string // The file path output of an SSZ encoded SignedBeaconBlock.
BeaconState string // The file path output of an SSZ encoded BeaconState. BeaconState string // The file path output of an SSZ encoded BeaconState.
CorrectSignedBeaconBlockMhKey string // The correct MhKey for the signedBeaconBlock
CorrectBeaconStateMhKey string // The correct MhKey for the beaconState
CorrectParentRoot string // The correct parent root
CorrectEth1BlockHash string // The correct eth1blockHash
} }
// A structure that can be utilized to mimic an existing SSZ object but change it ever so slightly. // A structure that can be utilized to mimic an existing SSZ object but change it ever so slightly.
@ -228,7 +261,7 @@ type MimicConfig struct {
ForkVersion string // Specify the fork version. This is needed as a workaround to create dummy SignedBeaconBlocks. ForkVersion string // Specify the fork version. This is needed as a workaround to create dummy SignedBeaconBlocks.
} }
var _ = Describe("Capturehead", func() { var _ = Describe("Capturehead", Label("head"), func() {
Describe("Receiving New Head SSE messages", Label("unit", "behavioral"), func() { Describe("Receiving New Head SSE messages", Label("unit", "behavioral"), func() {
Context("Correctly formatted Phase0 Block", func() { Context("Correctly formatted Phase0 Block", func() {
@ -237,8 +270,8 @@ var _ = Describe("Capturehead", func() {
BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
defer httpmock.DeactivateAndReset() defer httpmock.DeactivateAndReset()
BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, maxRetry, 1, 0, 0) BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, maxRetry, 1, 0, 0)
validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, "0x629ae1587895043076500f4f5dcb202a47c2fc95d5b5c548cb83bc97bd2dbfe1", "0x8d3f027beef5cbd4f8b29fc831aba67a5d74768edca529f5596f07fd207865e1", "/blocks/QHVAEQBQGQ4TKNJUGAYDGNZRGM2DOZJSGZTDMMLEG5QTIYTCMRQTKYRSGNTGCMDCGI2WINLGMM2DMNJRGYYGMMTBHEZGINJSME3DGYRZGE4WE") validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectParentRoot, BeaconNodeTester.TestEvents["100"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["100"].CorrectSignedBeaconBlockMhKey)
validateBeaconState(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, "/blocks/QHVAEQRQPBTDEOBWMEYDGNZZMMYDGOBWMEZWGN3CMUZDQZBQGVSDQMRZMY4GKYRXMIZDQMDDMM4WKZDFGE2TINBZMFTDEMDFMJRWIMBWME3WCNJW") validateBeaconState(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectBeaconStateMhKey)
}) })
}) })
@ -248,8 +281,8 @@ var _ = Describe("Capturehead", func() {
BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
defer httpmock.DeactivateAndReset() defer httpmock.DeactivateAndReset()
BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0)
validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, "0x83154c692b9cce50bdf56af5a933da0a020ed7ff809a6a8236301094c7f25276", "0xd74b1c60423651624de6bb301ac25808951c167ba6ecdd9b2e79b4315aee8202", "/blocks/QHVAEQRQPA2DGOJSGM3TEYZVMY3GKMZZGQ4TSZJTGFRGMOJSGQZTQODCGU4DCNJWGM4TCMBTGE2DSZRQMY2TIZRYME2DKMZXG4ZWEMJYGAZDGMBR") validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectParentRoot, BeaconNodeTester.TestEvents["2375703"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["2375703"].CorrectSignedBeaconBlockMhKey)
validateBeaconState(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, "/blocks/QHVAEQRQPBRDMMRRGVRDKNRQGI3TGYLGGYZWKYZXMUYDCMJVG4ZGENRQMVRTCY3BGBRDAMRTGJTDQZTGGQ2GMY3EGRSWINJVMM3TKMRWMU4TMNDF") validateBeaconState(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectBeaconStateMhKey)
}) })
}) })
Context("Correctly formatted Altair Test Blocks", func() { Context("Correctly formatted Altair Test Blocks", func() {
@ -441,18 +474,19 @@ func setUpTest(config Config, maxSlot string) *beaconclient.BeaconClient {
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
// Drop all records from the DB. // Drop all records from the DB.
clearEthclDbTables(db) clearEthBeaconDbTables(db)
// Add a slot to the ethcl.slots table so we can control how known_gaps are handled. // Add a slot to the eth_beacon.slots table so we can control how known_gaps are handled.
writeSlot(db, maxSlot) writeSlot(db, maxSlot)
bc.Db = db bc.Db = db
return bc return bc
} }
// A helper function to validate the expected output from the ethcl.slots table. // A helper function to validate the expected output from the eth_beacon.slots table.
func validateSlot(bc *beaconclient.BeaconClient, headMessage beaconclient.Head, correctEpoch int, correctStatus string) { func validateSlot(bc *beaconclient.BeaconClient, headMessage beaconclient.Head, correctEpoch int, correctStatus string) {
epoch, dbSlot, blockRoot, stateRoot, status := queryDbSlotAndBlock(bc.Db, headMessage.Slot, headMessage.Block) epoch, dbSlot, blockRoot, stateRoot, status := queryDbSlotAndBlock(bc.Db, headMessage.Slot, headMessage.Block)
log.Info("validateSlot: ", headMessage)
baseSlot, err := strconv.Atoi(headMessage.Slot) baseSlot, err := strconv.Atoi(headMessage.Slot)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(dbSlot).To(Equal(baseSlot)) Expect(dbSlot).To(Equal(baseSlot))
@ -462,27 +496,29 @@ func validateSlot(bc *beaconclient.BeaconClient, headMessage beaconclient.Head,
Expect(status).To(Equal(correctStatus)) Expect(status).To(Equal(correctStatus))
} }
// A helper function to validate the expected output from the ethcl.signed_beacon_block table. // A helper function to validate the expected output from the eth_beacon.signed_block table.
func validateSignedBeaconBlock(bc *beaconclient.BeaconClient, headMessage beaconclient.Head, correctParentRoot string, correctEth1BlockHash string, correctMhKey string) { func validateSignedBeaconBlock(bc *beaconclient.BeaconClient, headMessage beaconclient.Head, correctParentRoot string, correctEth1BlockHash string, correctMhKey string) {
dbSlot, blockRoot, parentRoot, eth1BlockHash, mhKey := queryDbSignedBeaconBlock(bc.Db, headMessage.Slot, headMessage.Block) dbSlot, blockRoot, parentRoot, eth1BlockHash, mhKey := queryDbSignedBeaconBlock(bc.Db, headMessage.Slot, headMessage.Block)
log.Info("validateSignedBeaconBlock: ", headMessage)
baseSlot, err := strconv.Atoi(headMessage.Slot) baseSlot, err := strconv.Atoi(headMessage.Slot)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(dbSlot).To(Equal(baseSlot)) Expect(dbSlot).To(Equal(baseSlot))
Expect(blockRoot).To(Equal(headMessage.Block)) Expect(blockRoot).To(Equal(headMessage.Block))
Expect(parentRoot, correctParentRoot) Expect(parentRoot).To(Equal(correctParentRoot))
Expect(eth1BlockHash, correctEth1BlockHash) Expect(eth1BlockHash).To(Equal(correctEth1BlockHash))
Expect(mhKey, correctMhKey) Expect(mhKey).To(Equal(correctMhKey))
} }
// A helper function to validate the expected output from the ethcl.beacon_state table. // A helper function to validate the expected output from the eth_beacon.state table.
func validateBeaconState(bc *beaconclient.BeaconClient, headMessage beaconclient.Head, correctMhKey string) { func validateBeaconState(bc *beaconclient.BeaconClient, headMessage beaconclient.Head, correctMhKey string) {
dbSlot, stateRoot, mhKey := queryDbBeaconState(bc.Db, headMessage.Slot, headMessage.State) dbSlot, stateRoot, mhKey := queryDbBeaconState(bc.Db, headMessage.Slot, headMessage.State)
log.Info("validateBeaconState: ", headMessage)
baseSlot, err := strconv.Atoi(headMessage.Slot) baseSlot, err := strconv.Atoi(headMessage.Slot)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(dbSlot).To(Equal(baseSlot)) Expect(dbSlot).To(Equal(baseSlot))
Expect(stateRoot).To(Equal(headMessage.State)) Expect(stateRoot).To(Equal(headMessage.State))
Expect(mhKey, correctMhKey) Expect(mhKey).To(Equal(correctMhKey))
} }
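A note on the assertion changes above: in Gomega, Expect on its own only builds an assertion, so the old form Expect(mhKey, correctMhKey) never compared anything; a matcher such as To(Equal(...)) has to be applied before a mismatch can fail the test. A small sketch of the difference (a standalone illustration using NewWithT for self-containment, not part of this test suite):

package sketch

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestExpectNeedsAMatcher(t *testing.T) {
	g := NewWithT(t)

	// No matcher is applied, so nothing is ever verified here.
	g.Expect("actual")

	// The matcher performs the comparison and fails the test on mismatch.
	g.Expect("actual").To(Equal("actual"))
}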
@ -514,21 +550,21 @@ func sendHeadMessage(bc *beaconclient.BeaconClient, head beaconclient.Head, maxR
} }
} }
// A helper function to query the ethcl.slots table based on the slot and block_root // A helper function to query the eth_beacon.slots table based on the slot and block_root
func queryDbSlotAndBlock(db sql.Database, querySlot string, queryBlockRoot string) (int, int, string, string, string) { func queryDbSlotAndBlock(db sql.Database, querySlot string, queryBlockRoot string) (int, int, string, string, string) {
sqlStatement := `SELECT epoch, slot, block_root, state_root, status FROM ethcl.slots WHERE slot=$1 AND block_root=$2;` sqlStatement := `SELECT epoch, slot, block_root, state_root, status FROM eth_beacon.slots WHERE slot=$1 AND block_root=$2;`
var epoch, slot int var epoch, slot int
var blockRoot, stateRoot, status string var blockRoot, stateRoot, status string
log.Debug("Starting to query the ethcl.slots table, ", querySlot, " ", queryBlockRoot) log.Debug("Starting to query the eth_beacon.slots table, ", querySlot, " ", queryBlockRoot)
err := db.QueryRow(context.Background(), sqlStatement, querySlot, queryBlockRoot).Scan(&epoch, &slot, &blockRoot, &stateRoot, &status) err := db.QueryRow(context.Background(), sqlStatement, querySlot, queryBlockRoot).Scan(&epoch, &slot, &blockRoot, &stateRoot, &status)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
log.Debug("Querying the ethcl.slots table complete") log.Debug("Querying the eth_beacon.slots table complete")
return epoch, slot, blockRoot, stateRoot, status return epoch, slot, blockRoot, stateRoot, status
} }
// A helper function to query the ethcl.signed_beacon_block table based on the slot and block_root. // A helper function to query the eth_beacon.signed_block table based on the slot and block_root.
func queryDbSignedBeaconBlock(db sql.Database, querySlot string, queryBlockRoot string) (int, string, string, string, string) { func queryDbSignedBeaconBlock(db sql.Database, querySlot string, queryBlockRoot string) (int, string, string, string, string) {
sqlStatement := `SELECT slot, block_root, parent_block_root, eth1_block_hash, mh_key FROM ethcl.signed_beacon_block WHERE slot=$1 AND block_root=$2;` sqlStatement := `SELECT slot, block_root, parent_block_root, eth1_block_hash, mh_key FROM eth_beacon.signed_block WHERE slot=$1 AND block_root=$2;`
var slot int var slot int
var blockRoot, parent_block_root, eth1_block_hash, mh_key string var blockRoot, parent_block_root, eth1_block_hash, mh_key string
row := db.QueryRow(context.Background(), sqlStatement, querySlot, queryBlockRoot) row := db.QueryRow(context.Background(), sqlStatement, querySlot, queryBlockRoot)
@ -537,9 +573,9 @@ func queryDbSignedBeaconBlock(db sql.Database, querySlot string, queryBlockRoot
return slot, blockRoot, parent_block_root, eth1_block_hash, mh_key return slot, blockRoot, parent_block_root, eth1_block_hash, mh_key
} }
// A helper function to query the ethcl.signed_beacon_block table based on the slot and block_root. // A helper function to query the eth_beacon.signed_block table based on the slot and block_root.
func queryDbBeaconState(db sql.Database, querySlot string, queryStateRoot string) (int, string, string) { func queryDbBeaconState(db sql.Database, querySlot string, queryStateRoot string) (int, string, string) {
sqlStatement := `SELECT slot, state_root, mh_key FROM ethcl.beacon_state WHERE slot=$1 AND state_root=$2;` sqlStatement := `SELECT slot, state_root, mh_key FROM eth_beacon.state WHERE slot=$1 AND state_root=$2;`
var slot int var slot int
var stateRoot, mh_key string var stateRoot, mh_key string
row := db.QueryRow(context.Background(), sqlStatement, querySlot, queryStateRoot) row := db.QueryRow(context.Background(), sqlStatement, querySlot, queryStateRoot)
@ -551,7 +587,7 @@ func queryDbBeaconState(db sql.Database, querySlot string, queryStateRoot string
// Count the entries in the knownGaps table. // Count the entries in the knownGaps table.
func countKnownGapsTable(db sql.Database) int { func countKnownGapsTable(db sql.Database) int {
var count int var count int
sqlStatement := "SELECT COUNT(*) FROM ethcl.known_gaps" sqlStatement := "SELECT COUNT(*) FROM eth_beacon.known_gaps"
err := db.QueryRow(context.Background(), sqlStatement).Scan(&count) err := db.QueryRow(context.Background(), sqlStatement).Scan(&count)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
return count return count
@ -559,25 +595,24 @@ func countKnownGapsTable(db sql.Database) int {
// Return the start and end slot // Return the start and end slot
func queryKnownGaps(db sql.Database, queryStartGap string, QueryEndGap string) (int, int) { func queryKnownGaps(db sql.Database, queryStartGap string, QueryEndGap string) (int, int) {
sqlStatement := `SELECT start_slot, end_slot FROM ethcl.known_gaps WHERE start_slot=$1 AND end_slot=$2;` sqlStatement := `SELECT start_slot, end_slot FROM eth_beacon.known_gaps WHERE start_slot=$1 AND end_slot=$2;`
var startGap, endGap int var startGap, endGap int
row := db.QueryRow(context.Background(), sqlStatement, queryStartGap, QueryEndGap) row := db.QueryRow(context.Background(), sqlStatement, queryStartGap, QueryEndGap)
err := row.Scan(&startGap, &endGap) err := row.Scan(&startGap, &endGap)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
return startGap, endGap return startGap, endGap
} }
// A function that will remove all entries from the ethcl tables for you. // A function that will remove all entries from the eth_beacon tables for you.
func clearEthclDbTables(db sql.Database) { func clearEthBeaconDbTables(db sql.Database) {
deleteQueries := []string{"DELETE FROM ethcl.slots;", "DELETE FROM ethcl.signed_beacon_block;", "DELETE FROM ethcl.beacon_state;", "DELETE FROM ethcl.known_gaps;", "DELETE FROM ethcl.historic_process;"} deleteQueries := []string{"DELETE FROM eth_beacon.slots;", "DELETE FROM eth_beacon.signed_block;", "DELETE FROM eth_beacon.state;", "DELETE FROM eth_beacon.known_gaps;", "DELETE FROM eth_beacon.historic_process;", "DELETE FROM public.blocks;"}
for _, queries := range deleteQueries { for _, queries := range deleteQueries {
_, err := db.Exec(context.Background(), queries) _, err := db.Exec(context.Background(), queries)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
} }
} }
// Write an entry to the ethcl.slots table with just a slot number // Write an entry to the eth_beacon.slots table with just a slot number
func writeSlot(db sql.Database, slot string) { func writeSlot(db sql.Database, slot string) {
_, err := db.Exec(context.Background(), beaconclient.UpsertSlotsStmt, "0", slot, "", "", "") _, err := db.Exec(context.Background(), beaconclient.UpsertSlotsStmt, "0", slot, "", "", "")
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -653,8 +688,7 @@ func (tbc TestBeaconNode) SetupBeaconNodeMock(TestEvents map[string]Message, pro
id := httpmock.MustGetSubmatch(req, 1) id := httpmock.MustGetSubmatch(req, 1)
dat, err := tbc.provideSsz(id, "state", dummyParentRoot) dat, err := tbc.provideSsz(id, "state", dummyParentRoot)
if err != nil { if err != nil {
Expect(err).NotTo(HaveOccurred()) return httpmock.NewStringResponse(404, fmt.Sprintf("Unable to find file for %s", id)), nil
return httpmock.NewStringResponse(404, fmt.Sprintf("Unable to find file for %s", id)), err
} }
return httpmock.NewBytesResponse(200, dat), nil return httpmock.NewBytesResponse(200, dat), nil
}, },
@ -667,8 +701,7 @@ func (tbc TestBeaconNode) SetupBeaconNodeMock(TestEvents map[string]Message, pro
id := httpmock.MustGetSubmatch(req, 1) id := httpmock.MustGetSubmatch(req, 1)
dat, err := tbc.provideSsz(id, "block", dummyParentRoot) dat, err := tbc.provideSsz(id, "block", dummyParentRoot)
if err != nil { if err != nil {
Expect(err).NotTo(HaveOccurred()) return httpmock.NewStringResponse(404, fmt.Sprintf("Unable to find file for %s", id)), nil
return httpmock.NewStringResponse(404, fmt.Sprintf("Unable to find file for %s", id)), err
} }
return httpmock.NewBytesResponse(200, dat), nil return httpmock.NewBytesResponse(200, dat), nil
}, },
@ -691,7 +724,6 @@ func (tbc TestBeaconNode) SetupBeaconNodeMock(TestEvents map[string]Message, pro
// Provide the Block root // Provide the Block root
func (tbc TestBeaconNode) provideBlockRoot(slot string) ([]byte, error) { func (tbc TestBeaconNode) provideBlockRoot(slot string) ([]byte, error) {
for _, val := range tbc.TestEvents { for _, val := range tbc.TestEvents {
if val.HeadMessage.Slot == slot && val.MimicConfig == nil { if val.HeadMessage.Slot == slot && val.MimicConfig == nil {
block, err := hex.DecodeString(val.HeadMessage.Block[2:]) block, err := hex.DecodeString(val.HeadMessage.Block[2:])

View File

@ -18,29 +18,30 @@
package beaconclient package beaconclient
import ( import (
"context"
"fmt" "fmt"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
// This function will perform all the heavy lifting for processing historic slots. // This function will perform all the heavy lifting for processing historic slots.
func (bc *BeaconClient) CaptureHistoric(maxWorkers int) []error { func (bc *BeaconClient) CaptureHistoric(ctx context.Context, maxWorkers int) []error {
log.Info("We are starting the historical processing service.") log.Info("We are starting the historical processing service.")
bc.HistoricalProcess = historicProcessing{db: bc.Db, metrics: bc.Metrics} bc.HistoricalProcess = HistoricProcessing{db: bc.Db, metrics: bc.Metrics, uniqueNodeIdentifier: bc.UniqueNodeIdentifier}
errs := handleBatchProcess(maxWorkers, bc.HistoricalProcess, bc.HistoricalProcess.finishProcessing, bc.HistoricalProcess.db, bc.ServerEndpoint, bc.Metrics, bc.CheckDb) errs := handleBatchProcess(ctx, maxWorkers, bc.HistoricalProcess, bc.HistoricalProcess.db, bc.ServerEndpoint, bc.Metrics, bc.CheckDb)
log.Debug("Exiting Historical") log.Debug("Exiting Historical")
return errs return errs
} }
// This function will perform all the necessary clean up tasks for stopping historical processing. // This function will perform all the necessary clean up tasks for stopping historical processing.
func (bc *BeaconClient) StopHistoric() error { func (bc *BeaconClient) StopHistoric(cancel context.CancelFunc) error {
log.Info("We are stopping the historical processing service.") log.Info("We are stopping the historical processing service.")
err := bc.HistoricalProcess.releaseDbLocks() err := bc.HistoricalProcess.releaseDbLocks(cancel)
if err != nil { if err != nil {
loghelper.LogError(err).WithField("uniqueIdentifier", bc.UniqueNodeIdentifier).Error("We were unable to remove the locks from the ethcl.historic_processing table. Manual Intervention is needed!") loghelper.LogError(err).WithField("uniqueIdentifier", bc.UniqueNodeIdentifier).Error("We were unable to remove the locks from the eth_beacon.historic_processing table. Manual Intervention is needed!")
} }
return nil return nil
} }
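With the context-based lifecycle, the caller owns both halves of the shutdown: it creates the context, starts CaptureHistoric in a goroutine, and later hands the cancel function to StopHistoric so the checked-out rows can be released. A minimal sketch of that call pattern, assuming a fully initialized BeaconClient built elsewhere (the two workers and the 30-second window are arbitrary values for illustration):

package example

import (
	"context"
	"time"

	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
)

// runHistoricBriefly drives historical processing for a short window and then shuts it down.
func runHistoricBriefly(bc *beaconclient.BeaconClient) error {
	ctx, cancel := context.WithCancel(context.Background())

	// CaptureHistoric runs until the context is cancelled, so start it in its own goroutine.
	go bc.CaptureHistoric(ctx, 2) // 2 == maxWorkers, an arbitrary choice here

	// Let the workers drain eth_beacon.historic_process for a while.
	time.Sleep(30 * time.Second)

	// StopHistoric cancels the context and releases any rows this node checked out.
	return bc.StopHistoric(cancel)
}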
@ -51,10 +52,10 @@ func (bc *BeaconClient) StopHistoric() error {
// //
// 2. Known Gaps Processing // 2. Known Gaps Processing
type BatchProcessing interface { type BatchProcessing interface {
getSlotRange(chan<- slotsToProcess) []error // Write the slots to process in a channel, return an error if you can't get the next slots to write. getSlotRange(context.Context, chan<- slotsToProcess) []error // Write the slots to process in a channel, return an error if you can't get the next slots to write.
handleProcessingErrors(<-chan batchHistoricError) // Custom logic to handle errors. handleProcessingErrors(context.Context, <-chan batchHistoricError) // Custom logic to handle errors.
removeTableEntry(<-chan slotsToProcess) error // With the provided start and end slot, remove the entry from the database. removeTableEntry(context.Context, <-chan slotsToProcess) error // With the provided start and end slot, remove the entry from the database.
releaseDbLocks() error // Update the checked_out column to false for whatever table is being updated. releaseDbLocks(context.CancelFunc) error // Update the checked_out column to false for whatever table is being updated.
} }
/// ^^^ /// ^^^
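To make the contract concrete, here is a toy, in-memory implementation with the same four-method shape. The slotRange, rangeError, and memorySource names are invented stand-ins for the package's unexported slotsToProcess, batchHistoricError, and historic/known-gaps processors; the real implementations read and update the eth_beacon tables instead of a slice.

package sketch

import (
	"context"
	"fmt"
)

// Stand-ins for the package's unexported slotsToProcess and batchHistoricError types.
type slotRange struct{ startSlot, endSlot int }

type rangeError struct {
	slot       int
	errProcess string
	err        error
}

// memorySource is a toy batch-processing source backed by a slice instead of
// the eth_beacon.historic_process or eth_beacon.known_gaps tables.
type memorySource struct {
	pending []slotRange
}

// getSlotRange writes every pending range to the channel, stopping early on cancellation.
func (m *memorySource) getSlotRange(ctx context.Context, out chan<- slotRange) []error {
	for _, r := range m.pending {
		select {
		case <-ctx.Done():
			return nil
		case out <- r:
		}
	}
	return nil
}

// handleProcessingErrors only logs; the real code updates reprocessing_error and priority.
func (m *memorySource) handleProcessingErrors(ctx context.Context, errs <-chan rangeError) {
	for {
		select {
		case <-ctx.Done():
			return
		case e := <-errs:
			fmt.Printf("slot %d failed during %s: %v\n", e.slot, e.errProcess, e.err)
		}
	}
}

// removeTableEntry drains successfully processed ranges; the real code deletes the DB rows.
func (m *memorySource) removeTableEntry(ctx context.Context, done <-chan slotRange) error {
	for {
		select {
		case <-ctx.Done():
			return nil
		case r := <-done:
			fmt.Printf("finished slots %d-%d\n", r.startSlot, r.endSlot)
		}
	}
}

// releaseDbLocks would flip checked_out back to false; here it simply cancels the context.
func (m *memorySource) releaseDbLocks(cancel context.CancelFunc) error {
	cancel()
	return nil
}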
@ -89,7 +90,7 @@ type batchHistoricError struct {
// 4. Remove the slot entry from the DB. // 4. Remove the slot entry from the DB.
// //
// 5. Handle any errors. // 5. Handle any errors.
func handleBatchProcess(maxWorkers int, bp BatchProcessing, finishCh chan int, db sql.Database, serverEndpoint string, metrics *BeaconClientMetrics, checkDb bool) []error { func handleBatchProcess(ctx context.Context, maxWorkers int, bp BatchProcessing, db sql.Database, serverEndpoint string, metrics *BeaconClientMetrics, checkDb bool) []error {
slotsCh := make(chan slotsToProcess) slotsCh := make(chan slotsToProcess)
workCh := make(chan int) workCh := make(chan int)
processedCh := make(chan slotsToProcess) processedCh := make(chan slotsToProcess)
@ -99,30 +100,40 @@ func handleBatchProcess(maxWorkers int, bp BatchProcessing, finishCh chan int, d
// Start workers // Start workers
for w := 1; w <= maxWorkers; w++ { for w := 1; w <= maxWorkers; w++ {
log.WithFields(log.Fields{"maxWorkers": maxWorkers}).Debug("Starting batch processing workers") log.WithFields(log.Fields{"maxWorkers": maxWorkers}).Debug("Starting batch processing workers")
go processSlotRangeWorker(workCh, errCh, db, serverEndpoint, metrics, checkDb) go processSlotRangeWorker(ctx, workCh, errCh, db, serverEndpoint, metrics, checkDb)
} }
// Process all ranges and send each individual slot to the worker. // Process all ranges and send each individual slot to the worker.
go func() { go func() {
for slots := range slotsCh { for {
if slots.startSlot > slots.endSlot { select {
log.Error("We received a batch process request where the startSlot is greater than the end slot.") case <-ctx.Done():
errCh <- batchHistoricError{ return
err: fmt.Errorf("We received a startSlot where the start was greater than the end."), case slots := <-slotsCh:
errProcess: "RangeOrder", if slots.startSlot > slots.endSlot {
slot: slots.startSlot, log.Error("We received a batch process request where the startSlot is greater than the end slot.")
errCh <- batchHistoricError{
err: fmt.Errorf("We received a startSlot where the start was greater than the end."),
errProcess: "RangeOrder",
slot: slots.startSlot,
}
errCh <- batchHistoricError{
err: fmt.Errorf("We received a endSlot where the start was greater than the end."),
errProcess: "RangeOrder",
slot: slots.endSlot,
}
} else if slots.startSlot == slots.endSlot {
log.WithField("slot", slots.startSlot).Debug("Added new slot to workCh")
workCh <- slots.startSlot
} else {
for i := slots.startSlot; i <= slots.endSlot; i++ {
workCh <- i
log.WithField("slot", i).Debug("Added new slot to workCh")
}
processedCh <- slots
} }
errCh <- batchHistoricError{
err: fmt.Errorf("We received a endSlot where the start was greater than the end."),
errProcess: "RangeOrder",
slot: slots.endSlot,
}
} else {
for i := slots.startSlot; i <= slots.endSlot; i++ {
workCh <- i
}
processedCh <- slots
} }
} }
}() }()
@ -130,26 +141,27 @@ func handleBatchProcess(maxWorkers int, bp BatchProcessing, finishCh chan int, d
go func() { go func() {
errG := new(errgroup.Group) errG := new(errgroup.Group)
errG.Go(func() error { errG.Go(func() error {
return bp.removeTableEntry(processedCh) return bp.removeTableEntry(ctx, processedCh)
}) })
if err := errG.Wait(); err != nil { if err := errG.Wait(); err != nil {
finalErrCh <- []error{err} finalErrCh <- []error{err}
} }
}() }()
// Process errors from slot processing. // Process errors from slot processing.
go bp.handleProcessingErrors(errCh) go bp.handleProcessingErrors(ctx, errCh)
// Get slots from the DB. // Get slots from the DB.
go func() { go func() {
errs := bp.getSlotRange(slotsCh) // Periodically adds new entries.... errs := bp.getSlotRange(ctx, slotsCh) // Periodically adds new entries....
if errs != nil { if errs != nil {
finalErrCh <- errs finalErrCh <- errs
} }
finalErrCh <- nil finalErrCh <- nil
log.Debug("We are stopping the processing of adding new entries")
}() }()
log.Debug("Waiting for shutdown signal from channel") log.Debug("Waiting for shutdown signal from channel")
select { select {
case <-finishCh: case <-ctx.Done():
log.Debug("Received shutdown signal from channel") log.Debug("Received shutdown signal from channel")
return nil return nil
case errs := <-finalErrCh: case errs := <-finalErrCh:

View File

@ -10,61 +10,281 @@ import (
. "github.com/onsi/ginkgo/v2" . "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/beaconclient" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
)
var (
kgCheckCheckedOutStmt = `SELECT * FROM eth_beacon.known_gaps WHERE checked_out=true `
hpCheckCheckedOutStmt = `SELECT * FROM eth_beacon.historic_process WHERE checked_out=true `
) )
var _ = Describe("Capturehistoric", func() { var _ = Describe("Capturehistoric", func() {
Describe("Run the application in historic mode", Label("unit", "behavioral"), func() { Describe("Run the application in historic mode", Label("unit", "behavioral", "historical"), func() {
Context("Phase0: When we need to process a single block in the ethcl.historic_process table.", func() { Context("Phase0 + Altairs: When we need to process a multiple blocks in a multiple entries in the eth_beacon.historic_process table.", Label("deb"), func() {
It("Successfully Process the Block", func() { It("Successfully Process the Blocks", func() {
bc := setUpTest(BeaconNodeTester.TestConfig, "99") bc := setUpTest(BeaconNodeTester.TestConfig, "99")
BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
defer httpmock.DeactivateAndReset() defer httpmock.DeactivateAndReset()
BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 101, 10) BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 101, 10)
BeaconNodeTester.runBatchProcess(bc, 2, 100, 101, 0, 0) BeaconNodeTester.runHistoricalProcess(bc, 2, 2, 0, 0, 0)
// Run two separate processes
BeaconNodeTester.writeEventToHistoricProcess(bc, 2375703, 2375703, 10)
BeaconNodeTester.runHistoricalProcess(bc, 2, 3, 0, 0, 0)
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
validateSlot(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, "proposed") validatePopularBatchBlocks(bc)
validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, "0x629ae1587895043076500f4f5dcb202a47c2fc95d5b5c548cb83bc97bd2dbfe1", "0x8d3f027beef5cbd4f8b29fc831aba67a5d74768edca529f5596f07fd207865e1", "/blocks/QHVAEQBQGQ4TKNJUGAYDGNZRGM2DOZJSGZTDMMLEG5QTIYTCMRQTKYRSGNTGCMDCGI2WINLGMM2DMNJRGYYGMMTBHEZGINJSME3DGYRZGE4WE") })
validateBeaconState(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, "/blocks/QHVAEQRQPBTDEOBWMEYDGNZZMMYDGOBWMEZWGN3CMUZDQZBQGVSDQMRZMY4GKYRXMIZDQMDDMM4WKZDFGE2TINBZMFTDEMDFMJRWIMBWME3WCNJW") })
Context("When the start block is greater than the endBlock", func() {
It("Should Add two entries to the knownGaps table", func() {
bc := setUpTest(BeaconNodeTester.TestConfig, "99")
BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
defer httpmock.DeactivateAndReset()
BeaconNodeTester.writeEventToHistoricProcess(bc, 101, 100, 10)
BeaconNodeTester.runHistoricalProcess(bc, 2, 0, 0, 2, 0)
})
})
Context("Processing the Genesis block", Label("genesis"), func() {
It("Should Process properly", func() {
bc := setUpTest(BeaconNodeTester.TestConfig, "100")
BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
defer httpmock.DeactivateAndReset()
BeaconNodeTester.writeEventToHistoricProcess(bc, 0, 0, 10)
BeaconNodeTester.runHistoricalProcess(bc, 2, 1, 0, 0, 0)
validateSlot(bc, BeaconNodeTester.TestEvents["0"].HeadMessage, 0, "proposed")
validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["0"].HeadMessage, BeaconNodeTester.TestEvents["0"].CorrectParentRoot, BeaconNodeTester.TestEvents["0"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["0"].CorrectSignedBeaconBlockMhKey)
validateBeaconState(bc, BeaconNodeTester.TestEvents["0"].HeadMessage, BeaconNodeTester.TestEvents["0"].CorrectBeaconStateMhKey)
})
})
Context("When there is a skipped slot", func() {
It("Should process the slot properly.", func() {
bc := setUpTest(BeaconNodeTester.TestConfig, "3797055")
BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
defer httpmock.DeactivateAndReset()
BeaconNodeTester.writeEventToHistoricProcess(bc, 3797056, 3797056, 10)
BeaconNodeTester.runHistoricalProcess(bc, 2, 1, 0, 0, 0)
validateSlot(bc, BeaconNodeTester.TestEvents["3797056"].HeadMessage, 118658, "skipped")
})
})
})
Describe("Running the Application to process Known Gaps", Label("unit", "behavioral", "knownGaps"), func() {
Context("Phase0 + Altairs: When we need to process a multiple blocks in a multiple entries in the eth_beacon.known_gaps table.", func() {
It("Successfully Process the Blocks", func() {
bc := setUpTest(BeaconNodeTester.TestConfig, "99")
BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
defer httpmock.DeactivateAndReset()
BeaconNodeTester.writeEventToKnownGaps(bc, 100, 101)
BeaconNodeTester.runKnownGapsProcess(bc, 2, 2, 0, 0, 0)
// Run two separate processes
BeaconNodeTester.writeEventToKnownGaps(bc, 2375703, 2375703)
BeaconNodeTester.runKnownGapsProcess(bc, 2, 3, 0, 0, 0)
//validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, "0x629ae1587895043076500f4f5dcb202a47c2fc95d5b5c548cb83bc97bd2dbfe1", "0x8d3f027beef5cbd4f8b29fc831aba67a5d74768edca529f5596f07fd207865e1", "/blocks/QHVAEQBQGQ4TKNJUGAYDGNZRGM2DOZJSGZTDMMLEG5QTIYTCMRQTKYRSGNTGCMDCGI2WINLGMM2DMNJRGYYGMMTBHEZGINJSME3DGYRZGE4WE") time.Sleep(2 * time.Second)
//validateBeaconState(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, "/blocks/QHVAEQRQPBTDEOBWMEYDGNZZMMYDGOBWMEZWGN3CMUZDQZBQGVSDQMRZMY4GKYRXMIZDQMDDMM4WKZDFGE2TINBZMFTDEMDFMJRWIMBWME3WCNJW") validatePopularBatchBlocks(bc)
})
})
Context("When the start block is greater than the endBlock", func() {
It("Should Add two entries to the knownGaps table", func() {
bc := setUpTest(BeaconNodeTester.TestConfig, "104")
BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
defer httpmock.DeactivateAndReset()
BeaconNodeTester.writeEventToKnownGaps(bc, 101, 100)
BeaconNodeTester.runKnownGapsProcess(bc, 2, 2, 0, 2, 0)
})
})
Context("When theres a reprocessing error", Label("reprocessingError"), func() {
It("Should update the reprocessing error.", func() {
bc := setUpTest(BeaconNodeTester.TestConfig, "99")
BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
defer httpmock.DeactivateAndReset()
// We don't have an entry in the BeaconNodeTester for this slot
BeaconNodeTester.writeEventToHistoricProcess(bc, 105, 105, 10)
BeaconNodeTester.runHistoricalProcess(bc, 2, 0, 0, 1, 0)
BeaconNodeTester.runKnownGapsProcess(bc, 2, 0, 0, 1, 1)
})
})
})
Describe("Running the application in Historic, Head, and KnownGaps mode", Label("unit", "historical", "full"), func() {
Context("When it recieves a head, historic and known Gaps message (in order)", func() {
It("Should process them all successfully.", func() {
bc := setUpTest(BeaconNodeTester.TestConfig, "2375702")
BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
defer httpmock.DeactivateAndReset()
// Head
BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0)
// Historical
BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 100, 10)
BeaconNodeTester.runHistoricalProcess(bc, 2, 2, 0, 0, 0)
// Known Gaps
BeaconNodeTester.writeEventToKnownGaps(bc, 101, 101)
BeaconNodeTester.runKnownGapsProcess(bc, 2, 3, 0, 0, 0)
time.Sleep(2 * time.Second)
validatePopularBatchBlocks(bc)
})
})
Context("When it recieves a historic, head and known Gaps message (in order)", func() {
It("Should process them all successfully.", func() {
bc := setUpTest(BeaconNodeTester.TestConfig, "2375702")
BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
defer httpmock.DeactivateAndReset()
// Historical
BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 100, 10)
BeaconNodeTester.runHistoricalProcess(bc, 2, 1, 0, 0, 0)
// Head
BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0)
// Known Gaps
BeaconNodeTester.writeEventToKnownGaps(bc, 101, 101)
BeaconNodeTester.runKnownGapsProcess(bc, 2, 3, 0, 0, 0)
time.Sleep(2 * time.Second)
validatePopularBatchBlocks(bc)
})
})
Context("When it recieves a known Gaps, historic and head message (in order)", func() {
It("Should process them all successfully.", func() {
bc := setUpTest(BeaconNodeTester.TestConfig, "2375702")
BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
defer httpmock.DeactivateAndReset()
// Known Gaps
BeaconNodeTester.writeEventToKnownGaps(bc, 101, 101)
BeaconNodeTester.runKnownGapsProcess(bc, 2, 1, 0, 0, 0)
// Historical
BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 100, 10)
BeaconNodeTester.runHistoricalProcess(bc, 2, 2, 0, 0, 0)
// Head
BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0)
time.Sleep(2 * time.Second)
validatePopularBatchBlocks(bc)
}) })
}) })
}) })
}) })
// This function will write an event to the ethcl.historic_process table // This function will write an event to the eth_beacon.known_gaps table
func (tbc TestBeaconNode) writeEventToHistoricProcess(bc *beaconclient.BeaconClient, startSlot, endSlot, priority int) { func (tbc TestBeaconNode) writeEventToKnownGaps(bc *beaconclient.BeaconClient, startSlot, endSlot int) {
log.Debug("We are writing the necessary events to batch process") log.Debug("We are writing the necessary events to batch process")
insertHistoricProcessingStmt := `INSERT INTO ethcl.historic_process (start_slot, end_slot, priority) insertKnownGapsStmt := `INSERT INTO eth_beacon.known_gaps (start_slot, end_slot)
VALUES ($1, $2, $3);` VALUES ($1, $2);`
res, err := bc.Db.Exec(context.Background(), insertHistoricProcessingStmt, startSlot, endSlot, priority) res, err := bc.Db.Exec(context.Background(), insertKnownGapsStmt, startSlot, endSlot)
Expect(err) Expect(err).ToNot(HaveOccurred())
rows, err := res.RowsAffected() rows, err := res.RowsAffected()
if rows != 1 { if rows != 1 {
Fail("We didnt write...") Fail("We didnt write...")
} }
Expect(err) Expect(err).ToNot(HaveOccurred())
} }
// Start the batch processing function, and check for the correct inserted slots. // This function will write an event to the eth_beacon.historic_process table
func (tbc TestBeaconNode) runBatchProcess(bc *beaconclient.BeaconClient, maxWorkers int, startSlot uint64, endSlot uint64, expectedReorgs uint64, expectedKnownGaps uint64) { func (tbc TestBeaconNode) writeEventToHistoricProcess(bc *beaconclient.BeaconClient, startSlot, endSlot, priority int) {
go bc.CaptureHistoric(maxWorkers) log.Debug("We are writing the necessary events to batch process")
diff := endSlot - startSlot + 1 insertHistoricProcessingStmt := `INSERT INTO eth_beacon.historic_process (start_slot, end_slot, priority)
VALUES ($1, $2, $3);`
res, err := bc.Db.Exec(context.Background(), insertHistoricProcessingStmt, startSlot, endSlot, priority)
Expect(err).ToNot(HaveOccurred())
rows, err := res.RowsAffected()
if rows != 1 {
Fail("We didnt write...")
}
Expect(err).ToNot(HaveOccurred())
}
// Start the CaptureHistoric function, and check for the correct inserted slots.
func (tbc TestBeaconNode) runHistoricalProcess(bc *beaconclient.BeaconClient, maxWorkers int, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) {
ctx, cancel := context.WithCancel(context.Background())
go bc.CaptureHistoric(ctx, maxWorkers)
validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError)
log.Debug("Calling the stop function for historical processing..")
err := bc.StopHistoric(cancel)
time.Sleep(5 * time.Second)
Expect(err).ToNot(HaveOccurred())
validateAllRowsCheckedOut(bc.Db, hpCheckCheckedOutStmt)
}
// Wrapper function that processes knownGaps
func (tbc TestBeaconNode) runKnownGapsProcess(bc *beaconclient.BeaconClient, maxWorkers int, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) {
ctx, cancel := context.WithCancel(context.Background())
go bc.ProcessKnownGaps(ctx, maxWorkers)
validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError)
err := bc.StopKnownGapsProcessing(cancel)
time.Sleep(5 * time.Second)
Expect(err).ToNot(HaveOccurred())
validateAllRowsCheckedOut(bc.Db, kgCheckCheckedOutStmt)
}
func validateMetrics(bc *beaconclient.BeaconClient, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) {
curRetry := 0 curRetry := 0
for atomic.LoadUint64(&bc.Metrics.SlotInserts) != diff { value := atomic.LoadUint64(&bc.Metrics.SlotInserts)
for value != expectedInserts {
time.Sleep(1 * time.Second) time.Sleep(1 * time.Second)
curRetry = curRetry + 1 curRetry = curRetry + 1
if curRetry == maxRetry { if curRetry == maxRetry {
Fail(fmt.Sprintf("Too many retries have occurred. The number of inserts expects %d, the number that actually occurred, %d", atomic.LoadUint64(&bc.Metrics.SlotInserts), diff)) Fail(fmt.Sprintf("Too many retries have occurred. The number of inserts expected %d, the number that actually occurred, %d", expectedInserts, atomic.LoadUint64(&bc.Metrics.SlotInserts)))
} }
value = atomic.LoadUint64(&bc.Metrics.SlotInserts)
}
curRetry = 0
value = atomic.LoadUint64(&bc.Metrics.KnownGapsInserts)
for value != expectedKnownGaps {
time.Sleep(1 * time.Second)
curRetry = curRetry + 1
if curRetry == maxRetry {
Fail(fmt.Sprintf("Too many retries have occurred. The number of knownGaps expected %d, the number that actually occurred, %d", expectedKnownGaps, atomic.LoadUint64(&bc.Metrics.KnownGapsInserts)))
}
value = atomic.LoadUint64(&bc.Metrics.KnownGapsInserts)
}
curRetry = 0
value = atomic.LoadUint64(&bc.Metrics.KnownGapsReprocessError)
for value != expectedKnownGapsReprocessError {
time.Sleep(1 * time.Second)
curRetry = curRetry + 1
if curRetry == maxRetry {
Fail(fmt.Sprintf("Too many retries have occurred. The number of knownGapsReprocessingErrors expected %d, the number that actually occurred, %d", expectedKnownGapsReprocessError, value))
}
log.Debug("&bc.Metrics.KnownGapsReprocessError: ", &bc.Metrics.KnownGapsReprocessError)
value = atomic.LoadUint64(&bc.Metrics.KnownGapsReprocessError)
}
curRetry = 0
value = atomic.LoadUint64(&bc.Metrics.ReorgInserts)
for value != expectedReorgs {
time.Sleep(1 * time.Second)
curRetry = curRetry + 1
if curRetry == maxRetry {
Fail(fmt.Sprintf("Too many retries have occurred. The number of Reorgs expected %d, the number that actually occurred, %d", expectedReorgs, atomic.LoadUint64(&bc.Metrics.ReorgInserts)))
}
value = atomic.LoadUint64(&bc.Metrics.ReorgInserts)
} }
}
Expect(atomic.LoadUint64(&bc.Metrics.KnownGapsInserts)).To(Equal(expectedKnownGaps))
Expect(atomic.LoadUint64(&bc.Metrics.ReorgInserts)).To(Equal(expectedKnownGaps)) // A wrapper function to validate a few popular blocks
func validatePopularBatchBlocks(bc *beaconclient.BeaconClient) {
validateSlot(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, "proposed")
validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectParentRoot, BeaconNodeTester.TestEvents["100"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["100"].CorrectSignedBeaconBlockMhKey)
validateBeaconState(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectBeaconStateMhKey)
validateSlot(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, 3, "proposed")
validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, BeaconNodeTester.TestEvents["100"].HeadMessage.Block, BeaconNodeTester.TestEvents["101"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["101"].CorrectSignedBeaconBlockMhKey)
validateBeaconState(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, BeaconNodeTester.TestEvents["101"].CorrectBeaconStateMhKey)
validateSlot(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, "proposed")
validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectParentRoot, BeaconNodeTester.TestEvents["2375703"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["2375703"].CorrectSignedBeaconBlockMhKey)
validateBeaconState(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectBeaconStateMhKey)
}
// Make sure all rows have checked_out as false.
func validateAllRowsCheckedOut(db sql.Database, checkStmt string) {
res, err := db.Exec(context.Background(), checkStmt)
Expect(err).ToNot(HaveOccurred())
rows, err := res.RowsAffected()
Expect(err).ToNot(HaveOccurred())
Expect(rows).To(Equal(int64(0)))
} }
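The four polling loops in validateMetrics all share one shape: load an atomic counter, sleep a second, retry up to maxRetry, and fail otherwise. If more metrics are added, that shape could be folded into a helper along these lines (a sketch only, not part of the test suite; it returns an error instead of calling Fail):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// waitForCounter polls an atomic counter once per second until it equals the
// expected value or maxRetry attempts have elapsed.
func waitForCounter(name string, counter *uint64, expected uint64, maxRetry int) error {
	for curRetry := 0; curRetry < maxRetry; curRetry++ {
		if atomic.LoadUint64(counter) == expected {
			return nil
		}
		time.Sleep(1 * time.Second)
	}
	return fmt.Errorf("%s: expected %d, got %d after %d retries", name, expected, atomic.LoadUint64(counter), maxRetry)
}

func main() {
	var slotInserts uint64
	go atomic.AddUint64(&slotInserts, 3)
	if err := waitForCounter("SlotInserts", &slotInserts, 3, 5); err != nil {
		fmt.Println(err)
	}
}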

View File

@ -25,7 +25,7 @@ import (
"sync/atomic" "sync/atomic"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
) )
var ( var (

View File

@ -22,61 +22,61 @@ import (
"github.com/jackc/pgx/v4" "github.com/jackc/pgx/v4"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
var ( var (
// Statement to upsert to the ethcl.slots table. // Statement to upsert to the eth_beacon.slots table.
UpsertSlotsStmt string = ` UpsertSlotsStmt string = `
INSERT INTO ethcl.slots (epoch, slot, block_root, state_root, status) INSERT INTO eth_beacon.slots (epoch, slot, block_root, state_root, status)
VALUES ($1, $2, $3, $4, $5) ON CONFLICT (slot, block_root) DO NOTHING` VALUES ($1, $2, $3, $4, $5) ON CONFLICT (slot, block_root) DO NOTHING`
// Statement to upsert to the ethcl.signed_beacon_block table. // Statement to upsert to the eth_beacon.signed_block table.
UpsertSignedBeaconBlockStmt string = ` UpsertSignedBeaconBlockStmt string = `
INSERT INTO ethcl.signed_beacon_block (slot, block_root, parent_block_root, eth1_block_hash, mh_key) INSERT INTO eth_beacon.signed_block (slot, block_root, parent_block_root, eth1_block_hash, mh_key)
VALUES ($1, $2, $3, $4, $5) ON CONFLICT (slot, block_root) DO NOTHING` VALUES ($1, $2, $3, $4, $5) ON CONFLICT (slot, block_root) DO NOTHING`
// Statement to upsert to the ethcl.beacon_state table. // Statement to upsert to the eth_beacon.state table.
UpsertBeaconState string = ` UpsertBeaconState string = `
INSERT INTO ethcl.beacon_state (slot, state_root, mh_key) INSERT INTO eth_beacon.state (slot, state_root, mh_key)
VALUES ($1, $2, $3) ON CONFLICT (slot, state_root) DO NOTHING` VALUES ($1, $2, $3) ON CONFLICT (slot, state_root) DO NOTHING`
// Statement to upsert to the public.blocks table. // Statement to upsert to the public.blocks table.
UpsertBlocksStmt string = ` UpsertBlocksStmt string = `
INSERT INTO public.blocks (key, data) INSERT INTO public.blocks (key, data)
VALUES ($1, $2) ON CONFLICT (key) DO NOTHING` VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`
UpdateForkedStmt string = `UPDATE ethcl.slots UpdateForkedStmt string = `UPDATE eth_beacon.slots
SET status='forked' SET status='forked'
WHERE slot=$1 AND block_root<>$2 WHERE slot=$1 AND block_root<>$2
RETURNING block_root;` RETURNING block_root;`
UpdateProposedStmt string = `UPDATE ethcl.slots UpdateProposedStmt string = `UPDATE eth_beacon.slots
SET status='proposed' SET status='proposed'
WHERE slot=$1 AND block_root=$2 WHERE slot=$1 AND block_root=$2
RETURNING block_root;` RETURNING block_root;`
CheckProposedStmt string = `SELECT slot, block_root CheckProposedStmt string = `SELECT slot, block_root
FROM ethcl.slots FROM eth_beacon.slots
WHERE slot=$1 AND block_root=$2;` WHERE slot=$1 AND block_root=$2;`
// Check to see if the slot and block_root exist in ethcl.signed_beacon_block // Check to see if the slot and block_root exist in eth_beacon.signed_block
CheckSignedBeaconBlockStmt string = `SELECT slot, block_root CheckSignedBeaconBlockStmt string = `SELECT slot, block_root
FROM ethcl.signed_beacon_block FROM eth_beacon.signed_block
WHERE slot=$1 AND block_root=$2` WHERE slot=$1 AND block_root=$2`
// Check to see if the slot and state_root exist in ethcl.beacon_state // Check to see if the slot and state_root exist in eth_beacon.state
CheckBeaconStateStmt string = `SELECT slot, state_root CheckBeaconStateStmt string = `SELECT slot, state_root
FROM ethcl.beacon_state FROM eth_beacon.state
WHERE slot=$1 AND state_root=$2` WHERE slot=$1 AND state_root=$2`
// Used to get a single slot from the table if it exists // Used to get a single slot from the table if it exists
QueryBySlotStmt string = `SELECT slot QueryBySlotStmt string = `SELECT slot
FROM ethcl.slots FROM eth_beacon.slots
WHERE slot=$1` WHERE slot=$1`
// Statement to insert known_gaps. We don't pass in timestamp, we let the server take care of that one. // Statement to insert known_gaps. We don't pass in timestamp, we let the server take care of that one.
UpsertKnownGapsStmt string = ` UpsertKnownGapsStmt string = `
INSERT INTO ethcl.known_gaps (start_slot, end_slot, checked_out, reprocessing_error, entry_error, entry_process) INSERT INTO eth_beacon.known_gaps (start_slot, end_slot, checked_out, reprocessing_error, entry_error, entry_process)
VALUES ($1, $2, $3, $4, $5, $6) on CONFLICT (start_slot, end_slot) DO NOTHING` VALUES ($1, $2, $3, $4, $5, $6) on CONFLICT (start_slot, end_slot) DO NOTHING`
UpsertKnownGapsErrorStmt string = ` UpsertKnownGapsErrorStmt string = `
UPDATE ethcl.known_gaps UPDATE eth_beacon.known_gaps
SET reprocessing_error=$3 SET reprocessing_error=$3, priority=priority+1
WHERE start_slot=$1 AND end_slot=$2;` WHERE start_slot=$1 AND end_slot=$2;`
// Get the highest slot if one exists // Get the highest slot if one exists
QueryHighestSlotStmt string = "SELECT COALESCE(MAX(slot), 0) FROM ethcl.slots" QueryHighestSlotStmt string = "SELECT COALESCE(MAX(slot), 0) FROM eth_beacon.slots"
) )
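Each of these statements is a plain parameterized query, so writing a row is just an Exec call with the positional arguments in the order shown. A hedged sketch of upserting a single slot directly with pgx is below; the connection string, root values, and status are placeholders, and it assumes a Postgres instance that already has the eth-beacon-db schema migrated.

package main

import (
	"context"
	"log"

	"github.com/jackc/pgx/v4"
)

const upsertSlotsStmt = `
INSERT INTO eth_beacon.slots (epoch, slot, block_root, state_root, status)
VALUES ($1, $2, $3, $4, $5) ON CONFLICT (slot, block_root) DO NOTHING`

func main() {
	ctx := context.Background()

	// Placeholder connection string; point it at a database with the eth_beacon schema.
	conn, err := pgx.Connect(ctx, "postgres://user:password@localhost:5432/vulcanize_testing")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	// epoch, slot, block_root, state_root, status -- the roots here are placeholders.
	_, err = conn.Exec(ctx, upsertSlotsStmt,
		"3", "100", "0xblockRootPlaceholder", "0xstateRootPlaceholder", "proposed")
	if err != nil {
		log.Fatal(err)
	}
}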
// Put all functionality to prepare the write object // Put all functionality to prepare the write object
@ -123,7 +123,7 @@ func CreateDatabaseWrite(db sql.Database, slot int, stateRoot string, blockRoot
// Write functions to write each all together... // Write functions to write each all together...
// Should I do one atomic write? // Should I do one atomic write?
// Create the model for the ethcl.slots table // Create the model for the eth_beacon.slots table
func (dw *DatabaseWriter) prepareSlotsModel(slot int, stateRoot string, blockRoot string, status string) { func (dw *DatabaseWriter) prepareSlotsModel(slot int, stateRoot string, blockRoot string, status string) {
dw.DbSlots = &DbSlots{ dw.DbSlots = &DbSlots{
Epoch: calculateEpoch(slot, bcSlotsPerEpoch), Epoch: calculateEpoch(slot, bcSlotsPerEpoch),
@ -136,7 +136,7 @@ func (dw *DatabaseWriter) prepareSlotsModel(slot int, stateRoot string, blockRoo
} }
// Create the model for the ethcl.signed_beacon_block table. // Create the model for the eth_beacon.signed_block table.
func (dw *DatabaseWriter) prepareSignedBeaconBlockModel(slot int, blockRoot string, parentBlockRoot string, eth1BlockHash string) error { func (dw *DatabaseWriter) prepareSignedBeaconBlockModel(slot int, blockRoot string, parentBlockRoot string, eth1BlockHash string) error {
mhKey, err := MultihashKeyFromSSZRoot([]byte(dw.DbSlots.BlockRoot)) mhKey, err := MultihashKeyFromSSZRoot([]byte(dw.DbSlots.BlockRoot))
if err != nil { if err != nil {
@ -153,7 +153,7 @@ func (dw *DatabaseWriter) prepareSignedBeaconBlockModel(slot int, blockRoot stri
return nil return nil
} }
// Create the model for the ethcl.beacon_state table. // Create the model for the eth_beacon.state table.
func (dw *DatabaseWriter) prepareBeaconStateModel(slot int, stateRoot string) error { func (dw *DatabaseWriter) prepareBeaconStateModel(slot int, stateRoot string) error {
mhKey, err := MultihashKeyFromSSZRoot([]byte(dw.DbSlots.StateRoot)) mhKey, err := MultihashKeyFromSSZRoot([]byte(dw.DbSlots.StateRoot))
if err != nil { if err != nil {
@ -177,10 +177,10 @@ func (dw *DatabaseWriter) transactFullSlot() error {
}).Debug("Starting to write to the DB.") }).Debug("Starting to write to the DB.")
err := dw.transactSlots() err := dw.transactSlots()
if err != nil { if err != nil {
loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("We couldn't write to the ethcl.slots table...") loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("We couldn't write to the eth_beacon.slots table...")
return err return err
} }
log.Debug("We finished writing to the ethcl.slots table.") log.Debug("We finished writing to the eth_beacon.slots table.")
if dw.DbSlots.Status != "skipped" { if dw.DbSlots.Status != "skipped" {
//errG, _ := errgroup.WithContext(context.Background()) //errG, _ := errgroup.WithContext(context.Background())
//errG.Go(func() error { //errG.Go(func() error {
@ -190,18 +190,18 @@ func (dw *DatabaseWriter) transactFullSlot() error {
// return dw.transactBeaconState() // return dw.transactBeaconState()
//}) //})
//if err := errG.Wait(); err != nil { //if err := errG.Wait(); err != nil {
// loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("We couldn't write to the ethcl block or state table...") // loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("We couldn't write to the eth_beacon block or state table...")
// return err // return err
//} //}
// Might want to separate writing to public.blocks so we can do this concurrently... // Might want to separate writing to public.blocks so we can do this concurrently...
err := dw.transactSignedBeaconBlocks() err := dw.transactSignedBeaconBlocks()
if err != nil { if err != nil {
loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("We couldn't write to the ethcl block table...") loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("We couldn't write to the eth_beacon block table...")
return err return err
} }
err = dw.transactBeaconState() err = dw.transactBeaconState()
if err != nil { if err != nil {
loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("We couldn't write to the ethcl state table...") loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("We couldn't write to the eth_beacon state table...")
return err return err
} }
} }
@ -209,24 +209,24 @@ func (dw *DatabaseWriter) transactFullSlot() error {
return nil return nil
} }
// Add data for the ethcl.slots table to a transaction. For now this is only one function. // Add data for the eth_beacon.slots table to a transaction. For now this is only one function.
// But in the future if we need to incorporate any FK's or perform any actions to write to the // But in the future if we need to incorporate any FK's or perform any actions to write to the
// slots table we can do it all here. // slots table we can do it all here.
func (dw *DatabaseWriter) transactSlots() error { func (dw *DatabaseWriter) transactSlots() error {
return dw.upsertSlots() return dw.upsertSlots()
} }
// Upsert to the ethcl.slots table. // Upsert to the eth_beacon.slots table.
func (dw *DatabaseWriter) upsertSlots() error { func (dw *DatabaseWriter) upsertSlots() error {
_, err := dw.Tx.Exec(dw.Ctx, UpsertSlotsStmt, dw.DbSlots.Epoch, dw.DbSlots.Slot, dw.DbSlots.BlockRoot, dw.DbSlots.StateRoot, dw.DbSlots.Status) _, err := dw.Tx.Exec(dw.Ctx, UpsertSlotsStmt, dw.DbSlots.Epoch, dw.DbSlots.Slot, dw.DbSlots.BlockRoot, dw.DbSlots.StateRoot, dw.DbSlots.Status)
if err != nil { if err != nil {
loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("Unable to write the slot to the ethcl.slots table") loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("Unable to write the slot to the eth_beacon.slots table")
return err return err
} }
return nil return nil
} }
// Add the information for the signed_beacon_block to a transaction. // Add the information for the signed_block to a transaction.
func (dw *DatabaseWriter) transactSignedBeaconBlocks() error { func (dw *DatabaseWriter) transactSignedBeaconBlocks() error {
err := dw.upsertPublicBlocks(dw.DbSignedBeaconBlock.MhKey, dw.rawSignedBeaconBlock) err := dw.upsertPublicBlocks(dw.DbSignedBeaconBlock.MhKey, dw.rawSignedBeaconBlock)
if err != nil { if err != nil {
@ -249,17 +249,17 @@ func (dw *DatabaseWriter) upsertPublicBlocks(key string, data []byte) error {
return nil return nil
} }
// Upsert to the ethcl.signed_beacon_block table. // Upsert to the eth_beacon.signed_block table.
func (dw *DatabaseWriter) upsertSignedBeaconBlock() error { func (dw *DatabaseWriter) upsertSignedBeaconBlock() error {
_, err := dw.Tx.Exec(dw.Ctx, UpsertSignedBeaconBlockStmt, dw.DbSignedBeaconBlock.Slot, dw.DbSignedBeaconBlock.BlockRoot, dw.DbSignedBeaconBlock.ParentBlock, dw.DbSignedBeaconBlock.Eth1BlockHash, dw.DbSignedBeaconBlock.MhKey) _, err := dw.Tx.Exec(dw.Ctx, UpsertSignedBeaconBlockStmt, dw.DbSignedBeaconBlock.Slot, dw.DbSignedBeaconBlock.BlockRoot, dw.DbSignedBeaconBlock.ParentBlock, dw.DbSignedBeaconBlock.Eth1BlockHash, dw.DbSignedBeaconBlock.MhKey)
if err != nil { if err != nil {
loghelper.LogSlotError(dw.DbSlots.Slot, err).WithFields(log.Fields{"block_root": dw.DbSignedBeaconBlock.BlockRoot}).Error("Unable to write the slot to the ethcl.signed_beacon_block table") loghelper.LogSlotError(dw.DbSlots.Slot, err).WithFields(log.Fields{"block_root": dw.DbSignedBeaconBlock.BlockRoot}).Error("Unable to write the slot to the eth_beacon.signed_block table")
return err return err
} }
return nil return nil
} }
// Add the information for the beacon_state to a transaction. // Add the information for the state to a transaction.
func (dw *DatabaseWriter) transactBeaconState() error { func (dw *DatabaseWriter) transactBeaconState() error {
err := dw.upsertPublicBlocks(dw.DbBeaconState.MhKey, dw.rawBeaconState) err := dw.upsertPublicBlocks(dw.DbBeaconState.MhKey, dw.rawBeaconState)
if err != nil { if err != nil {
@ -272,11 +272,11 @@ func (dw *DatabaseWriter) transactBeaconState() error {
return nil return nil
} }
// Upsert to the ethcl.beacon_state table. // Upsert to the eth_beacon.state table.
func (dw *DatabaseWriter) upsertBeaconState() error { func (dw *DatabaseWriter) upsertBeaconState() error {
_, err := dw.Tx.Exec(dw.Ctx, UpsertBeaconState, dw.DbBeaconState.Slot, dw.DbBeaconState.StateRoot, dw.DbBeaconState.MhKey) _, err := dw.Tx.Exec(dw.Ctx, UpsertBeaconState, dw.DbBeaconState.Slot, dw.DbBeaconState.StateRoot, dw.DbBeaconState.MhKey)
if err != nil { if err != nil {
loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("Unable to write the slot to the ethcl.beacon_state table") loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("Unable to write the slot to the eth_beacon.state table")
return err return err
} }
return nil return nil
@ -351,7 +351,7 @@ func writeReorgs(db sql.Database, slot string, latestBlockRoot string, metrics *
func updateForked(tx sql.Tx, ctx context.Context, slot string, latestBlockRoot string) (int64, error) { func updateForked(tx sql.Tx, ctx context.Context, slot string, latestBlockRoot string) (int64, error) {
res, err := tx.Exec(ctx, UpdateForkedStmt, slot, latestBlockRoot) res, err := tx.Exec(ctx, UpdateForkedStmt, slot, latestBlockRoot)
if err != nil { if err != nil {
loghelper.LogReorgError(slot, latestBlockRoot, err).Error("We are unable to update the ethcl.slots table with the forked slots") loghelper.LogReorgError(slot, latestBlockRoot, err).Error("We are unable to update the eth_beacon.slots table with the forked slots")
return 0, err return 0, err
} }
count, err := res.RowsAffected() count, err := res.RowsAffected()
@ -366,7 +366,7 @@ func updateForked(tx sql.Tx, ctx context.Context, slot string, latestBlockRoot s
func updateProposed(tx sql.Tx, ctx context.Context, slot string, latestBlockRoot string) (int64, error) { func updateProposed(tx sql.Tx, ctx context.Context, slot string, latestBlockRoot string) (int64, error) {
res, err := tx.Exec(ctx, UpdateProposedStmt, slot, latestBlockRoot) res, err := tx.Exec(ctx, UpdateProposedStmt, slot, latestBlockRoot)
if err != nil { if err != nil {
loghelper.LogReorgError(slot, latestBlockRoot, err).Error("We are unable to update the ethcl.slots table with the proposed slot.") loghelper.LogReorgError(slot, latestBlockRoot, err).Error("We are unable to update the eth_beacon.slots table with the proposed slot.")
return 0, err return 0, err
} }
count, err := res.RowsAffected() count, err := res.RowsAffected()
@ -447,7 +447,7 @@ func writeKnownGaps(db sql.Database, tableIncrement int, startSlot int, endSlot
} }
} }
// A function to upsert a single entry to the ethcl.known_gaps table. // A function to upsert a single entry to the eth_beacon.known_gaps table.
func upsertKnownGaps(tx sql.Tx, ctx context.Context, knModel DbKnownGaps, metric *BeaconClientMetrics) { func upsertKnownGaps(tx sql.Tx, ctx context.Context, knModel DbKnownGaps, metric *BeaconClientMetrics) {
_, err := tx.Exec(ctx, UpsertKnownGapsStmt, knModel.StartSlot, knModel.EndSlot, _, err := tx.Exec(ctx, UpsertKnownGapsStmt, knModel.StartSlot, knModel.EndSlot,
knModel.CheckedOut, knModel.ReprocessingError, knModel.EntryError, knModel.EntryProcess) knModel.CheckedOut, knModel.ReprocessingError, knModel.EntryError, knModel.EntryProcess)
@ -456,12 +456,12 @@ func upsertKnownGaps(tx sql.Tx, ctx context.Context, knModel DbKnownGaps, metric
"err": err, "err": err,
"startSlot": knModel.StartSlot, "startSlot": knModel.StartSlot,
"endSlot": knModel.EndSlot, "endSlot": knModel.EndSlot,
}).Fatal("We are unable to write to the ethcl.known_gaps table!!! We will stop the application because of that.") }).Fatal("We are unable to write to the eth_beacon.known_gaps table!!! We will stop the application because of that.")
} }
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"startSlot": knModel.StartSlot, "startSlot": knModel.StartSlot,
"endSlot": knModel.EndSlot, "endSlot": knModel.EndSlot,
}).Warn("A new gap has been added to the ethcl.known_gaps table.") }).Warn("A new gap has been added to the eth_beacon.known_gaps table.")
metric.IncrementKnownGapsInserts(1) metric.IncrementKnownGapsInserts(1)
} }
@ -479,7 +479,18 @@ func writeStartUpGaps(db sql.Database, tableIncrement int, firstSlot int, metric
}).Fatal("Unable to get convert max block from DB to int. We must close the application or we might have undetected gaps.") }).Fatal("Unable to get convert max block from DB to int. We must close the application or we might have undetected gaps.")
} }
if maxSlot != firstSlot-1 { if maxSlot != firstSlot-1 {
writeKnownGaps(db, tableIncrement, maxSlot+1, firstSlot-1, fmt.Errorf(""), "startup", metric) if maxSlot < firstSlot-1 {
if maxSlot == 0 {
writeKnownGaps(db, tableIncrement, maxSlot, firstSlot-1, fmt.Errorf(""), "startup", metric)
} else {
writeKnownGaps(db, tableIncrement, maxSlot+1, firstSlot-1, fmt.Errorf(""), "startup", metric)
}
} else {
log.WithFields(log.Fields{
"maxSlot": maxSlot,
"firstSlot": firstSlot,
}).Warn("The maxSlot in the DB is greater than or equal to the first Slot we are processing.")
}
} }
} }
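The refactored writeStartUpGaps above now distinguishes three cases: an empty database (maxSlot == 0), a database that is simply behind the first head slot, and a database that is already at or ahead of it. A hypothetical pure-Go restatement of that arithmetic; startUpGap is an illustrative helper that does not exist in the repository.

package main

import "fmt"

// startUpGap returns the inclusive slot range that should be recorded as a
// known gap before firstSlot, or ok=false when nothing needs to be written.
func startUpGap(maxSlot, firstSlot int) (start, end int, ok bool) {
	switch {
	case maxSlot >= firstSlot-1:
		// Nothing is missing (==) or the DB is already ahead (>); only a warning is logged.
		return 0, 0, false
	case maxSlot == 0:
		// Empty database: the gap starts at slot 0 itself.
		return 0, firstSlot - 1, true
	default:
		return maxSlot + 1, firstSlot - 1, true
	}
}

func main() {
	fmt.Println(startUpGap(0, 4000))    // 0 3999 true
	fmt.Println(startUpGap(3000, 4000)) // 3001 3999 true
	fmt.Println(startUpGap(4200, 4000)) // 0 0 false
}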
@ -488,23 +499,21 @@ func updateKnownGapErrors(db sql.Database, startSlot int, endSlot int, reprocess
res, err := db.Exec(context.Background(), UpsertKnownGapsErrorStmt, startSlot, endSlot, reprocessingErr.Error()) res, err := db.Exec(context.Background(), UpsertKnownGapsErrorStmt, startSlot, endSlot, reprocessingErr.Error())
if err != nil { if err != nil {
loghelper.LogSlotRangeError(strconv.Itoa(startSlot), strconv.Itoa(endSlot), err).Error("Unable to update reprocessing_error") loghelper.LogSlotRangeError(strconv.Itoa(startSlot), strconv.Itoa(endSlot), err).Error("Unable to update reprocessing_error")
metric.IncrementKnownGapsProcessingError(1)
return err return err
} }
row, err := res.RowsAffected() row, err := res.RowsAffected()
if err != nil { if err != nil {
loghelper.LogSlotRangeError(strconv.Itoa(startSlot), strconv.Itoa(endSlot), err).Error("Unable to count rows affected when trying to update reprocessing_error.") loghelper.LogSlotRangeError(strconv.Itoa(startSlot), strconv.Itoa(endSlot), err).Error("Unable to count rows affected when trying to update reprocessing_error.")
metric.IncrementKnownGapsProcessingError(1)
return err return err
} }
if row != 1 { if row != 1 {
loghelper.LogSlotRangeError(strconv.Itoa(startSlot), strconv.Itoa(endSlot), err).WithFields(log.Fields{ loghelper.LogSlotRangeError(strconv.Itoa(startSlot), strconv.Itoa(endSlot), err).WithFields(log.Fields{
"rowCount": row, "rowCount": row,
}).Error("The rows affected by the upsert for reprocessing_error is not 1.") }).Error("The rows affected by the upsert for reprocessing_error is not 1.")
metric.IncrementKnownGapsProcessingError(1) metric.IncrementKnownGapsReprocessError(1)
return err return err
} }
metric.IncrementKnownGapsProcessed(1) metric.IncrementKnownGapsReprocessError(1)
return nil return nil
} }
@ -530,9 +539,9 @@ func isSlotProcessed(db sql.Database, checkProcessStmt string, slot string) (boo
return false, nil return false, nil
} }
// Check to see if this slot is in the DB. Check ethcl.slots, ethcl.signed_beacon_block // Check to see if this slot is in the DB. Check eth_beacon.slots, eth_beacon.signed_block
// and ethcl.beacon_state. If the slot exists, return true // and eth_beacon.state. If the slot exists, return true
func IsSlotInDb(db sql.Database, slot string, blockRoot string, stateRoot string) (bool, error) { func IsSlotInDb(ctx context.Context, db sql.Database, slot string, blockRoot string, stateRoot string) (bool, error) {
var ( var (
isInBeaconState bool isInBeaconState bool
isInSignedBeaconBlock bool isInSignedBeaconBlock bool
@ -540,18 +549,28 @@ func IsSlotInDb(db sql.Database, slot string, blockRoot string, stateRoot string
) )
errG, _ := errgroup.WithContext(context.Background()) errG, _ := errgroup.WithContext(context.Background())
errG.Go(func() error { errG.Go(func() error {
isInBeaconState, err = checkSlotAndRoot(db, CheckBeaconStateStmt, slot, stateRoot) select {
if err != nil { case <-ctx.Done():
loghelper.LogError(err).Error("Unable to check if the slot and stateroot exist in ethcl.beacon_state") return nil
default:
isInBeaconState, err = checkSlotAndRoot(db, CheckBeaconStateStmt, slot, stateRoot)
if err != nil {
loghelper.LogError(err).Error("Unable to check if the slot and stateroot exist in eth_beacon.state")
}
return err
} }
return err
}) })
errG.Go(func() error { errG.Go(func() error {
isInSignedBeaconBlock, err = checkSlotAndRoot(db, CheckSignedBeaconBlockStmt, slot, blockRoot) select {
if err != nil { case <-ctx.Done():
loghelper.LogError(err).Error("Unable to check if the slot and block_root exist in ethcl.signed_beacon_block") return nil
default:
isInSignedBeaconBlock, err = checkSlotAndRoot(db, CheckSignedBeaconBlockStmt, slot, blockRoot)
if err != nil {
loghelper.LogError(err).Error("Unable to check if the slot and block_root exist in eth_beacon.signed_block")
}
return err
} }
return err
}) })
if err := errG.Wait(); err != nil { if err := errG.Wait(); err != nil {
return false, err return false, err
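IsSlotInDb now takes a context and wraps each lookup in a select, so a shutdown short-circuits the DB round trips instead of letting them run during teardown. A minimal, hypothetical illustration of that pattern in isolation (no database involved; the commented calls stand in for the real checkSlotAndRoot lookups):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate a shutdown before the lookups start

	errG, _ := errgroup.WithContext(context.Background())
	errG.Go(func() error {
		select {
		case <-ctx.Done():
			// The eth_beacon.state lookup is skipped entirely.
			return nil
		default:
			// checkSlotAndRoot(db, CheckBeaconStateStmt, slot, stateRoot) would run here.
			return nil
		}
	})
	errG.Go(func() error {
		select {
		case <-ctx.Done():
			// The eth_beacon.signed_block lookup is skipped entirely.
			return nil
		default:
			// checkSlotAndRoot(db, CheckSignedBeaconBlockStmt, slot, blockRoot) would run here.
			return nil
		}
	})
	fmt.Println("err:", errG.Wait()) // err: <nil>
}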


@ -20,7 +20,7 @@ import (
"net/http" "net/http"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
) )
// This function will ensure that we can connect to the beacon client. // This function will ensure that we can connect to the beacon client.


@ -20,7 +20,7 @@ import (
. "github.com/onsi/ginkgo/v2" . "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
beaconclient "github.com/vulcanize/ipld-ethcl-indexer/pkg/beaconclient" beaconclient "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
) )
var _ = Describe("Healthcheck", func() { var _ = Describe("Healthcheck", func() {


@ -22,7 +22,7 @@ import (
"time" "time"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )


@ -19,20 +19,20 @@ import (
"sync/atomic" "sync/atomic"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
) )
// Create a metric struct and register each counter with prometheus // Create a metric struct and register each counter with prometheus
func CreateBeaconClientMetrics() (*BeaconClientMetrics, error) { func CreateBeaconClientMetrics() (*BeaconClientMetrics, error) {
metrics := &BeaconClientMetrics{ metrics := &BeaconClientMetrics{
SlotInserts: 0, SlotInserts: 0,
ReorgInserts: 0, ReorgInserts: 0,
KnownGapsInserts: 0, KnownGapsInserts: 0,
knownGapsProcessed: 0, KnownGapsProcessed: 0,
KnownGapsProcessingError: 0, KnownGapsReprocessError: 0,
HeadError: 0, HeadError: 0,
HeadReorgError: 0, HeadReorgError: 0,
} }
err := prometheusRegisterHelper("slot_inserts", "Keeps track of the number of slots we have inserted.", &metrics.SlotInserts) err := prometheusRegisterHelper("slot_inserts", "Keeps track of the number of slots we have inserted.", &metrics.SlotInserts)
if err != nil { if err != nil {
@ -46,11 +46,11 @@ func CreateBeaconClientMetrics() (*BeaconClientMetrics, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
err = prometheusRegisterHelper("known_gaps_processed", "Keeps track of the number of known gaps we processed.", &metrics.knownGapsProcessed) err = prometheusRegisterHelper("known_gaps_reprocess_error", "Keeps track of the number of known gaps that had errors when reprocessing, but the error was updated successfully.", &metrics.KnownGapsReprocessError)
if err != nil { if err != nil {
return nil, err return nil, err
} }
err = prometheusRegisterHelper("known_gaps_processing_error", "Keeps track of the number of known gaps we had errors processing.", &metrics.KnownGapsProcessingError) err = prometheusRegisterHelper("known_gaps_processed", "Keeps track of the number of known gaps we successfully processed.", &metrics.KnownGapsProcessed)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -86,19 +86,19 @@ func prometheusRegisterHelper(name string, help string, varPointer *uint64) erro
// A structure utilized for keeping track of various metrics. Currently, mostly used in testing. // A structure utilized for keeping track of various metrics. Currently, mostly used in testing.
type BeaconClientMetrics struct { type BeaconClientMetrics struct {
SlotInserts uint64 // Number of head events we successfully wrote to the DB. SlotInserts uint64 // Number of head events we successfully wrote to the DB.
ReorgInserts uint64 // Number of reorg events we successfully wrote to the DB. ReorgInserts uint64 // Number of reorg events we successfully wrote to the DB.
KnownGapsInserts uint64 // Number of known_gaps we successfully wrote to the DB. KnownGapsInserts uint64 // Number of known_gaps we successfully wrote to the DB.
knownGapsProcessed uint64 // Number of knownGaps processed. KnownGapsProcessed uint64 // Number of knownGaps processed.
KnownGapsProcessingError uint64 // Number of errors that occurred while processing a knownGap KnownGapsReprocessError uint64 // Number of knownGaps that were updated with an error.
HeadError uint64 // Number of errors that occurred when decoding the head message. HeadError uint64 // Number of errors that occurred when decoding the head message.
HeadReorgError uint64 // Number of errors that occurred when decoding the reorg message. HeadReorgError uint64 // Number of errors that occurred when decoding the reorg message.
} }
// Wrapper function to increment inserts. If we want to use mutexes later we can easily update all // Wrapper function to increment inserts. If we want to use mutexes later we can easily update all
// occurrences here. // occurrences here.
func (m *BeaconClientMetrics) IncrementSlotInserts(inc uint64) { func (m *BeaconClientMetrics) IncrementSlotInserts(inc uint64) {
logrus.Debug("Incrementing Slot Insert") log.Debug("Incrementing Slot Insert")
atomic.AddUint64(&m.SlotInserts, inc) atomic.AddUint64(&m.SlotInserts, inc)
} }
@ -117,13 +117,7 @@ func (m *BeaconClientMetrics) IncrementKnownGapsInserts(inc uint64) {
// Wrapper function to increment known gaps processed. If we want to use mutexes later we can easily update all // Wrapper function to increment known gaps processed. If we want to use mutexes later we can easily update all
// occurrences here. // occurrences here.
func (m *BeaconClientMetrics) IncrementKnownGapsProcessed(inc uint64) { func (m *BeaconClientMetrics) IncrementKnownGapsProcessed(inc uint64) {
atomic.AddUint64(&m.knownGapsProcessed, inc) atomic.AddUint64(&m.KnownGapsProcessed, inc)
}
// Wrapper function to increment known gaps processing error. If we want to use mutexes later we can easily update all
// occurrences here.
func (m *BeaconClientMetrics) IncrementKnownGapsProcessingError(inc uint64) {
atomic.AddUint64(&m.KnownGapsProcessingError, inc)
} }
// Wrapper function to increment head errors. If we want to use mutexes later we can easily update all // Wrapper function to increment head errors. If we want to use mutexes later we can easily update all
@ -137,3 +131,10 @@ func (m *BeaconClientMetrics) IncrementHeadError(inc uint64) {
func (m *BeaconClientMetrics) IncrementReorgError(inc uint64) { func (m *BeaconClientMetrics) IncrementReorgError(inc uint64) {
atomic.AddUint64(&m.HeadReorgError, inc) atomic.AddUint64(&m.HeadReorgError, inc)
} }
// Wrapper function to increment the number of knownGaps that were updated with reprocessing errors.
// If we want to use mutexes later we can easily update all occurrences here.
func (m *BeaconClientMetrics) IncrementKnownGapsReprocessError(inc uint64) {
log.Debug("Incrementing Known Gap Reprocessing: ", &m.KnownGapsReprocessError)
atomic.AddUint64(&m.KnownGapsReprocessError, inc)
}
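The metrics above are plain uint64 fields bumped through atomic wrappers, so head, historic, and known-gaps workers can all increment them without a mutex. A small, self-contained sketch of that pattern; the struct and the loop here are illustrative stand-ins, not the repository's types:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// metrics is an illustrative stand-in for BeaconClientMetrics.
type metrics struct {
	KnownGapsReprocessError uint64
}

func (m *metrics) IncrementKnownGapsReprocessError(inc uint64) {
	atomic.AddUint64(&m.KnownGapsReprocessError, inc)
}

func main() {
	m := &metrics{}
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			m.IncrementKnownGapsReprocessError(1)
		}()
	}
	wg.Wait()
	fmt.Println(atomic.LoadUint64(&m.KnownGapsReprocessError)) // 100
}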


@ -51,7 +51,7 @@ type ChainReorg struct {
ExecutionOptimistic bool `json:"execution_optimistic"` ExecutionOptimistic bool `json:"execution_optimistic"`
} }
// A struct to capture what's being written to the ethcl.slots table. // A struct to capture what's being written to the eth_beacon.slots table.
type DbSlots struct { type DbSlots struct {
Epoch string // The epoch. Epoch string // The epoch.
Slot string // The slot. Slot string // The slot.
@ -60,7 +60,7 @@ type DbSlots struct {
Status string // The status, it can be proposed | forked | skipped. Status string // The status, it can be proposed | forked | skipped.
} }
// A struct to capture what's being written to ethcl.signed_beacon_block table. // A struct to capture what's being written to eth_beacon.signed_block table.
type DbSignedBeaconBlock struct { type DbSignedBeaconBlock struct {
Slot string // The slot. Slot string // The slot.
BlockRoot string // The block root BlockRoot string // The block root
@ -70,14 +70,14 @@ type DbSignedBeaconBlock struct {
} }
// A struct to capture what's being written to ethcl.beacon_state table. // A struct to capture what's being written to eth_beacon.state table.
type DbBeaconState struct { type DbBeaconState struct {
Slot string // The slot. Slot string // The slot.
StateRoot string // The state root StateRoot string // The state root
MhKey string // The ipld multihash key. MhKey string // The ipld multihash key.
} }
// A structure to capture what's being written to the ethcl.known_gaps table. // A structure to capture what's being written to the eth_beacon.known_gaps table.
type DbKnownGaps struct { type DbKnownGaps struct {
StartSlot string // The start slot for known_gaps, inclusive. StartSlot string // The start slot for known_gaps, inclusive.
EndSlot string // The end slot for known_gaps, inclusive. EndSlot string // The end slot for known_gaps, inclusive.


@ -20,7 +20,7 @@ import (
dshelp "github.com/ipfs/go-ipfs-ds-help" dshelp "github.com/ipfs/go-ipfs-ds-help"
"github.com/multiformats/go-multihash" "github.com/multiformats/go-multihash"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
) )
const SSZ_SHA2_256_PREFIX uint64 = 0xb502 const SSZ_SHA2_256_PREFIX uint64 = 0xb502


@ -53,13 +53,19 @@ func (bc *BeaconClient) handleHead() {
if errorSlots != 0 && bc.PreviousSlot != 0 { if errorSlots != 0 && bc.PreviousSlot != 0 {
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"lastProcessedSlot": bc.PreviousSlot, "lastProcessedSlot": bc.PreviousSlot,
"errorMessages": errorSlots, "errorSlots": errorSlots,
}).Warn("We added slots to the knownGaps table because we got bad head messages.") }).Warn("We added slots to the knownGaps table because we got bad head messages.")
writeKnownGaps(bc.Db, bc.KnownGapTableIncrement, bc.PreviousSlot, bcSlotsPerEpoch+errorSlots, fmt.Errorf("Bad Head Messages"), "headProcessing", bc.Metrics) writeKnownGaps(bc.Db, bc.KnownGapTableIncrement, bc.PreviousSlot+1, slot, fmt.Errorf("Bad Head Messages"), "headProcessing", bc.Metrics)
errorSlots = 0
} }
log.WithFields(log.Fields{"head": head}).Debug("We are going to start processing the slot.") log.WithFields(log.Fields{"head": head}).Debug("We are going to start processing the slot.")
// Not used anywhere yet but might be useful to have.
if bc.PreviousSlot == 0 && bc.PreviousBlockRoot == "" {
bc.StartingSlot = slot
}
go processHeadSlot(bc.Db, bc.ServerEndpoint, slot, head.Block, head.State, bc.PreviousSlot, bc.PreviousBlockRoot, bc.Metrics, bc.KnownGapTableIncrement, bc.CheckDb) go processHeadSlot(bc.Db, bc.ServerEndpoint, slot, head.Block, head.State, bc.PreviousSlot, bc.PreviousBlockRoot, bc.Metrics, bc.KnownGapTableIncrement, bc.CheckDb)
log.WithFields(log.Fields{"head": head.Slot}).Debug("We finished calling processHeadSlot.") log.WithFields(log.Fields{"head": head.Slot}).Debug("We finished calling processHeadSlot.")


@ -26,85 +26,96 @@ import (
"github.com/jackc/pgx/v4" "github.com/jackc/pgx/v4"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
) )
var ( var (
// Get a single highest priority and non-checked out row from ethcl.historical_process // Get a single highest priority and non-checked out row from eth_beacon.historic_process
getHpEntryStmt string = `SELECT start_slot, end_slot FROM ethcl.historic_process getHpEntryStmt string = `SELECT start_slot, end_slot FROM eth_beacon.historic_process
WHERE checked_out=false WHERE checked_out=false
ORDER BY priority ASC ORDER BY priority ASC
LIMIT 1;` LIMIT 1;`
// Used to periodically check to see if there is a new entry in the ethcl.historic_process table. // Used to periodically check to see if there is a new entry in the eth_beacon.historic_process table.
checkHpEntryStmt string = `SELECT * FROM ethcl.historic_process WHERE checked_out=false;` checkHpEntryStmt string = `SELECT * FROM eth_beacon.historic_process WHERE checked_out=false;`
// Used to checkout a row from the ethcl.historic_process table // Used to checkout a row from the eth_beacon.historic_process table
lockHpEntryStmt string = `UPDATE ethcl.historic_process lockHpEntryStmt string = `UPDATE eth_beacon.historic_process
SET checked_out=true, checked_out_by=$3 SET checked_out=true, checked_out_by=$3
WHERE start_slot=$1 AND end_slot=$2;` WHERE start_slot=$1 AND end_slot=$2;`
// Used to delete an entry from the ethcl.historic_process table // Used to delete an entry from the eth_beacon.historic_process table
deleteHpEntryStmt string = `DELETE FROM ethcl.historic_process deleteHpEntryStmt string = `DELETE FROM eth_beacon.historic_process
WHERE start_slot=$1 AND end_slot=$2;` WHERE start_slot=$1 AND end_slot=$2;`
// Used to update every single row that this node has checked out. // Used to update every single row that this node has checked out.
releaseHpLockStmt string = `UPDATE ethcl.historic_process releaseHpLockStmt string = `UPDATE eth_beacon.historic_process
SET checked_out=false SET checked_out=false, checked_out_by=null
WHERE checked_out_by=$1` WHERE checked_out_by=$1`
) )
type historicProcessing struct { type HistoricProcessing struct {
db sql.Database //db connection db sql.Database //db connection
metrics *BeaconClientMetrics // metrics for beaconclient metrics *BeaconClientMetrics // metrics for beaconclient
uniqueNodeIdentifier int // node unique identifier. uniqueNodeIdentifier int // node unique identifier.
finishProcessing chan int // A channel which indicates to the process handleBatchProcess function that its time to end.
} }
// Get a single row of historical slots from the table. // Get a single row of historical slots from the table.
func (hp historicProcessing) getSlotRange(slotCh chan<- slotsToProcess) []error { func (hp HistoricProcessing) getSlotRange(ctx context.Context, slotCh chan<- slotsToProcess) []error {
return getBatchProcessRow(hp.db, getHpEntryStmt, checkHpEntryStmt, lockHpEntryStmt, slotCh, strconv.Itoa(hp.uniqueNodeIdentifier)) return getBatchProcessRow(ctx, hp.db, getHpEntryStmt, checkHpEntryStmt, lockHpEntryStmt, slotCh, strconv.Itoa(hp.uniqueNodeIdentifier))
} }
// Remove the table entry. // Remove the table entry.
func (hp historicProcessing) removeTableEntry(processCh <-chan slotsToProcess) error { func (hp HistoricProcessing) removeTableEntry(ctx context.Context, processCh <-chan slotsToProcess) error {
return removeRowPostProcess(hp.db, processCh, QueryBySlotStmt, deleteHpEntryStmt) return removeRowPostProcess(ctx, hp.db, processCh, QueryBySlotStmt, deleteHpEntryStmt)
} }
// Handle any errors reported while processing slots. // Handle any errors reported while processing slots.
func (hp historicProcessing) handleProcessingErrors(errMessages <-chan batchHistoricError) { func (hp HistoricProcessing) handleProcessingErrors(ctx context.Context, errMessages <-chan batchHistoricError) {
for { for {
errMs := <-errMessages select {
loghelper.LogSlotError(strconv.Itoa(errMs.slot), errMs.err) case <-ctx.Done():
writeKnownGaps(hp.db, 1, errMs.slot, errMs.slot, errMs.err, errMs.errProcess, hp.metrics) return
case errMs := <-errMessages:
loghelper.LogSlotError(strconv.Itoa(errMs.slot), errMs.err)
writeKnownGaps(hp.db, 1, errMs.slot, errMs.slot, errMs.err, errMs.errProcess, hp.metrics)
}
} }
} }
func (hp historicProcessing) releaseDbLocks() error { // "un"-checkout the rows held by this node in the eth_beacon.historic_process table.
go func() { hp.finishProcessing <- 1 }() func (hp HistoricProcessing) releaseDbLocks(cancel context.CancelFunc) error {
log.Debug("Updating all the entries to ethcl.historical processing") cancel()
log.Debug("Updating all the entries to eth_beacon.historical processing")
log.Debug("Db: ", hp.db)
log.Debug("hp.uniqueNodeIdentifier ", hp.uniqueNodeIdentifier)
res, err := hp.db.Exec(context.Background(), releaseHpLockStmt, hp.uniqueNodeIdentifier) res, err := hp.db.Exec(context.Background(), releaseHpLockStmt, hp.uniqueNodeIdentifier)
if err != nil { if err != nil {
return fmt.Errorf("Unable to remove lock from ethcl.historical_processing table for node %d, error is %e", hp.uniqueNodeIdentifier, err) return fmt.Errorf("Unable to remove lock from eth_beacon.historical_processing table for node %d, error is %e", hp.uniqueNodeIdentifier, err)
} }
log.Debug("Update all the entries to ethcl.historical processing") log.Debug("Update all the entries to eth_beacon.historical processing")
rows, err := res.RowsAffected() rows, err := res.RowsAffected()
if err != nil { if err != nil {
return fmt.Errorf("Unable to calculated number of rows affected by releasing locks from ethcl.historical_processing table for node %d, error is %e", hp.uniqueNodeIdentifier, err) return fmt.Errorf("Unable to calculated number of rows affected by releasing locks from eth_beacon.historical_processing table for node %d, error is %e", hp.uniqueNodeIdentifier, err)
} }
log.WithField("rowCount", rows).Info("Released historicalProcess locks for specified rows.") log.WithField("rowCount", rows).Info("Released historicalProcess locks for specified rows.")
return nil return nil
} }
// Process the slot range. // Process the slot range.
func processSlotRangeWorker(workCh <-chan int, errCh chan<- batchHistoricError, db sql.Database, serverAddress string, metrics *BeaconClientMetrics, checkDb bool) { func processSlotRangeWorker(ctx context.Context, workCh <-chan int, errCh chan<- batchHistoricError, db sql.Database, serverAddress string, metrics *BeaconClientMetrics, checkDb bool) {
for slot := range workCh { for {
log.Debug("Handling slot: ", slot) select {
err, errProcess := handleHistoricSlot(db, serverAddress, slot, metrics, checkDb) case <-ctx.Done():
if err != nil { return
errMs := batchHistoricError{ case slot := <-workCh:
err: err, log.Debug("Handling slot: ", slot)
errProcess: errProcess, err, errProcess := handleHistoricSlot(ctx, db, serverAddress, slot, metrics, checkDb)
slot: slot, if err != nil {
errMs := batchHistoricError{
err: err,
errProcess: errProcess,
slot: slot,
}
errCh <- errMs
} }
errCh <- errMs
} }
} }
} }
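processSlotRangeWorker now blocks in a select on either the work channel or ctx.Done(), so cancelling the shared context is what actually stops the workers. A hypothetical, simplified sketch of how such a pool is started and torn down; worker and the channel wiring here are illustrative, not the repository's exact code:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// worker is an illustrative stand-in for processSlotRangeWorker: it drains the
// work channel until the shared context is cancelled.
func worker(ctx context.Context, id int, workCh <-chan int, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		select {
		case <-ctx.Done():
			fmt.Printf("worker %d: shutting down\n", id)
			return
		case slot := <-workCh:
			fmt.Printf("worker %d: handling slot %d\n", id, slot)
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	workCh := make(chan int)
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go worker(ctx, i, workCh, &wg)
	}
	workCh <- 4001
	workCh <- 4002
	time.Sleep(50 * time.Millisecond) // let the workers print
	cancel()                          // the shutdown signal, as releaseDbLocks now issues
	wg.Wait()
}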
@ -113,96 +124,102 @@ func processSlotRangeWorker(workCh <-chan int, errCh chan<- batchHistoricError,
// It also locks the row by updating the checked_out column. // It also locks the row by updating the checked_out column.
// The statement for getting the start_slot and end_slot must be provided. // The statement for getting the start_slot and end_slot must be provided.
// The statement for "locking" the row must also be provided. // The statement for "locking" the row must also be provided.
func getBatchProcessRow(db sql.Database, getStartEndSlotStmt string, checkNewRowsStmt string, checkOutRowStmt string, slotCh chan<- slotsToProcess, uniqueNodeIdentifier string) []error { func getBatchProcessRow(ctx context.Context, db sql.Database, getStartEndSlotStmt string, checkNewRowsStmt string, checkOutRowStmt string, slotCh chan<- slotsToProcess, uniqueNodeIdentifier string) []error {
errCount := make([]error, 0) errCount := make([]error, 0)
// 5 is an arbitrary number. It allows us to retry a few times before // 5 is an arbitrary number. It allows us to retry a few times before
// ending the application. // ending the application.
prevErrCount := 0 prevErrCount := 0
for len(errCount) < 5 { for len(errCount) < 5 {
if len(errCount) != prevErrCount { select {
log.WithFields(log.Fields{ case <-ctx.Done():
"errCount": errCount, return errCount
}).Error("New error entry added") default:
} if len(errCount) != prevErrCount {
processRow, err := db.Exec(context.Background(), checkNewRowsStmt) log.WithFields(log.Fields{
if err != nil { "errCount": errCount,
errCount = append(errCount, err) }).Error("New error entry added")
} }
row, err := processRow.RowsAffected() processRow, err := db.Exec(context.Background(), checkNewRowsStmt)
if err != nil { if err != nil {
errCount = append(errCount, err)
}
if row < 1 {
time.Sleep(1000 * time.Millisecond)
log.Debug("We are checking rows, be patient")
continue
}
log.Debug("We found a new row")
ctx := context.Background()
// Setup TX
tx, err := db.Begin(ctx)
if err != nil {
loghelper.LogError(err).Error("We are unable to Begin a SQL transaction")
errCount = append(errCount, err)
continue
}
defer func() {
err := tx.Rollback(ctx)
if err != nil && err != pgx.ErrTxClosed {
loghelper.LogError(err).Error("We were unable to Rollback a transaction")
errCount = append(errCount, err) errCount = append(errCount, err)
} }
}() row, err := processRow.RowsAffected()
if err != nil {
// Query the DB for slots. errCount = append(errCount, err)
sp := slotsToProcess{}
err = tx.QueryRow(ctx, getStartEndSlotStmt).Scan(&sp.startSlot, &sp.endSlot)
if err != nil {
if err == pgx.ErrNoRows {
time.Sleep(100 * time.Millisecond)
continue
} }
loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), getStartEndSlotStmt, err).Error("Unable to get a row") if row < 1 {
errCount = append(errCount, err) time.Sleep(1000 * time.Millisecond)
continue log.Debug("We are checking rows, be patient")
} break
}
log.Debug("We found a new row")
dbCtx := context.Background()
// Checkout the Row // Setup TX
res, err := tx.Exec(ctx, checkOutRowStmt, sp.startSlot, sp.endSlot, uniqueNodeIdentifier) tx, err := db.Begin(dbCtx)
if err != nil { if err != nil {
loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), checkOutRowStmt, err).Error("Unable to checkout the row") loghelper.LogError(err).Error("We are unable to Begin a SQL transaction")
errCount = append(errCount, err) errCount = append(errCount, err)
continue break
}
defer func() {
err := tx.Rollback(dbCtx)
if err != nil && err != pgx.ErrTxClosed {
loghelper.LogError(err).Error("We were unable to Rollback a transaction")
errCount = append(errCount, err)
}
}()
// Query the DB for slots.
sp := slotsToProcess{}
err = tx.QueryRow(dbCtx, getStartEndSlotStmt).Scan(&sp.startSlot, &sp.endSlot)
if err != nil {
if err == pgx.ErrNoRows {
time.Sleep(100 * time.Millisecond)
break
}
loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), getStartEndSlotStmt, err).Error("Unable to get a row")
errCount = append(errCount, err)
break
}
// Checkout the Row
res, err := tx.Exec(dbCtx, checkOutRowStmt, sp.startSlot, sp.endSlot, uniqueNodeIdentifier)
if err != nil {
loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), checkOutRowStmt, err).Error("Unable to checkout the row")
errCount = append(errCount, err)
break
}
rows, err := res.RowsAffected()
if err != nil {
loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), checkOutRowStmt, fmt.Errorf("Unable to determine the rows affected when trying to checkout a row."))
errCount = append(errCount, err)
break
}
if rows > 1 {
loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), checkOutRowStmt, err).WithFields(log.Fields{
"rowsReturn": rows,
}).Error("We locked too many rows.....")
errCount = append(errCount, err)
break
}
if rows == 0 {
loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), checkOutRowStmt, err).WithFields(log.Fields{
"rowsReturn": rows,
}).Error("We did not lock a single row.")
errCount = append(errCount, err)
break
}
err = tx.Commit(dbCtx)
if err != nil {
loghelper.LogSlotRangeError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), err).Error("Unable commit transactions.")
errCount = append(errCount, err)
break
}
log.WithField("slots", sp).Debug("Added a new slots to be processed")
slotCh <- sp
} }
rows, err := res.RowsAffected()
if err != nil {
loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), checkOutRowStmt, fmt.Errorf("Unable to determine the rows affected when trying to checkout a row."))
errCount = append(errCount, err)
continue
}
if rows > 1 {
loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), checkOutRowStmt, err).WithFields(log.Fields{
"rowsReturn": rows,
}).Error("We locked too many rows.....")
errCount = append(errCount, err)
continue
}
if rows == 0 {
loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), checkOutRowStmt, err).WithFields(log.Fields{
"rowsReturn": rows,
}).Error("We did not lock a single row.")
errCount = append(errCount, err)
continue
}
err = tx.Commit(ctx)
if err != nil {
loghelper.LogSlotRangeError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), err).Error("Unable commit transactions.")
errCount = append(errCount, err)
continue
}
slotCh <- sp
} }
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"ErrCount": errCount, "ErrCount": errCount,
@ -211,35 +228,39 @@ func getBatchProcessRow(db sql.Database, getStartEndSlotStmt string, checkNewRow
} }
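getBatchProcessRow checks a row out inside a single transaction: read the next unchecked row, mark it checked_out for this node, and commit. A condensed, hypothetical sketch of that checkout flow against the eth_beacon.historic_process table; the DSN and the node identifier (1) are placeholders, and error handling is reduced to panics for brevity:

package main

import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v4"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://vdbm:password@localhost:8076/vulcanize_testing")
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)

	tx, err := conn.Begin(ctx)
	if err != nil {
		panic(err)
	}
	defer tx.Rollback(ctx) // no-op after a successful Commit

	var startSlot, endSlot int
	err = tx.QueryRow(ctx,
		`SELECT start_slot, end_slot FROM eth_beacon.historic_process
		 WHERE checked_out=false ORDER BY priority ASC LIMIT 1;`).Scan(&startSlot, &endSlot)
	if err == pgx.ErrNoRows {
		fmt.Println("nothing to check out")
		return
	} else if err != nil {
		panic(err)
	}

	// Mark the row as held by this node before releasing the transaction.
	if _, err := tx.Exec(ctx,
		`UPDATE eth_beacon.historic_process SET checked_out=true, checked_out_by=$3
		 WHERE start_slot=$1 AND end_slot=$2;`, startSlot, endSlot, 1); err != nil {
		panic(err)
	}
	if err := tx.Commit(ctx); err != nil {
		panic(err)
	}
	fmt.Printf("checked out %d-%d\n", startSlot, endSlot)
}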
// After a row has been processed it should be removed from its appropriate table. // After a row has been processed it should be removed from its appropriate table.
func removeRowPostProcess(db sql.Database, processCh <-chan slotsToProcess, checkProcessedStmt, removeStmt string) error { func removeRowPostProcess(ctx context.Context, db sql.Database, processCh <-chan slotsToProcess, checkProcessedStmt, removeStmt string) error {
errCh := make(chan error) errCh := make(chan error)
for { for {
slots := <-processCh select {
// Make sure the start and end slot exist in the slots table. case <-ctx.Done():
go func() { return nil
finishedProcess := false case slots := <-processCh:
for !finishedProcess { // Make sure the start and end slot exist in the slots table.
isStartProcess, err := isSlotProcessed(db, checkProcessedStmt, strconv.Itoa(slots.startSlot)) go func() {
finishedProcess := false
for !finishedProcess {
isStartProcess, err := isSlotProcessed(db, checkProcessedStmt, strconv.Itoa(slots.startSlot))
if err != nil {
errCh <- err
}
isEndProcess, err := isSlotProcessed(db, checkProcessedStmt, strconv.Itoa(slots.endSlot))
if err != nil {
errCh <- err
}
if isStartProcess && isEndProcess {
finishedProcess = true
}
}
_, err := db.Exec(context.Background(), removeStmt, strconv.Itoa(slots.startSlot), strconv.Itoa(slots.endSlot))
if err != nil { if err != nil {
errCh <- err errCh <- err
} }
isEndProcess, err := isSlotProcessed(db, checkProcessedStmt, strconv.Itoa(slots.endSlot))
if err != nil {
errCh <- err
}
if isStartProcess && isEndProcess {
finishedProcess = true
}
}
_, err := db.Exec(context.Background(), removeStmt, strconv.Itoa(slots.startSlot), strconv.Itoa(slots.endSlot)) }()
if err != nil { if len(errCh) != 0 {
errCh <- err return <-errCh
} }
}()
if len(errCh) != 0 {
return <-errCh
} }
} }
} }


@ -23,30 +23,31 @@ import (
"strconv" "strconv"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
) )
var ( var (
// Get a single non-checked out row from ethcl.known_gaps. // Get a single non-checked out row from eth_beacon.known_gaps.
getKgEntryStmt string = `SELECT start_slot, end_slot FROM ethcl.known_gaps getKgEntryStmt string = `SELECT start_slot, end_slot FROM eth_beacon.known_gaps
WHERE checked_out=false WHERE checked_out=false
ORDER BY priority ASC
LIMIT 1;` LIMIT 1;`
// Used to periodically check to see if there is a new entry in the ethcl.known_gaps table. // Used to periodically check to see if there is a new entry in the eth_beacon.known_gaps table.
checkKgEntryStmt string = `SELECT * FROM ethcl.known_gaps WHERE checked_out=false;` checkKgEntryStmt string = `SELECT * FROM eth_beacon.known_gaps WHERE checked_out=false;`
// Used to checkout a row from the ethcl.known_gaps table // Used to checkout a row from the eth_beacon.known_gaps table
lockKgEntryStmt string = `UPDATE ethcl.known_gaps lockKgEntryStmt string = `UPDATE eth_beacon.known_gaps
SET checked_out=true, checked_out_by=$3 SET checked_out=true, checked_out_by=$3
WHERE start_slot=$1 AND end_slot=$2;` WHERE start_slot=$1 AND end_slot=$2;`
// Used to delete an entry from the knownGaps table // Used to delete an entry from the knownGaps table
deleteKgEntryStmt string = `DELETE FROM ethcl.known_gaps deleteKgEntryStmt string = `DELETE FROM eth_beacon.known_gaps
WHERE start_slot=$1 AND end_slot=$2;` WHERE start_slot=$1 AND end_slot=$2;`
// Used to check to see if a single slot exists in the known_gaps table. // Used to check to see if a single slot exists in the known_gaps table.
checkKgSingleSlotStmt string = `SELECT start_slot, end_slot FROM ethcl.known_gaps checkKgSingleSlotStmt string = `SELECT start_slot, end_slot FROM eth_beacon.known_gaps
WHERE start_slot=$1 AND end_slot=$2;` WHERE start_slot=$1 AND end_slot=$2;`
// Used to update every single row that this node has checked out. // Used to update every single row that this node has checked out.
releaseKgLockStmt string = `UPDATE ethcl.known_gaps releaseKgLockStmt string = `UPDATE eth_beacon.known_gaps
SET checked_out=false SET checked_out=false, checked_out_by=null
WHERE checked_out_by=$1` WHERE checked_out_by=$1`
) )
@ -54,71 +55,77 @@ type KnownGapsProcessing struct {
db sql.Database //db connection db sql.Database //db connection
metrics *BeaconClientMetrics // metrics for beaconclient metrics *BeaconClientMetrics // metrics for beaconclient
uniqueNodeIdentifier int // node unique identifier. uniqueNodeIdentifier int // node unique identifier.
finishProcessing chan int // A channel which indicates to the process handleBatchProcess function that its time to end.
} }
// This function will perform all the heavy lifting for processing known gaps. // This function will perform all the heavy lifting for processing known gaps.
func (bc *BeaconClient) ProcessKnownGaps(maxWorkers int) []error { func (bc *BeaconClient) ProcessKnownGaps(ctx context.Context, maxWorkers int) []error {
log.Info("We are starting the known gaps processing service.") log.Info("We are starting the known gaps processing service.")
bc.KnownGapsProcess = KnownGapsProcessing{db: bc.Db, uniqueNodeIdentifier: bc.UniqueNodeIdentifier, metrics: bc.Metrics, finishProcessing: make(chan int)} bc.KnownGapsProcess = KnownGapsProcessing{db: bc.Db, uniqueNodeIdentifier: bc.UniqueNodeIdentifier, metrics: bc.Metrics}
errs := handleBatchProcess(maxWorkers, bc.KnownGapsProcess, bc.KnownGapsProcess.finishProcessing, bc.KnownGapsProcess.db, bc.ServerEndpoint, bc.Metrics, bc.CheckDb) errs := handleBatchProcess(ctx, maxWorkers, bc.KnownGapsProcess, bc.KnownGapsProcess.db, bc.ServerEndpoint, bc.Metrics, bc.CheckDb)
log.Debug("Exiting known gaps processing service") log.Debug("Exiting known gaps processing service")
return errs return errs
} }
// This function will perform all the necessary clean up tasks for stopping known gaps processing. // This function will perform all the necessary clean up tasks for stopping known gaps processing.
func (bc *BeaconClient) StopKnownGapsProcessing() error { func (bc *BeaconClient) StopKnownGapsProcessing(cancel context.CancelFunc) error {
log.Info("We are stopping the historical processing service.") log.Info("We are stopping the known gaps processing service.")
err := bc.KnownGapsProcess.releaseDbLocks() err := bc.KnownGapsProcess.releaseDbLocks(cancel)
if err != nil { if err != nil {
loghelper.LogError(err).WithField("uniqueIdentifier", bc.UniqueNodeIdentifier).Error("We were unable to remove the locks from the ethcl.known_gaps table. Manual Intervention is needed!") loghelper.LogError(err).WithField("uniqueIdentifier", bc.UniqueNodeIdentifier).Error("We were unable to remove the locks from the eth_beacon.known_gaps table. Manual Intervention is needed!")
} }
return nil return nil
} }
// Get a single row of historical slots from the table. // Get a single row of historical slots from the table.
func (kgp KnownGapsProcessing) getSlotRange(slotCh chan<- slotsToProcess) []error { func (kgp KnownGapsProcessing) getSlotRange(ctx context.Context, slotCh chan<- slotsToProcess) []error {
return getBatchProcessRow(kgp.db, getKgEntryStmt, checkKgEntryStmt, lockKgEntryStmt, slotCh, strconv.Itoa(kgp.uniqueNodeIdentifier)) return getBatchProcessRow(ctx, kgp.db, getKgEntryStmt, checkKgEntryStmt, lockKgEntryStmt, slotCh, strconv.Itoa(kgp.uniqueNodeIdentifier))
} }
// Remove the table entry. // Remove the table entry.
func (kgp KnownGapsProcessing) removeTableEntry(processCh <-chan slotsToProcess) error { func (kgp KnownGapsProcessing) removeTableEntry(ctx context.Context, processCh <-chan slotsToProcess) error {
return removeRowPostProcess(kgp.db, processCh, QueryBySlotStmt, deleteKgEntryStmt) return removeRowPostProcess(ctx, kgp.db, processCh, QueryBySlotStmt, deleteKgEntryStmt)
} }
// Handle any errors reported while processing known gaps. // Handle any errors reported while processing known gaps.
func (kgp KnownGapsProcessing) handleProcessingErrors(errMessages <-chan batchHistoricError) { func (kgp KnownGapsProcessing) handleProcessingErrors(ctx context.Context, errMessages <-chan batchHistoricError) {
for { for {
errMs := <-errMessages select {
case <-ctx.Done():
// Check to see if this entry already exists. return
res, err := kgp.db.Exec(context.Background(), checkKgSingleSlotStmt, errMs.slot, errMs.slot) case errMs := <-errMessages:
if err != nil { // Check to see if this if this entry already exists.
loghelper.LogSlotError(strconv.Itoa(errMs.slot), err).Error("Unable to see if this slot is in the ethcl.known_gaps table") res, err := kgp.db.Exec(context.Background(), checkKgSingleSlotStmt, errMs.slot, errMs.slot)
}
rows, err := res.RowsAffected()
if err != nil {
loghelper.LogSlotError(strconv.Itoa(errMs.slot), err).WithFields(log.Fields{
"queryStatement": checkKgSingleSlotStmt,
}).Error("Unable to get the number of rows affected by this statement.")
}
if rows > 0 {
loghelper.LogSlotError(strconv.Itoa(errMs.slot), errMs.err).Error("We received an error when processing a knownGap")
err = updateKnownGapErrors(kgp.db, errMs.slot, errMs.slot, errMs.err, kgp.metrics)
if err != nil { if err != nil {
loghelper.LogSlotError(strconv.Itoa(errMs.slot), err).Error("Error processing known gap") loghelper.LogSlotError(strconv.Itoa(errMs.slot), err).Error("Unable to see if this slot is in the eth_beacon.known_gaps table")
}
rows, err := res.RowsAffected()
if err != nil {
loghelper.LogSlotError(strconv.Itoa(errMs.slot), err).WithFields(log.Fields{
"queryStatement": checkKgSingleSlotStmt,
}).Error("Unable to get the number of rows affected by this statement.")
}
if rows > 0 {
loghelper.LogSlotError(strconv.Itoa(errMs.slot), errMs.err).Error("We received an error when processing a knownGap")
err = updateKnownGapErrors(kgp.db, errMs.slot, errMs.slot, errMs.err, kgp.metrics)
if err != nil {
loghelper.LogSlotError(strconv.Itoa(errMs.slot), err).Error("Error processing known gap")
}
} else {
writeKnownGaps(kgp.db, 1, errMs.slot, errMs.slot, errMs.err, errMs.errProcess, kgp.metrics)
} }
} else {
writeKnownGaps(kgp.db, 1, errMs.slot, errMs.slot, errMs.err, errMs.errProcess, kgp.metrics)
} }
} }
} }
// Update the checked_out column for the uniqueNodeIdentifier. // Update the checked_out column for the uniqueNodeIdentifier.
func (kgp KnownGapsProcessing) releaseDbLocks() error { func (kgp KnownGapsProcessing) releaseDbLocks(cancel context.CancelFunc) error {
go func() { kgp.finishProcessing <- 1 }() cancel()
log.Debug("Updating all the entries to eth_beacon.known_gaps")
log.Debug("Db: ", kgp.db)
log.Debug("kgp.uniqueNodeIdentifier ", kgp.uniqueNodeIdentifier)
res, err := kgp.db.Exec(context.Background(), releaseKgLockStmt, kgp.uniqueNodeIdentifier) res, err := kgp.db.Exec(context.Background(), releaseKgLockStmt, kgp.uniqueNodeIdentifier)
if err != nil { if err != nil {
return err return err


@ -36,15 +36,12 @@ import (
state "github.com/prysmaticlabs/prysm/beacon-chain/state" state "github.com/prysmaticlabs/prysm/beacon-chain/state"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
var ( var (
SlotUnmarshalError = func(obj string) string {
return fmt.Sprintf("Unable to properly unmarshal the Slot field in the %s.", obj)
}
ParentRootUnmarshalError = "Unable to properly unmarshal the ParentRoot field in the SignedBeaconBlock." ParentRootUnmarshalError = "Unable to properly unmarshal the ParentRoot field in the SignedBeaconBlock."
MissingEth1Data = "Can't get the Eth1 block_hash" MissingEth1Data = "Can't get the Eth1 block_hash"
VersionedUnmarshalerError = "Unable to create a versioned unmarshaler" VersionedUnmarshalerError = "Unable to create a versioned unmarshaler"
@ -73,95 +70,110 @@ type ProcessSlot struct {
// DB Write objects // DB Write objects
DbSlotsModel *DbSlots // The model being written to the slots table. DbSlotsModel *DbSlots // The model being written to the slots table.
DbSignedBeaconBlockModel *DbSignedBeaconBlock // The model being written to the signed_beacon_block table. DbSignedBeaconBlockModel *DbSignedBeaconBlock // The model being written to the signed_block table.
DbBeaconState *DbBeaconState // The model being written to the beacon_state table. DbBeaconState *DbBeaconState // The model being written to the state table.
} }
// This function will do all the work to process the slot and write it to the DB. // This function will do all the work to process the slot and write it to the DB.
// It will return the error and error process. The error process is used for providing rich detail to the // It will return the error and error process. The error process is used for providing rich detail to the
// known_gaps table. // known_gaps table.
func processFullSlot(db sql.Database, serverAddress string, slot int, blockRoot string, stateRoot string, previousSlot int, previousBlockRoot string, headOrHistoric string, metrics *BeaconClientMetrics, knownGapsTableIncrement int, checkDb bool) (error, string) { func processFullSlot(ctx context.Context, db sql.Database, serverAddress string, slot int, blockRoot string, stateRoot string, previousSlot int, previousBlockRoot string, headOrHistoric string, metrics *BeaconClientMetrics, knownGapsTableIncrement int, checkDb bool) (error, string) {
ps := &ProcessSlot{ select {
Slot: slot, case <-ctx.Done():
BlockRoot: blockRoot, return nil, ""
StateRoot: stateRoot, default:
HeadOrHistoric: headOrHistoric, ps := &ProcessSlot{
Db: db, Slot: slot,
Metrics: metrics, BlockRoot: blockRoot,
} StateRoot: stateRoot,
HeadOrHistoric: headOrHistoric,
Db: db,
Metrics: metrics,
}
g, _ := errgroup.WithContext(context.Background()) g, _ := errgroup.WithContext(context.Background())
vUnmarshalerCh := make(chan *dt.VersionedUnmarshaler, 1) vUnmarshalerCh := make(chan *dt.VersionedUnmarshaler, 1)
// Get the BeaconState. // Get the BeaconState.
g.Go(func() error { g.Go(func() error {
err := ps.getBeaconState(serverAddress, vUnmarshalerCh) select {
case <-ctx.Done():
return nil
default:
err := ps.getBeaconState(serverAddress, vUnmarshalerCh)
if err != nil {
return err
}
return nil
}
})
// Get the SignedBeaconBlock.
g.Go(func() error {
select {
case <-ctx.Done():
return nil
default:
err := ps.getSignedBeaconBlock(serverAddress, vUnmarshalerCh)
if err != nil {
return err
}
return nil
}
})
if err := g.Wait(); err != nil {
return err, "processSlot"
}
finalBlockRoot, finalStateRoot, finalEth1BlockHash, err := ps.provideFinalHash()
if err != nil { if err != nil {
return err return err, "CalculateBlockRoot"
}
if checkDb {
inDb, err := IsSlotInDb(ctx, ps.Db, strconv.Itoa(ps.Slot), finalBlockRoot, finalStateRoot)
if err != nil {
return err, "checkDb"
}
if inDb {
log.WithField("slot", slot).Info("Slot already in the DB.")
return nil, ""
}
} }
return nil
})
// Get the SignedBeaconBlock. // Get this object ready to write
g.Go(func() error { dw, err := ps.createWriteObjects(finalBlockRoot, finalStateRoot, finalEth1BlockHash)
err := ps.getSignedBeaconBlock(serverAddress, vUnmarshalerCh)
if err != nil { if err != nil {
return err return err, "blockRoot"
} }
return nil // Write the object to the DB.
}) defer func() {
err := dw.Tx.Rollback(dw.Ctx)
if err := g.Wait(); err != nil { if err != nil && err != pgx.ErrTxClosed {
return err, "processSlot" loghelper.LogError(err).Error("We were unable to Rollback a transaction")
} }
}()
finalBlockRoot, finalStateRoot, finalEth1BlockHash, err := ps.provideFinalHash() err = dw.transactFullSlot()
if err != nil {
return err, "CalculateBlockRoot"
}
if checkDb {
inDb, err := IsSlotInDb(ps.Db, strconv.Itoa(ps.Slot), finalBlockRoot, finalStateRoot)
if err != nil { if err != nil {
return err, "checkDb" return err, "processSlot"
} }
if inDb {
log.WithField("slot", slot).Info("Slot already in the DB.") // Handle any reorgs or skipped slots.
return nil, "" headOrHistoric = strings.ToLower(headOrHistoric)
if headOrHistoric != "head" && headOrHistoric != "historic" {
return fmt.Errorf("headOrHistoric must be either historic or head!"), ""
} }
} if ps.HeadOrHistoric == "head" && previousSlot != 0 && previousBlockRoot != "" && ps.Status != "skipped" {
ps.checkPreviousSlot(dw.Tx, dw.Ctx, previousSlot, previousBlockRoot, knownGapsTableIncrement)
// Get this object ready to write
dw, err := ps.createWriteObjects(finalBlockRoot, finalStateRoot, finalEth1BlockHash)
if err != nil {
return err, "blockRoot"
}
// Write the object to the DB.
defer func() {
err := dw.Tx.Rollback(dw.Ctx)
if err != nil && err != pgx.ErrTxClosed {
loghelper.LogError(err).Error("We were unable to Rollback a transaction")
} }
}()
err = dw.transactFullSlot()
if err != nil {
return err, "processSlot"
}
// Handle any reorgs or skipped slots. // Commit the transaction
headOrHistoric = strings.ToLower(headOrHistoric) if err = dw.Tx.Commit(dw.Ctx); err != nil {
if headOrHistoric != "head" && headOrHistoric != "historic" { return err, "transactionCommit"
return fmt.Errorf("headOrHistoric must be either historic or head!"), "" }
}
if ps.HeadOrHistoric == "head" && previousSlot != 0 && previousBlockRoot != "" && ps.Status != "skipped" {
ps.checkPreviousSlot(dw.Tx, dw.Ctx, previousSlot, previousBlockRoot, knownGapsTableIncrement)
}
// Commit the transaction return nil, ""
if err = dw.Tx.Commit(dw.Ctx); err != nil {
return err, "transactionCommit"
} }
return nil, ""
} }
// Handle a slot that is at head. A wrapper function for calling `handleFullSlot`. // Handle a slot that is at head. A wrapper function for calling `handleFullSlot`.
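The intent of the hunk above is easier to see in isolation: the function and both fetch goroutines now check ctx.Done() before doing any work, so a shutdown request short-circuits slot processing instead of queuing more of it. Below is a minimal, self-contained sketch of that pattern; processOne, the loop bounds, and the timings are placeholders for illustration, not code from this repository.

package main

import (
    "context"
    "fmt"
    "time"

    "golang.org/x/sync/errgroup"
)

// processOne stands in for fetching and decoding a single slot.
func processOne(ctx context.Context, id int) error {
    select {
    case <-ctx.Done():
        // Shutdown requested: give up without reporting an error.
        return nil
    default:
        time.Sleep(50 * time.Millisecond) // simulate work
        fmt.Println("processed", id)
        return nil
    }
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    g, _ := errgroup.WithContext(ctx)
    for i := 0; i < 5; i++ {
        id := i
        g.Go(func() error { return processOne(ctx, id) })
    }

    // Cancel part-way through; workers that have not started yet will see
    // ctx.Done() and return without doing any work.
    time.AfterFunc(75*time.Millisecond, cancel)

    if err := g.Wait(); err != nil {
        fmt.Println("error:", err)
    }
}

Returning nil on cancellation, rather than an error, mirrors the choice above to treat shutdown as "nothing left to do" instead of a failure worth recording in known_gaps.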
@@ -170,15 +182,15 @@ func processHeadSlot(db sql.Database, serverAddress string, slot int, blockRoot
 	if previousSlot == 0 && previousBlockRoot == "" {
 		writeStartUpGaps(db, knownGapsTableIncrement, slot, metrics)
 	}
-	err, errReason := processFullSlot(db, serverAddress, slot, blockRoot, stateRoot, previousSlot, previousBlockRoot, "head", metrics, knownGapsTableIncrement, checkDb)
+	err, errReason := processFullSlot(context.Background(), db, serverAddress, slot, blockRoot, stateRoot, previousSlot, previousBlockRoot, "head", metrics, knownGapsTableIncrement, checkDb)
 	if err != nil {
 		writeKnownGaps(db, knownGapsTableIncrement, slot, slot, err, errReason, metrics)
 	}
 }

 // Handle a historic slot. A wrapper function for calling `handleFullSlot`.
-func handleHistoricSlot(db sql.Database, serverAddress string, slot int, metrics *BeaconClientMetrics, checkDb bool) (error, string) {
-	return processFullSlot(db, serverAddress, slot, "", "", 0, "", "historic", metrics, 1, checkDb)
+func handleHistoricSlot(ctx context.Context, db sql.Database, serverAddress string, slot int, metrics *BeaconClientMetrics, checkDb bool) (error, string) {
+	return processFullSlot(ctx, db, serverAddress, slot, "", "", 0, "", "historic", metrics, 1, checkDb)
 }
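Because handleHistoricSlot now receives the caller's context, a batch driver can stop cleanly between slots. The loop below is a hypothetical sketch of that call pattern; processSlot is an assumed placeholder with the same (error, string) shape, and the slot numbers and timeout are illustrative only.

package main

import (
    "context"
    "fmt"
    "time"
)

// processSlot is a placeholder for a per-slot worker such as handleHistoricSlot.
func processSlot(ctx context.Context, slot int) (error, string) {
    select {
    case <-ctx.Done():
        return nil, ""
    default:
        time.Sleep(10 * time.Millisecond) // pretend to fetch and write the slot
        return nil, ""
    }
}

func main() {
    // A real caller would tie cancellation to SIGINT/SIGTERM handling rather
    // than a fixed timeout.
    ctx, cancel := context.WithTimeout(context.Background(), 35*time.Millisecond)
    defer cancel()

    for slot := 4000000; slot < 4000010; slot++ {
        if ctx.Err() != nil {
            fmt.Println("shutdown requested, stopping before slot", slot)
            break
        }
        if err, reason := processSlot(ctx, slot); err != nil {
            fmt.Println("slot", slot, "failed:", reason, err)
        }
    }
}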
 // Update the SszSignedBeaconBlock and FullSignedBeaconBlock object with their respective values.

@@ -213,15 +225,8 @@ func (ps *ProcessSlot) getSignedBeaconBlock(serverAddress string, vmCh <-chan *d
 	ps.FullSignedBeaconBlock, err = vm.UnmarshalBeaconBlock(ps.SszSignedBeaconBlock)
 	if err != nil {
-		loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error("We are getting an error message when unmarshalling the SignedBeaconBlock.")
-		if ps.FullSignedBeaconBlock.Block().Slot() == 0 {
-			loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error(SlotUnmarshalError("SignedBeaconBlock"))
-			return fmt.Errorf(SlotUnmarshalError("SignedBeaconBlock"))
-		} else if ps.FullSignedBeaconBlock.Block().ParentRoot() == nil {
-			loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error(ParentRootUnmarshalError)
-			return fmt.Errorf(ParentRootUnmarshalError)
-		}
-		log.Warn("We received a processing error: ", err)
+		loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Warn("Unable to process the slots SignedBeaconBlock")
+		return nil
 	}
 	ps.ParentBlockRoot = "0x" + hex.EncodeToString(ps.FullSignedBeaconBlock.Block().ParentRoot())
 	return nil
@@ -247,14 +252,8 @@ func (ps *ProcessSlot) getBeaconState(serverEndpoint string, vmCh chan<- *dt.Ver
 	vmCh <- versionedUnmarshaler
 	ps.FullBeaconState, err = versionedUnmarshaler.UnmarshalBeaconState(ps.SszBeaconState)
 	if err != nil {
-		loghelper.LogError(err).Error("We are getting an error message when unmarshalling the BeaconState")
-		if ps.FullBeaconState.Slot() == 0 {
-			loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error(SlotUnmarshalError("BeaconState"))
-			return fmt.Errorf(SlotUnmarshalError("BeaconState"))
-		} else if hex.EncodeToString(ps.FullBeaconState.Eth1Data().BlockHash) == "" {
-			loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error(MissingEth1Data)
-			return fmt.Errorf(MissingEth1Data)
-		}
+		loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error("Unable to process the slots BeaconState")
+		return err
 	}
 	return nil
 }
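The two fetch goroutines in processFullSlot share the result of fork detection through vUnmarshalerCh, a channel of capacity 1: getBeaconState sends the versioned unmarshaler and getSignedBeaconBlock receives it, so the fork only has to be detected once. The sketch below illustrates that hand-off with a placeholder Decoder type standing in for Prysm's *dt.VersionedUnmarshaler; it shows the pattern only and is not the indexer's code.

package main

import (
    "fmt"

    "golang.org/x/sync/errgroup"
)

// Decoder is a placeholder for the fork-specific unmarshaler.
type Decoder struct{ fork string }

func main() {
    g := new(errgroup.Group)
    // Capacity 1 lets the producer send even if the consumer has already
    // exited, so neither goroutine can deadlock on the hand-off.
    decCh := make(chan *Decoder, 1)

    // State side: detect the fork, publish the decoder, then keep decoding.
    g.Go(func() error {
        dec := &Decoder{fork: "bellatrix"} // placeholder detection result
        decCh <- dec
        fmt.Println("state decoded with", dec.fork)
        return nil
    })

    // Block side: wait for the decoder, then decode the block with it.
    g.Go(func() error {
        dec := <-decCh
        fmt.Println("block decoded with", dec.fork)
        return nil
    })

    if err := g.Wait(); err != nil {
        fmt.Println("error:", err)
    }
}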


@@ -24,7 +24,7 @@ import (
 	"net/http"

 	log "github.com/sirupsen/logrus"
-	"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
+	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
 )

 // A helper function to query endpoints that utilize slots.


@@ -0,0 +1,69 @@
package beaconclient_test

import (
	"os"
	"strconv"
	"time"

	. "github.com/onsi/ginkgo/v2"
	//. "github.com/onsi/gomega"
	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
)

var (
	prodConfig = Config{
		protocol:                os.Getenv("bc_protocol"),
		address:                 os.Getenv("bc_address"),
		port:                    getEnvInt(os.Getenv("bc_port")),
		dbHost:                  os.Getenv("db_host"),
		dbPort:                  getEnvInt(os.Getenv("db_port")),
		dbName:                  os.Getenv("db_name"),
		dbUser:                  os.Getenv("db_user"),
		dbPassword:              os.Getenv("db_password"),
		dbDriver:                os.Getenv("db_driver"),
		knownGapsTableIncrement: 100000000,
		bcUniqueIdentifier:      100,
		checkDb:                 false,
	}
)

var _ = Describe("Systemvalidation", Label("system"), func() {
	Describe("Run the application against a running lighthouse node", func() {
		Context("When we receive head messages", func() {
			It("We should process the messages successfully", func() {
				bc := setUpTest(prodConfig, "10000000000")
				processProdHeadBlocks(bc, 3, 0, 0, 0)
			})
		})
		Context("When we have historical and knownGaps slots to process", Label("system-batch"), func() {
			It("Should process them successfully", func() {
				bc := setUpTest(prodConfig, "10000000000")

				//known Gaps
				BeaconNodeTester.writeEventToKnownGaps(bc, 100, 101)
				BeaconNodeTester.runKnownGapsProcess(bc, 2, 2, 0, 0, 0)

				// Historical
				BeaconNodeTester.writeEventToHistoricProcess(bc, 2375703, 2375703, 10)
				BeaconNodeTester.runHistoricalProcess(bc, 2, 3, 0, 0, 0)

				time.Sleep(2 * time.Second)
				validatePopularBatchBlocks(bc)
			})
		})
	})
})

// Wrapper function to get int env variables.
func getEnvInt(envVar string) int {
	val, err := strconv.Atoi(envVar)
	if err != nil {
		return 0
	}
	return val
}

// Start head tracking and wait for the expected results.
func processProdHeadBlocks(bc *beaconclient.BeaconClient, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) {
	go bc.CaptureHead()
	time.Sleep(1 * time.Second)
	validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError)
}


@@ -20,8 +20,8 @@ import (
 	"fmt"

 	log "github.com/sirupsen/logrus"
-	"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
-	"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
+	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
 )

 var _ sql.Database = &DB{}


@@ -23,7 +23,7 @@ import (
 	"github.com/jackc/pgconn"
 	"github.com/jackc/pgx/v4"
 	"github.com/jackc/pgx/v4/pgxpool"
-	"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
+	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
 )

 // pgxDriver driver, implements sql.Driver


@@ -23,9 +23,9 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"

-	"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
-	"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql/postgres"
-	"github.com/vulcanize/ipld-ethcl-indexer/pkg/testhelpers"
+	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql/postgres"
+	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/testhelpers"
 )

 var _ = Describe("Pgx", func() {


@@ -25,7 +25,7 @@ import (
 	"time"

 	log "github.com/sirupsen/logrus"
-	"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
+	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
 )

 // operation is a clean up function on shutting down