Compare commits
No commits in common. "main" and "v0.1.0" have entirely different histories.
.github/workflows/generic-testing.yml (vendored): 204 changed lines
@@ -1,204 +0,0 @@
-name: Test the stack.
-on:
-  workflow_call:
-    inputs:
-      stack-orchestrator-ref:
-        required: false
-        type: string
-      ipld-eth-beacon-db-ref:
-        required: false
-        type: string
-      ssz-data-ref:
-        required: false
-        type: string
-    secrets:
-      GHA_KEY:
-        required: true
-
-env:
-  stack-orchestrator-ref: ${{ inputs.stack-orchestrator-ref || '7fb664270a0ba09e2caa3095e8c91f3fdb5b38af' }}
-  ipld-eth-beacon-db-ref: ${{ inputs.ipld-eth-beacon-db-ref || '6b38fe9b18f7b19a803c626b742cafdccc1a2365' }}
-  ssz-data-ref: ${{ inputs.ssz-data-ref || 'main' }}
-  GOPATH: /tmp/go
-
-jobs:
-  build:
-    name: Run Docker Build
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-        with:
-          path: "./ipld-eth-beacon-indexer"
-
-      - uses: actions/checkout@v3
-        with:
-          ref: ${{ env.stack-orchestrator-ref }}
-          path: "./stack-orchestrator/"
-          repository: vulcanize/stack-orchestrator
-          fetch-depth: 0
-
-      - uses: actions/checkout@v3
-        with:
-          ref: ${{ env.ipld-eth-beacon-db-ref }}
-          repository: vulcanize/ipld-eth-beacon-db
-          path: "./ipld-eth-beacon-db/"
-          ssh-key: ${{secrets.GHA_KEY}}
-          fetch-depth: 0
-
-      - name: Create config file
-        run: |
-          echo vulcanize_ipld_eth_beacon_db=$GITHUB_WORKSPACE/ipld-eth-beacon-db/ > ./config.sh
-          echo vulcanize_ipld_eth_beacon_indexer=$GITHUB_WORKSPACE/ipld-eth-beacon-indexer >> ./config.sh
-          echo eth_beacon_config_file=$GITHUB_WORKSPACE/ipld-eth-beacon-indexer/config/cicd/boot.ipld-eth-beacon-indexer.json >> ./config.sh
-          echo eth_beacon_capture_mode=boot >> ./config.sh
-          echo CAPTURE_MODE=boot >> config.sh
-          cat ./config.sh
-
-      - name: Run docker compose
-        run: |
-          docker-compose \
-            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml" \
-            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-lighthouse.yml" \
-            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-indexer.yml" \
-            --env-file ./config.sh \
-            up -d --build
-
-      - name: Check to make sure HEALTH file is present
-        shell: bash
-        run: |
-          until $(docker compose \
-            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml" \
-            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-indexer.yml" \
-            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-lighthouse.yml" \
-            --env-file ./config.sh cp ipld-eth-beacon-indexer:/root/HEALTH ./HEALTH) ; do sleep 10; done
-          cat ./HEALTH
-          if [[ "$(cat ./HEALTH)" -eq "0" ]]; then echo "Application boot successful" && (exit 0); else docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-indexer.yml" cp ipld-eth-beacon-indexer:/root/ipld-eth-beacon-indexer.log . && cat ipld-eth-beacon-indexer.log && (exit 1); fi
-
-  unit-test:
-    name: Run Unit Tests
-    runs-on: ubuntu-latest
-    ## IF you want to update the default branch for `pull_request runs, do it after the ||`
-    steps:
-      - name: Create GOPATH
-        run: mkdir -p /tmp/go
-
-      - uses: actions/checkout@v2
-        with:
-          path: "./ipld-eth-beacon-indexer"
-
-      - uses: actions/checkout@v3
-        with:
-          ref: ${{ env.stack-orchestrator-ref }}
-          path: "./stack-orchestrator/"
-          repository: vulcanize/stack-orchestrator
-          fetch-depth: 0
-
-      - uses: actions/checkout@v3
-        with:
-          ref: ${{ env.ipld-eth-beacon-db-ref }}
-          repository: vulcanize/ipld-eth-beacon-db
-          path: "./ipld-eth-beacon-db/"
-          ssh-key: ${{ secrets.GHA_KEY }}
-          fetch-depth: 0
-
-      - uses: actions/checkout@v3
-        with:
-          ref: ${{ env.ssz-data-ref }}
-          repository: vulcanize/ssz-data
-          path: "./ipld-eth-beacon-indexer/pkg/beaconclient/ssz-data"
-          fetch-depth: 0
-
-      - name: Create config file
-        run: |
-          echo vulcanize_ipld_eth_beacon_db=$GITHUB_WORKSPACE/ipld-eth-beacon-db/ > ./config.sh
-          echo vulcanize_ipld_eth_beacon_indexer=$GITHUB_WORKSPACE/ipld-eth-beacon-indexer >> ./config.sh
-          cat ./config.sh
-
-      - name: Run docker compose
-        run: |
-          docker-compose \
-            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml" \
-            --env-file ./config.sh \
-            up -d --build
-
-      - uses: actions/setup-go@v3
-        with:
-          go-version: ">=1.18.0"
-          check-latest: true
-
-      - name: Install packages
-        run: |
-          go install github.com/onsi/ginkgo/v2/ginkgo@2.1.4
-          which ginkgo
-
-      - name: Run the tests using Make
-        run: |
-          cd ipld-eth-beacon-indexer
-          make unit-test-ci
-
-  integration-test:
-    name: Run Integration Tests
-    runs-on: ubuntu-latest
-    steps:
-      - name: Create GOPATH
-        run: mkdir -p /tmp/go
-
-      - uses: actions/checkout@v2
-        with:
-          path: "./ipld-eth-beacon-indexer"
-
-      - uses: actions/checkout@v3
-        with:
-          ref: ${{ env.stack-orchestrator-ref }}
-          path: "./stack-orchestrator/"
-          repository: vulcanize/stack-orchestrator
-          fetch-depth: 0
-
-      - uses: actions/checkout@v3
-        with:
-          ref: ${{ env.ipld-eth-beacon-db-ref }}
-          repository: vulcanize/ipld-eth-beacon-db
-          path: "./ipld-eth-beacon-db/"
-          ssh-key: ${{secrets.GHA_KEY}}
-          fetch-depth: 0
-
-      - name: Create config file
-        run: |
-          echo vulcanize_ipld_eth_beacon_db=$GITHUB_WORKSPACE/ipld-eth-beacon-db/ > ./config.sh
-          echo vulcanize_ipld_eth_beacon_indexer=$GITHUB_WORKSPACE/ipld-eth-beacon-indexer >> ./config.sh
-          cat ./config.sh
-
-      - name: Run docker compose
-        run: |
-          docker-compose \
-            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml" \
-            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-lighthouse.yml" \
-            --env-file ./config.sh \
-            up -d --build
-
-      - uses: actions/setup-go@v3
-        with:
-          go-version: ">=1.18.0"
-          check-latest: true
-
-      - name: Install packages
-        run: |
-          go install github.com/onsi/ginkgo/v2/ginkgo@latest
-          which ginkgo
-
-      - name: Run the tests using Make
-        run: |
-          cd ipld-eth-beacon-indexer
-          make integration-test-ci
-
-  golangci:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/setup-go@v3
-        with:
-          go-version: ">=1.18.0"
-      - uses: actions/checkout@v3
-      - name: golangci-lint
-        uses: golangci/golangci-lint-action@v3
-        with:
-          args: --timeout 90s --disable deadcode,unused
-          # args: --timeout 90s --disable deadcode,
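The "Check to make sure HEALTH file is present" step above encodes a small handshake: the indexer container is expected to write `/root/HEALTH` containing `0` once boot succeeds, and CI polls for that file with `docker compose cp`, dumping the log and failing on any other value. A minimal Go sketch of the writer side of that convention follows; the real writer lives inside the indexer's boot path, so the function name and call site here are illustrative only.

```go
package main

import "os"

// writeHealth records boot status where CI expects it: "0" means boot
// succeeded; anything else makes the workflow print the log and fail.
// The path matches the `cp ipld-eth-beacon-indexer:/root/HEALTH` step above.
func writeHealth(ok bool) error {
	status := "1"
	if ok {
		status = "0"
	}
	return os.WriteFile("/root/HEALTH", []byte(status), 0644)
}

func main() {
	if err := writeHealth(true); err != nil {
		os.Exit(1)
	}
}
```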
.github/workflows/issues-notion-sync.yml (vendored): 29 changed lines
@@ -1,29 +0,0 @@
-name: Notion Sync
-
-on:
-  workflow_dispatch:
-  issues:
-    types:
-      [
-        opened,
-        edited,
-        labeled,
-        unlabeled,
-        assigned,
-        unassigned,
-        milestoned,
-        demilestoned,
-        reopened,
-        closed,
-      ]
-
-jobs:
-  notion_job:
-    runs-on: ubuntu-latest
-    name: Add GitHub Issues to Notion
-    steps:
-      - name: Add GitHub Issues to Notion
-        uses: vulcanize/notion-github-action@v1.2.4-issueid
-        with:
-          notion-token: ${{ secrets.NOTION_TOKEN }}
-          notion-db: ${{ secrets.NOTION_DATABASE }}
.github/workflows/on-pr.yml (vendored): 206 changed lines
@@ -7,8 +7,8 @@ on:
         description: "The branch, commit or sha from stack-orchestrator to checkout"
         required: false
         default: "main"
-      ipld-eth-beacon-db-ref:
-        description: "The branch, commit or sha from ipld-eth-beacon-db to checkout"
+      ipld-ethcl-db-ref:
+        description: "The branch, commit or sha from ipld-ethcl-db to checkout"
         required: false
         default: "main"
       ssz-data-ref:
@@ -22,26 +22,188 @@ on:
       - "!LICENSE"
       - "!.github/workflows/**"
       - ".github/workflows/on-pr.yml"
-      - ".github/workflows/tests.yml"
       - "**"
-  #schedule:
-  #  - cron: '0 13 * * *' # Must be single quotes!!

+env:
+  stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref || 'feature/client-build'}}
+  ipld-ethcl-db-ref: ${{ github.event.inputs.ipld-ethcl-db-ref || 'feature/schema-ipld-ethcl-indexer' }}
+  ssz-data-ref: ${{ github.event.inputs.ssz-data-ref || 'main' }}
+  GOPATH: /tmp/go
+
 jobs:
-  trigger-tests:
-    if: github.event_name != 'schedule'
-    uses: ./.github/workflows/generic-testing.yml
-    with:
-      stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref }}
-      ipld-eth-beacon-db-ref: ${{ github.event.inputs.ipld-eth-beacon-db-ref }}
-      ssz-data-ref: ${{ github.event.inputs.ssz-data-ref }}
-    secrets:
-      GHA_KEY: ${{secrets.GHA_KEY}}
-  system-testing:
-    uses: ./.github/workflows/system-tests.yml
-    with:
-      stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref }}
-      ipld-eth-beacon-db-ref: ${{ github.event.inputs.ipld-eth-beacon-db-ref }}
-    secrets:
-      GHA_KEY: ${{secrets.GHA_KEY}}
-      BC_ADDRESS: ${{secrets.BC_ADDRESS}}
+  build:
+    name: Run Docker Build
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          path: "./ipld-ethcl-indexer"
+
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ env.stack-orchestrator-ref }}
+          path: "./stack-orchestrator/"
+          repository: vulcanize/stack-orchestrator
+          fetch-depth: 0
+
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ env.ipld-ethcl-db-ref }}
+          repository: vulcanize/ipld-ethcl-db
+          path: "./ipld-ethcl-db/"
+          token: ${{ secrets.GH_PAT }}
+          fetch-depth: 0
+
+      - name: Create config file
+        run: |
+          echo vulcanize_ipld_ethcl_db=$GITHUB_WORKSPACE/ipld-ethcl-db/ > ./config.sh
+          echo vulcanize_ipld_ethcl_indexer=$GITHUB_WORKSPACE/ipld-ethcl-indexer >> ./config.sh
+          echo ethcl_capture_mode=boot >> ./config.sh
+          echo ethcl_skip_sync=true >> ./config.sh
+          echo ethcl_known_gap_increment=1000000 >> ./config.sh
+          cat ./config.sh
+
+      - name: Run docker compose
+        run: |
+          docker-compose \
+            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ethcl-db.yml" \
+            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-lighthouse.yml" \
+            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-ethcl-indexer.yml" \
+            --env-file ./config.sh \
+            up -d --build
+
+      - name: Check to make sure HEALTH file is present
+        shell: bash
+        run: |
+          until $(docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-ethcl-indexer.yml" cp ipld-ethcl-indexer:/root/HEALTH ./HEALTH) ; do sleep 10; done
+          cat ./HEALTH
+          if [[ "$(cat ./HEALTH)" -eq "0" ]]; then echo "Application boot successful" && (exit 0); else docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-ethcl-indexer.yml" cp ipld-ethcl-indexer:/root/ipld-ethcl-indexer.log . && cat ipld-ethcl-indexer.log && (exit 1); fi
+
+  unit-test:
+    name: Run Unit Tests
+    runs-on: ubuntu-latest
+    ## IF you want to update the default branch for `pull_request runs, do it after the ||`
+    steps:
+      - name: Create GOPATH
+        run: mkdir -p /tmp/go
+
+      - uses: actions/checkout@v2
+        with:
+          path: "./ipld-ethcl-indexer"
+
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ env.stack-orchestrator-ref }}
+          path: "./stack-orchestrator/"
+          repository: vulcanize/stack-orchestrator
+          fetch-depth: 0
+
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ env.ipld-ethcl-db-ref }}
+          repository: vulcanize/ipld-ethcl-db
+          path: "./ipld-ethcl-db/"
+          token: ${{ secrets.GH_PAT }}
+          fetch-depth: 0
+
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ env.ssz-data-ref }}
+          repository: vulcanize/ssz-data
+          path: "./ipld-ethcl-indexer/pkg/beaconclient/ssz-data"
+          fetch-depth: 0
+
+      - name: Create config file
+        run: |
+          echo vulcanize_ipld_ethcl_db=$GITHUB_WORKSPACE/ipld-ethcl-db/ > ./config.sh
+          echo vulcanize_ipld_ethcl_indexer=$GITHUB_WORKSPACE/ipld-ethcl-indexer >> ./config.sh
+          cat ./config.sh
+
+      - name: Run docker compose
+        run: |
+          docker-compose \
+            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ethcl-db.yml" \
+            --env-file ./config.sh \
+            up -d --build
+
+      - uses: actions/setup-go@v3
+        with:
+          go-version: ">=1.17.0"
+          check-latest: true
+
+      - name: Install packages
+        run: |
+          go install github.com/onsi/ginkgo/v2/ginkgo@latest
+          which ginkgo
+
+      - name: Run the tests using Make
+        run: |
+          cd ipld-ethcl-indexer
+          make unit-test-ci
+
+  integration-test:
+    name: Run Integration Tests
+    runs-on: ubuntu-latest
+    steps:
+      - name: Create GOPATH
+        run: mkdir -p /tmp/go
+
+      - uses: actions/checkout@v2
+        with:
+          path: "./ipld-ethcl-indexer"
+
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ env.stack-orchestrator-ref }}
+          path: "./stack-orchestrator/"
+          repository: vulcanize/stack-orchestrator
+          fetch-depth: 0
+
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ env.ipld-ethcl-db-ref }}
+          repository: vulcanize/ipld-ethcl-db
+          path: "./ipld-ethcl-db/"
+          token: ${{ secrets.GH_PAT }}
+          fetch-depth: 0
+
+      - name: Create config file
+        run: |
+          echo vulcanize_ipld_ethcl_db=$GITHUB_WORKSPACE/ipld-ethcl-db/ > ./config.sh
+          echo vulcanize_ipld_ethcl_indexer=$GITHUB_WORKSPACE/ipld-ethcl-indexer >> ./config.sh
+          echo ethcl_capture_mode=boot >> ./config.sh
+          cat ./config.sh
+
+      - name: Run docker compose
+        run: |
+          docker-compose \
+            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ethcl-db.yml" \
+            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-lighthouse.yml" \
+            --env-file ./config.sh \
+            up -d --build
+
+      - uses: actions/setup-go@v3
+        with:
+          go-version: ">=1.17.0"
+          check-latest: true
+
+      - name: Install packages
+        run: |
+          go install github.com/onsi/ginkgo/v2/ginkgo@latest
+          which ginkgo
+
+      - name: Run the tests using Make
+        run: |
+          cd ipld-ethcl-indexer
+          make integration-test-ci
+
+  golangci:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/setup-go@v3
+        with:
+          go-version: ">=1.17.0"
+      - uses: actions/checkout@v3
+      - name: golangci-lint
+        uses: golangci/golangci-lint-action@v3
+        with:
+          args: --timeout 90s
.github/workflows/on-publish.yml (vendored): 29 changed lines
@@ -3,28 +3,9 @@ on:
   release:
     types: [published, edited]
 jobs:
-  trigger-tests:
-    uses: ./.github/workflows/generic-testing.yml
-    with:
-      stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref }}
-      ipld-eth-beacon-db-ref: ${{ github.event.inputs.ipld-eth-beacon-db-ref }}
-      ssz-data-ref: ${{ github.event.inputs.ssz-data-ref }}
-    secrets:
-      GHA_KEY: ${{secrets.GHA_KEY}}
-  system-testing:
-    uses: ./.github/workflows/system-tests.yml
-    with:
-      stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref }}
-      ipld-eth-beacon-db-ref: ${{ github.event.inputs.ipld-eth-beacon-db-ref }}
-    secrets:
-      GHA_KEY: ${{secrets.GHA_KEY}}
-      BC_ADDRESS: ${{secrets.BC_ADDRESS}}
   build:
     name: Run docker build
     runs-on: ubuntu-latest
-    needs:
-      - trigger-tests
-      - system-testing
     steps:
       - uses: actions/checkout@v2
       - name: Get the version
@@ -33,11 +14,11 @@ jobs:
       - name: Run docker build
         run: make docker-build
       - name: Tag docker image
-        run: docker tag vulcanize/ipld-eth-beacon-indexer docker.pkg.github.com/vulcanize/ipld-eth-beacon-indexer/ipld-eth-beacon-indexer:${{steps.vars.outputs.sha}}
+        run: docker tag vulcanize/ipld-ethcl-indexer docker.pkg.github.com/vulcanize/ipld-ethcl-indexer/ipld-ethcl-indexer:${{steps.vars.outputs.sha}}
       - name: Docker Login
         run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
       - name: Docker Push
-        run: docker push docker.pkg.github.com/vulcanize/ipld-eth-beacon-indexer/ipld-eth-beacon-indexer:${{steps.vars.outputs.sha}}
+        run: docker push docker.pkg.github.com/vulcanize/ipld-ethcl-indexer/ipld-ethcl-indexer:${{steps.vars.outputs.sha}}
   push_to_registries:
     name: Push Docker image to Docker Hub
     runs-on: ubuntu-latest
@@ -51,10 +32,10 @@ jobs:
       - name: Docker Login to Github Registry
        run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
       - name: Docker Pull
-        run: docker pull docker.pkg.github.com/vulcanize/ipld-eth-beacon-indexer/ipld-eth-beacon-indexer:${{steps.vars.outputs.sha}}
+        run: docker pull docker.pkg.github.com/vulcanize/ipld-ethcl-indexer/ipld-ethcl-indexer:${{steps.vars.outputs.sha}}
       - name: Docker Login to Docker Registry
         run: echo ${{ secrets.VULCANIZEJENKINS_PAT }} | docker login -u vulcanizejenkins --password-stdin
       - name: Tag docker image
-        run: docker tag docker.pkg.github.com/vulcanize/ipld-eth-beacon-indexer/ipld-eth-beacon-indexer:${{steps.vars.outputs.sha}} vulcanize/ipld-eth-beacon-indexer:${{steps.vars.outputs.tag}}
+        run: docker tag docker.pkg.github.com/vulcanize/ipld-ethcl-indexer/ipld-ethcl-indexer:${{steps.vars.outputs.sha}} vulcanize/ipld-ethcl-indexer:${{steps.vars.outputs.tag}}
       - name: Docker Push to Docker Hub
-        run: docker push vulcanize/ipld-eth-beacon-indexer:${{steps.vars.outputs.tag}}
+        run: docker push vulcanize/ipld-ethcl-indexer:${{steps.vars.outputs.tag}}
.github/workflows/system-tests.yml (vendored): 95 changed lines
@@ -1,95 +0,0 @@
-name: System Testing for the stack.
-on:
-  workflow_call:
-    inputs:
-      stack-orchestrator-ref:
-        required: false
-        type: string
-      ipld-eth-beacon-db-ref:
-        required: false
-        type: string
-    secrets:
-      GHA_KEY:
-        required: true
-      BC_ADDRESS:
-        required: true
-env:
-  stack-orchestrator-ref: ${{ inputs.stack-orchestrator-ref || '7fb664270a0ba09e2caa3095e8c91f3fdb5b38af' }}
-  ipld-eth-beacon-db-ref: ${{ inputs.ipld-eth-beacon-db-ref || '6b38fe9b18f7b19a803c626b742cafdccc1a2365' }}
-  GOPATH: /tmp/go
-  bc_protocol: "http"
-  bc_address: ${{secrets.BC_ADDRESS}}
-  bc_port: 5052
-  db_host: localhost
-  db_port: 8076
-  db_name: vulcanize_testing
-  db_user: vdbm
-  db_password: password
-  db_driver: "pgx"
-
-jobs:
-  system-testing:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Create GOPATH
-        run: mkdir -p /tmp/go
-
-      - uses: actions/checkout@v2
-        with:
-          path: "./ipld-eth-beacon-indexer"
-
-      - uses: actions/checkout@v3
-        with:
-          ref: ${{ env.stack-orchestrator-ref }}
-          path: "./stack-orchestrator/"
-          repository: vulcanize/stack-orchestrator
-          fetch-depth: 0
-
-      - uses: actions/checkout@v3
-        with:
-          ref: ${{ env.ipld-eth-beacon-db-ref }}
-          repository: vulcanize/ipld-eth-beacon-db
-          path: "./ipld-eth-beacon-db/"
-          ssh-key: ${{secrets.GHA_KEY}}
-          fetch-depth: 0
-
-      - name: Create config file
-        run: |
-          echo vulcanize_ipld_eth_beacon_db=$(pwd)/ipld-eth-beacon-db > ./config.sh
-          cat ./config.sh
-
-      - name: Run docker compose
-        id: compose
-        shell: bash
-        run: |
-          ls "./stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml"
-          whoami
-          /usr/local/bin/docker-compose \
-            -f "./stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml" \
-            --env-file ./config.sh \
-            up -d --build
-
-      - uses: actions/setup-go@v3
-        with:
-          go-version: ">=1.18.0"
-          check-latest: true
-
-      - name: Install packages
-        run: |
-          go install github.com/onsi/ginkgo/v2/ginkgo@latest
-          which ginkgo
-
-      - name: Run the tests using Make
-        run: |
-          sleep 20
-          cd ipld-eth-beacon-indexer
-          make system-test-ci
-
-      - name: Clean up the docker containers
-        if: always() && steps.compose.outcome == 'success'
-        shell: bash
-        run: |
-          /usr/local/bin/docker-compose \
-            -f "./stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml" \
-            --env-file ./config.sh \
-            down -v
.gitignore (vendored): 8 changed lines
@@ -4,11 +4,5 @@ ipld-ethcl-indexer.log
 report.json
 cover.profile
 temp/*
-.vscode/*
 pkg/beaconclient/ssz-data/
 *.test
-ipld-eth-beacon-indexer.log
-ipld-eth-beacon-indexer
-config/local.ipld-eth-beacon-indexer-config.json
-config/docker.ipld-eth-beacon-indexer-config.json
-.idea/*
Dockerfile: 17 changed lines
@@ -1,7 +1,7 @@
 FROM golang:1.18-alpine as builder

-WORKDIR /go/src/github.com/vulcanize/ipld-eth-beacon-indexer
-RUN apk --no-cache add ca-certificates make git g++ linux-headers libstdc++
+WORKDIR /go/src/github.com/vulcanize/ipld-ethcl-indexer
+RUN apk --no-cache add ca-certificates make git g++ linux-headers

 ENV GO111MODULE=on
 COPY go.mod .
@@ -9,13 +9,12 @@ COPY go.sum .
 RUN go mod tidy; go mod download
 COPY . .

-RUN GCO_ENABLED=0 GOOS=linux go build -race -ldflags="-s -w" -o ipld-eth-beacon-indexer .
-RUN chmod +x ipld-eth-beacon-indexer
+RUN GCO_ENABLED=0 GOOS=linux go build -race -a -installsuffix cgo -ldflags '-extldflags "-static"' -o ipld-ethcl-indexer .
+RUN chmod +x ipld-ethcl-indexer

-FROM alpine:latest
-RUN apk --no-cache add ca-certificates libstdc++ busybox-extras gettext libintl bash gawk sed grep bc coreutils
+FROM frolvlad/alpine-bash:latest
+RUN apk --no-cache add ca-certificates
 WORKDIR /root/
-COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-beacon-indexer/ipld-eth-beacon-indexer /root/ipld-eth-beacon-indexer
+COPY --from=builder /go/src/github.com/vulcanize/ipld-ethcl-indexer/ipld-ethcl-indexer /root/ipld-ethcl-indexer
 ADD entrypoint.sh .
-ADD ipld-eth-beacon-config-docker.json .
 ENTRYPOINT ["./entrypoint.sh"]
Makefile: 44 changed lines
@@ -65,19 +65,8 @@ integration-test-local-no-race:
 unit-test-local:
 	go vet ./...
 	go fmt ./...
-	$(GINKGO) -r --label-filter 'unit && !flaky' \
+	$(GINKGO) -r --label-filter unit \
 	--randomize-all --randomize-suites \
-	--flake-attempts=3 \
-	--fail-on-pending --keep-going \
-	--trace
-
-.PHONY: unit-test-local-bellatrix
-unit-test-local-bellatrix:
-	go vet ./...
-	go fmt ./...
-	$(GINKGO) -r --label-filter 'unit && !flaky && bellatrix' \
-	--randomize-all --randomize-suites \
-	--flake-attempts=3 \
 	--fail-on-pending --keep-going \
 	--trace

@@ -86,40 +75,11 @@ unit-test-ci:
 	go vet ./...
 	go fmt ./...
 	$(GINKGO) -r --label-filter unit \
-	--randomize-all --randomize-suites
-	--flake-attempts=3 \
-	--fail-on-pending --keep-going \
-	--cover --coverprofile=cover.profile \
-	--trace --json-report=report.json
-
-.PHONY: system-test-ci
-system-test-ci:
-	go vet ./...
-	go fmt ./...
-	$(GINKGO) -r --label-filter system \
 	--randomize-all --randomize-suites \
 	--fail-on-pending --keep-going \
 	--cover --coverprofile=cover.profile \
 	--trace --json-report=report.json

-.PHONY: system-test-local
-system-test-local:
-	go vet ./...
-	go fmt ./...
-	$(GINKGO) -r --label-filter system \
-	--randomize-all --randomize-suites \
-	--fail-on-pending --keep-going \
-	--trace
-
-.PHONY: e2e-test-local
-e2e-test-local:
-	go vet ./...
-	go fmt ./...
-	$(GINKGO) -r --label-filter e2e \
-	--randomize-all --randomize-suites \
-	--fail-on-pending --keep-going \
-	--trace
-
-
 .PHONY: build
 build:
@@ -129,4 +89,4 @@ build:
 ## Build docker image
 .PHONY: docker-build
 docker-build:
-	docker build -t vulcanize/ipld-eth-beacon-indexer .
+	docker build -t vulcanize/ipld-ethcl-indexer .
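The Makefile targets differ mainly in their Ginkgo label filters: main's `unit-test-local` runs `'unit && !flaky'` with `--flake-attempts=3`, while v0.1.0 filtered on `unit` alone. A short hedged sketch of how such labels attach to specs (the suite and spec names here are made up, not taken from this repo):

```go
package example_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestExample(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Example Suite")
}

// Selected by `--label-filter unit` and by `--label-filter 'unit && !flaky'`;
// adding Label("flaky") would exclude it from the latter filter.
var _ = Describe("slot arithmetic", Label("unit"), func() {
	It("computes the next slot", func() {
		Expect(100 + 1).To(Equal(101))
	})
})
```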
README.md: 22 changed lines
@@ -1,4 +1,4 @@
-- [ipld-eth-beacon-indexer](#ipld-eth-beacon-indexer)
+- [ipld-ethcl-indexer](#ipld-ethcl-indexer)
   - [Running the Application](#running-the-application)
   - [Development Patterns](#development-patterns)
   - [Logging](#logging)
@@ -8,7 +8,7 @@
 <small><i><a href='http://ecotrust-canada.github.io/markdown-toc/'>Table of contents generated with markdown-toc</a></i></small>

-# ipld-eth-beacon-indexer
+# ipld-ethcl-indexer

 This application will capture all the `BeaconState`'s and `SignedBeaconBlock`'s from the consensus chain on Ethereum. This application is going to connect to the lighthouse client, but hypothetically speaking, it should be interchangeable with any eth2 beacon node.
@@ -22,12 +22,12 @@ To run the application, do as follows:
 1. Setup the prerequisite applications.
    a. Run a beacon client (such as lighthouse).
-   b. Run a postgres DB for eth-beacon.
+   b. Run a postgres DB for ethcl.
    c. You can utilize the `stack-orchestrator` [repository](https://github.com/vulcanize/stack-orchestrato).

 ```
 ./wrapper.sh -e skip \
-  -d ../docker/local/docker-compose-ipld-eth-beacon-db.yml \
+  -d ../docker/local/docker-compose-ethcl-db.yml \
   -d ../docker/latest/docker-compose-lighthouse.yml \
   -v remove \
   -p ../local-config.sh
@@ -37,7 +37,19 @@ To run the application, do as follows:
 2. Run the start up command.

 ```
-go run -race main.go capture full --config ./example.ipld-eth-beacon-indexer-config.json
+go run -race main.go capture head --db.address localhost \
+  --db.password password \
+  --db.port 8076 \
+  --db.username vdbm \
+  --db.name vulcanize_testing \
+  --db.driver PGX \
+  --bc.address localhost \
+  --bc.port 5052 \
+  --bc.connectionProtocol http \
+  --t.skipSync=true \
+  --log.level info \
+  --log.output=true \
+  --kg.increment 100
 ```

 ## Running Tests
@@ -51,4 +51,4 @@ This package contains useful functions for logging.
 ## `internal/shutdown`

-This package is used to shutdown the `ipld-eth-beacon-indexer`. It calls the `pkg/gracefulshutdown` package.
+This package is used to shutdown the `ipld-ethcl-indexer`. It calls the `pkg/gracefulshutdown` package.
cmd/boot.go: 20 changed lines
@@ -23,10 +23,9 @@ import (
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/internal/shutdown"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
+	"github.com/vulcanize/ipld-ethcl-indexer/internal/boot"
+	"github.com/vulcanize/ipld-ethcl-indexer/internal/shutdown"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
 )

 // bootCmd represents the boot command
@@ -45,12 +44,9 @@ func bootApp() {
 	log.Info("Starting the application in boot mode.")
 	ctx := context.Background()

-	Bc, Db, err := boot.BootApplicationWithRetry(ctx, viper.GetString("db.address"), viper.GetInt("db.port"), viper.GetString("db.name"), viper.GetString("db.username"), viper.GetString("db.password"), viper.GetString("db.driver"),
-		viper.GetString("bc.address"), viper.GetInt("bc.port"), viper.GetString("bc.connectionProtocol"), viper.GetString("bc.type"), viper.GetInt("bc.bootRetryInterval"), viper.GetInt("bc.bootMaxRetry"),
-		viper.GetInt("kg.increment"), "boot", viper.GetBool("t.skipSync"), viper.GetInt("bc.uniqueNodeIdentifier"), viper.GetBool("bc.checkDb"),
-		viper.GetBool("bc.performBeaconBlockProcessing"), viper.GetBool("bc.performBeaconStateProcessing"))
+	BC, DB, err := boot.BootApplicationWithRetry(ctx, dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, testDisregardSync)
 	if err != nil {
-		StopApplicationPreBoot(err, Db)
+		loghelper.LogError(err).Error("Unable to Start application")
 	}

 	log.Info("Boot complete, we are going to shutdown.")
@@ -61,11 +57,11 @@
 		notifierCh <- syscall.SIGTERM
 	}()

-	err = shutdown.ShutdownBoot(ctx, notifierCh, maxWaitSecondsShutdown, Db, Bc)
+	err = shutdown.ShutdownServices(ctx, notifierCh, maxWaitSecondsShutdown, DB, BC)
 	if err != nil {
-		loghelper.LogError(err).Error("Ungracefully Shutdown ipld-eth-beacon-indexer!")
+		loghelper.LogError(err).Error("Ungracefully Shutdown ipld-ethcl-indexer!")
 	} else {
-		log.Info("Gracefully shutdown ipld-eth-beacon-indexer")
+		log.Info("Gracefully shutdown ipld-ethcl-indexer")
 	}
 }
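On both sides of this diff, `bootApp` finishes by pushing `syscall.SIGTERM` into its own notifier channel so the shared shutdown path runs exactly as if the OS had signaled. A self-contained sketch of that idiom (the channel wiring is simplified from the real command):

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	notifierCh := make(chan os.Signal, 1)
	signal.Notify(notifierCh, syscall.SIGINT, syscall.SIGTERM)

	go func() {
		// Boot work is done; trigger the normal shutdown path ourselves.
		notifierCh <- syscall.SIGTERM
	}()

	fmt.Println("shutting down on", <-notifierCh)
}
```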
cmd/capture.go: 123 changed lines
@@ -17,7 +17,6 @@
 package cmd

 import (
-	"fmt"
 	"os"
 	"time"
@@ -26,30 +25,19 @@ import (
 )

 var (
 	dbUsername string
 	dbPassword string
 	dbName     string
 	dbAddress  string
 	dbDriver   string
 	dbPort     int
 	bcAddress  string
 	bcPort     int
-	bcBootRetryInterval        int
-	bcBootMaxRetry             int
-	bcConnectionProtocol       string
-	bcType                     string
-	bcMaxHistoricProcessWorker int
-	bcUniqueNodeIdentifier     int
-	bcCheckDb                  bool
-	kgMaxWorker                int
-	kgTableIncrement           int
-	kgProcessGaps              bool
-	pmMetrics                  bool
-	pmAddress                  string
-	pmPort                     int
-	maxWaitSecondsShutdown     time.Duration  = time.Duration(20) * time.Second
-	notifierCh                 chan os.Signal = make(chan os.Signal, 1)
-	testDisregardSync          bool
+	bcConnectionProtocol   string
+	bcType                 string
+	maxWaitSecondsShutdown time.Duration  = time.Duration(5) * time.Second
+	notifierCh             chan os.Signal = make(chan os.Signal, 1)
+	testDisregardSync      bool
 )

 // captureCmd represents the capture command
@@ -74,50 +62,35 @@ func init() {
 	captureCmd.PersistentFlags().StringVarP(&dbName, "db.name", "n", "", "Database name connect to DB(required)")
 	captureCmd.PersistentFlags().StringVarP(&dbDriver, "db.driver", "", "", "Database Driver to connect to DB(required)")
 	captureCmd.PersistentFlags().IntVarP(&dbPort, "db.port", "", 0, "Port to connect to DB(required)")
-	//err := captureCmd.MarkPersistentFlagRequired("db.username")
-	// exitErr(err)
-	// err = captureCmd.MarkPersistentFlagRequired("db.password")
-	// exitErr(err)
-	// err = captureCmd.MarkPersistentFlagRequired("db.address")
-	// exitErr(err)
-	// err = captureCmd.MarkPersistentFlagRequired("db.port")
-	// exitErr(err)
-	// err = captureCmd.MarkPersistentFlagRequired("db.name")
-	// exitErr(err)
-	// err = captureCmd.MarkPersistentFlagRequired("db.driver")
-	// exitErr(err)
+	err := captureCmd.MarkPersistentFlagRequired("db.username")
+	exitErr(err)
+	err = captureCmd.MarkPersistentFlagRequired("db.password")
+	exitErr(err)
+	err = captureCmd.MarkPersistentFlagRequired("db.address")
+	exitErr(err)
+	err = captureCmd.MarkPersistentFlagRequired("db.port")
+	exitErr(err)
+	err = captureCmd.MarkPersistentFlagRequired("db.name")
+	exitErr(err)
+	err = captureCmd.MarkPersistentFlagRequired("db.driver")
+	exitErr(err)

 	//// Beacon Client Specific
 	captureCmd.PersistentFlags().StringVarP(&bcAddress, "bc.address", "l", "", "Address to connect to beacon node (required)")
 	captureCmd.PersistentFlags().StringVarP(&bcType, "bc.type", "", "lighthouse", "The beacon client we are using, options are prysm and lighthouse.")
 	captureCmd.PersistentFlags().IntVarP(&bcPort, "bc.port", "r", 0, "Port to connect to beacon node (required )")
 	captureCmd.PersistentFlags().StringVarP(&bcConnectionProtocol, "bc.connectionProtocol", "", "http", "protocol for connecting to the beacon node.")
-	captureCmd.PersistentFlags().IntVarP(&bcBootRetryInterval, "bc.bootRetryInterval", "", 30, "The amount of time to wait between retries while booting the application")
-	captureCmd.PersistentFlags().IntVarP(&bcBootMaxRetry, "bc.bootMaxRetry", "", 5, "The amount of time to wait between retries while booting the application")
-	captureCmd.PersistentFlags().IntVarP(&bcMaxHistoricProcessWorker, "bc.maxHistoricProcessWorker", "", 30, "The number of workers that should be actively processing slots from the eth-beacon.historic_process table. Be careful of system memory.")
-	captureCmd.PersistentFlags().IntVarP(&bcUniqueNodeIdentifier, "bc.uniqueNodeIdentifier", "", 0, "The unique identifier of this application. Each application connecting to the DB should have a unique identifier.")
-	captureCmd.PersistentFlags().BoolVarP(&bcCheckDb, "bc.checkDb", "", true, "Should we check to see if the slot exists in the DB before writing it?")
-	// err = captureCmd.MarkPersistentFlagRequired("bc.address")
-	// exitErr(err)
-	// err = captureCmd.MarkPersistentFlagRequired("bc.port")
-	// exitErr(err)
-
-	//// Known Gaps specific
-	captureCmd.PersistentFlags().BoolVarP(&kgProcessGaps, "kg.processKnownGaps", "", true, "Should we process the slots within the eth-beacon.known_gaps table.")
-	captureCmd.PersistentFlags().IntVarP(&kgTableIncrement, "kg.increment", "", 10000, "The max slots within a single entry to the known_gaps table.")
-	captureCmd.PersistentFlags().IntVarP(&kgMaxWorker, "kg.maxKnownGapsWorker", "", 30, "The number of workers that should be actively processing slots from the eth-beacon.known_gaps table. Be careful of system memory.")
-
-	// Prometheus Specific
-	captureCmd.PersistentFlags().BoolVarP(&pmMetrics, "pm.metrics", "", true, "Should we capture prometheus metrics.")
-	captureCmd.PersistentFlags().StringVarP(&pmAddress, "pm.address", "", "localhost", "Address to send the prometheus metrics.")
-	captureCmd.PersistentFlags().IntVarP(&pmPort, "pm.port", "", 9000, "The port to send prometheus metrics.")
+	err = captureCmd.MarkPersistentFlagRequired("bc.address")
+	exitErr(err)
+	err = captureCmd.MarkPersistentFlagRequired("bc.port")
+	exitErr(err)

 	//// Testing Specific
 	captureCmd.PersistentFlags().BoolVar(&testDisregardSync, "t.skipSync", false, "Should we disregard the head sync?")

 	// Bind Flags with Viper
 	//// DB Flags
-	err := viper.BindPFlag("db.username", captureCmd.PersistentFlags().Lookup("db.username"))
+	err = viper.BindPFlag("db.username", captureCmd.PersistentFlags().Lookup("db.username"))
 	exitErr(err)
 	err = viper.BindPFlag("db.password", captureCmd.PersistentFlags().Lookup("db.password"))
 	exitErr(err)
@@ -127,14 +100,14 @@ func init() {
 	exitErr(err)
 	err = viper.BindPFlag("db.name", captureCmd.PersistentFlags().Lookup("db.name"))
 	exitErr(err)
-	err = viper.BindPFlag("db.driver", captureCmd.PersistentFlags().Lookup("db.driver"))
-	exitErr(err)
-
-	//// Testing Specific
 	err = viper.BindPFlag("t.skipSync", captureCmd.PersistentFlags().Lookup("t.skipSync"))
 	exitErr(err)

-	//// LH specific
+	// Testing Specific
+	err = viper.BindPFlag("t.driver", captureCmd.PersistentFlags().Lookup("db.driver"))
+	exitErr(err)
+
+	// LH specific
 	err = viper.BindPFlag("bc.address", captureCmd.PersistentFlags().Lookup("bc.address"))
 	exitErr(err)
 	err = viper.BindPFlag("bc.type", captureCmd.PersistentFlags().Lookup("bc.type"))
@@ -143,40 +116,14 @@ func init() {
 	exitErr(err)
 	err = viper.BindPFlag("bc.connectionProtocol", captureCmd.PersistentFlags().Lookup("bc.connectionProtocol"))
 	exitErr(err)
-	err = viper.BindPFlag("bc.bootRetryInterval", captureCmd.PersistentFlags().Lookup("bc.bootRetryInterval"))
-	exitErr(err)
-	err = viper.BindPFlag("bc.bootMaxRetry", captureCmd.PersistentFlags().Lookup("bc.bootMaxRetry"))
-	exitErr(err)
-	err = viper.BindPFlag("bc.maxHistoricProcessWorker", captureCmd.PersistentFlags().Lookup("bc.maxHistoricProcessWorker"))
-	exitErr(err)
-	err = viper.BindPFlag("bc.uniqueNodeIdentifier", captureCmd.PersistentFlags().Lookup("bc.uniqueNodeIdentifier"))
-	exitErr(err)
-	err = viper.BindPFlag("bc.checkDb", captureCmd.PersistentFlags().Lookup("bc.checkDb"))
-	exitErr(err)
 	// Here you will define your flags and configuration settings.

-	//// Known Gap Specific
-	err = viper.BindPFlag("kg.processKnownGaps", captureCmd.PersistentFlags().Lookup("kg.processKnownGaps"))
-	exitErr(err)
-	err = viper.BindPFlag("kg.increment", captureCmd.PersistentFlags().Lookup("kg.increment"))
-	exitErr(err)
-	err = viper.BindPFlag("kg.processKnownGaps", captureCmd.PersistentFlags().Lookup("kg.maxKnownGapsWorker"))
-	exitErr(err)
-
-	// Prometheus Specific
-	err = viper.BindPFlag("pm.metrics", captureCmd.PersistentFlags().Lookup("pm.metrics"))
-	exitErr(err)
-	err = viper.BindPFlag("pm.address", captureCmd.PersistentFlags().Lookup("pm.address"))
-	exitErr(err)
-	err = viper.BindPFlag("pm.port", captureCmd.PersistentFlags().Lookup("pm.port"))
-	exitErr(err)
 }

 // Helper function to catch any errors.
 // We need to capture these errors for the linter.
 func exitErr(err error) {
 	if err != nil {
-		fmt.Println("Error: ", err)
 		os.Exit(1)
 	}
 }
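The bulk of this file's change is flag plumbing: main defines each flag with cobra, binds it with `viper.BindPFlag`, and then reads it back through `viper.Get*` in the subcommands, so the same setting can also arrive from a config file or the environment. A reduced sketch of that define/bind/read cycle (the `demo` command and the flag default are placeholders, not from this repo):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) {
			// Reads go through viper, not the flag variable, so a config
			// file or env var can supply the value just as well.
			fmt.Println("db.address =", viper.GetString("db.address"))
		},
	}
	cmd.PersistentFlags().String("db.address", "localhost", "Address to connect to DB")
	if err := viper.BindPFlag("db.address", cmd.PersistentFlags().Lookup("db.address")); err != nil {
		fmt.Println("Error: ", err)
		return
	}
	_ = cmd.Execute()
}
```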
cmd/full.go: 120 changed lines
@@ -1,120 +0,0 @@
-// VulcanizeDB
-// Copyright © 2022 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
-	"context"
-	"fmt"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
-	"strconv"
-
-	log "github.com/sirupsen/logrus"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/internal/shutdown"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
-	"golang.org/x/sync/errgroup"
-)
-
-// fullCmd represents the full command
-var fullCmd = &cobra.Command{
-	Use:   "full",
-	Short: "Capture all components of the application (head and historical)",
-	Long:  `Capture all components of the application (head and historical)`,
-	Run: func(cmd *cobra.Command, args []string) {
-		startFullProcessing()
-	},
-}
-
-func init() {
-	captureCmd.AddCommand(fullCmd)
-
-	// Here you will define your flags and configuration settings.
-
-	// Cobra supports Persistent Flags which will work for this command
-	// and all subcommands, e.g.:
-	// fullCmd.PersistentFlags().String("foo", "", "A help for foo")
-
-	// Cobra supports local flags which will only run when this command
-	// is called directly, e.g.:
-	// fullCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
-}
-
-// Start the application to track at head and historical processing.
-func startFullProcessing() {
-	// Boot the application
-	log.Info("Starting the application in head tracking mode.")
-	ctx := context.Background()
-
-	Bc, Db, err := boot.BootApplicationWithRetry(ctx, viper.GetString("db.address"), viper.GetInt("db.port"), viper.GetString("db.name"), viper.GetString("db.username"), viper.GetString("db.password"), viper.GetString("db.driver"),
-		viper.GetString("bc.address"), viper.GetInt("bc.port"), viper.GetString("bc.connectionProtocol"), viper.GetString("bc.type"), viper.GetInt("bc.bootRetryInterval"), viper.GetInt("bc.bootMaxRetry"),
-		viper.GetInt("kg.increment"), "head", viper.GetBool("t.skipSync"), viper.GetInt("bc.uniqueNodeIdentifier"), viper.GetBool("bc.checkDb"),
-		viper.GetBool("bc.performBeaconBlockProcessing"), viper.GetBool("bc.performBeaconStateProcessing"))
-	if err != nil {
-		StopApplicationPreBoot(err, Db)
-	}
-
-	if viper.GetBool("pm.metrics") {
-		addr := viper.GetString("pm.address") + ":" + strconv.Itoa(viper.GetInt("pm.port"))
-		serveProm(addr)
-	}
-
-	log.Info("The Beacon Client has booted successfully!")
-	// Capture head blocks
-	go Bc.CaptureHead()
-
-	hpContext, hpCancel := context.WithCancel(context.Background())
-
-	errG, _ := errgroup.WithContext(context.Background())
-	errG.Go(func() error {
-		errs := Bc.CaptureHistoric(hpContext, viper.GetInt("bc.maxHistoricProcessWorker"), beaconclient.Slot(viper.GetUint64("bc.minimumSlot")))
-		if len(errs) != 0 {
-			if len(errs) != 0 {
-				log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing historic events")
-				return fmt.Errorf("Application ended because there were too many error when attempting to process historic")
-			}
-		}
-		return nil
-	})
-	kgCtx, KgCancel := context.WithCancel(context.Background())
-	if viper.GetBool("kg.processKnownGaps") {
-		go func() {
-			errG := new(errgroup.Group)
-			errG.Go(func() error {
-				errs := Bc.ProcessKnownGaps(kgCtx, viper.GetInt("kg.maxKnownGapsWorker"), beaconclient.Slot(viper.GetUint64("kg.minimumSlot")))
-				if len(errs) != 0 {
-					log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps")
-					return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps")
-				}
-				return nil
-			})
-			if err := errG.Wait(); err != nil {
-				loghelper.LogError(err).Error("Error with knownGaps processing")
-			}
-		}()
-	}
-
-	// Shutdown when the time is right.
-	err = shutdown.ShutdownFull(ctx, KgCancel, hpCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc)
-	if err != nil {
-		loghelper.LogError(err).Error("Ungracefully Shutdown ipld-eth-beacon-indexer!")
-	} else {
-		log.Info("Gracefully shutdown ipld-eth-beacon-indexer")
-	}
-}
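`startFullProcessing` above runs the historic and known-gaps processors under `errgroup` with per-processor cancel contexts. This stripped-down sketch shows the shape of that pattern; the processor body and names are stand-ins for `Bc.CaptureHistoric` and `Bc.ProcessKnownGaps`, not the real implementations:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// runProcessor stands in for a long-running worker such as CaptureHistoric:
// it works until its context is cancelled, then returns any terminal error.
func runProcessor(ctx context.Context, name string) error {
	<-ctx.Done()
	fmt.Println(name, "stopped")
	return nil
}

func main() {
	hpCtx, hpCancel := context.WithCancel(context.Background())
	kgCtx, kgCancel := context.WithCancel(context.Background())

	errG := new(errgroup.Group)
	errG.Go(func() error { return runProcessor(hpCtx, "historic") })
	errG.Go(func() error { return runProcessor(kgCtx, "knownGaps") })

	// In the real command the shutdown package drives these cancels.
	hpCancel()
	kgCancel()

	if err := errG.Wait(); err != nil {
		fmt.Println("processing error:", err)
	}
}
```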
77
cmd/head.go
77
cmd/head.go
@ -18,19 +18,18 @@ package cmd
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"os"
|
||||||
"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"github.com/spf13/viper"
|
"github.com/spf13/viper"
|
||||||
"github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
|
"github.com/vulcanize/ipld-ethcl-indexer/internal/boot"
|
||||||
"github.com/vulcanize/ipld-eth-beacon-indexer/internal/shutdown"
|
"github.com/vulcanize/ipld-ethcl-indexer/internal/shutdown"
|
||||||
"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
|
"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
|
||||||
"golang.org/x/sync/errgroup"
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
kgTableIncrement int
|
||||||
)
|
)
|
||||||
|
|
||||||
// headCmd represents the head command
|
// headCmd represents the head command
|
||||||
@ -49,66 +48,34 @@ func startHeadTracking() {
 	log.Info("Starting the application in head tracking mode.")
 	ctx := context.Background()
 
-	Bc, Db, err := boot.BootApplicationWithRetry(ctx, viper.GetString("db.address"), viper.GetInt("db.port"), viper.GetString("db.name"), viper.GetString("db.username"), viper.GetString("db.password"), viper.GetString("db.driver"),
-		viper.GetString("bc.address"), viper.GetInt("bc.port"), viper.GetString("bc.connectionProtocol"), viper.GetString("bc.type"), viper.GetInt("bc.bootRetryInterval"), viper.GetInt("bc.bootMaxRetry"),
-		viper.GetInt("kg.increment"), "head", viper.GetBool("t.skipSync"), viper.GetInt("bc.uniqueNodeIdentifier"), viper.GetBool("bc.checkDb"),
-		viper.GetBool("bc.performBeaconBlockProcessing"), viper.GetBool("bc.performBeaconStateProcessing"))
+	BC, DB, err := boot.BootApplicationWithRetry(ctx, dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, testDisregardSync)
 	if err != nil {
-		StopApplicationPreBoot(err, Db)
-	}
-	if viper.GetBool("pm.metrics") {
-		addr := viper.GetString("pm.address") + ":" + strconv.Itoa(viper.GetInt("pm.port"))
-		serveProm(addr)
+		loghelper.LogError(err).Error("Unable to Start application")
+		if DB != nil {
+			DB.Close()
+		}
+		os.Exit(1)
 	}
 
 	log.Info("The Beacon Client has booted successfully!")
 	// Capture head blocks
-	go Bc.CaptureHead()
-	kgCtx, KgCancel := context.WithCancel(context.Background())
-	if viper.GetBool("kg.processKnownGaps") {
-		go func() {
-			errG := new(errgroup.Group)
-			errG.Go(func() error {
-				errs := Bc.ProcessKnownGaps(kgCtx, viper.GetInt("kg.maxKnownGapsWorker"), beaconclient.Slot(viper.GetUint64("kg.minimumSlot")))
-				if len(errs) != 0 {
-					log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps")
-					return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps")
-				}
-				return nil
-			})
-			if err := errG.Wait(); err != nil {
-				loghelper.LogError(err).Error("Error with knownGaps processing")
-			}
-		}()
-	}
+	go BC.CaptureHead(kgTableIncrement)
 
 	// Shutdown when the time is right.
-	err = shutdown.ShutdownHeadTracking(ctx, KgCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc)
+	err = shutdown.ShutdownServices(ctx, notifierCh, maxWaitSecondsShutdown, DB, BC)
 	if err != nil {
-		loghelper.LogError(err).Error("Ungracefully Shutdown ipld-eth-beacon-indexer!")
+		loghelper.LogError(err).Error("Ungracefully Shutdown ipld-ethcl-indexer!")
 	} else {
-		log.Info("Gracefully shutdown ipld-eth-beacon-indexer")
+		log.Info("Gracefully shutdown ipld-ethcl-indexer")
 	}
 }
 
 func init() {
 	captureCmd.AddCommand(headCmd)
-}
 
-// Start prometheus server
-func serveProm(addr string) {
-	mux := http.NewServeMux()
-	mux.Handle("/metrics", promhttp.Handler())
-
-	srv := http.Server{
-		Addr:    addr,
-		Handler: mux,
-	}
-	go func() {
-		if err := srv.ListenAndServe(); err != nil {
-			loghelper.LogError(err).WithField("endpoint", addr).Error("Error with prometheus")
-		}
-	}()
+	// Known Gaps specific
+	captureCmd.PersistentFlags().IntVarP(&kgTableIncrement, "kg.increment", "", 10000, "The max slots within a single entry to the known_gaps table.")
+	err := viper.BindPFlag("kg.increment", captureCmd.PersistentFlags().Lookup("kg.increment"))
+	exitErr(err)
 }
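For reference, the serveProm helper that exists only on the main side is the stock client_golang wiring: a mux with promhttp.Handler on /metrics, served in the background. A minimal, self-contained sketch of that pattern (the package main wrapper, the hard-coded localhost:9000 address, and the closing sleep are illustrative only; the repo builds the address from its pm.* settings):

package main

import (
    "log"
    "net/http"
    "time"

    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    mux := http.NewServeMux()
    // Expose the default Prometheus registry on /metrics.
    mux.Handle("/metrics", promhttp.Handler())

    srv := http.Server{Addr: "localhost:9000", Handler: mux}
    // Serve in the background so the caller can keep booting, as serveProm does.
    go func() {
        if err := srv.ListenAndServe(); err != nil {
            log.Printf("metrics server stopped: %v", err)
        }
    }()
    time.Sleep(time.Second) // stand-in for the application's real work
}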
cmd/historic.go

@ -17,20 +17,9 @@
 package cmd
 
 import (
-	"context"
 	"fmt"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
-	"os"
-	"strconv"
 
-	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/internal/shutdown"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
-	"golang.org/x/sync/errgroup"
 )
 
 // historicCmd represents the historic command
@ -39,70 +28,10 @@ var historicCmd = &cobra.Command{
 	Short: "Capture the historic blocks and states.",
 	Long:  `Capture the historic blocks and states.`,
 	Run: func(cmd *cobra.Command, args []string) {
-		startHistoricProcessing()
+		fmt.Println("historic called")
 	},
 }
 
-// Start the application to process historical slots.
-func startHistoricProcessing() {
-	// Boot the application
-	log.Info("Starting the application in head tracking mode.")
-	ctx := context.Background()
-
-	Bc, Db, err := boot.BootApplicationWithRetry(ctx, viper.GetString("db.address"), viper.GetInt("db.port"), viper.GetString("db.name"), viper.GetString("db.username"), viper.GetString("db.password"), viper.GetString("db.driver"),
-		viper.GetString("bc.address"), viper.GetInt("bc.port"), viper.GetString("bc.connectionProtocol"), viper.GetString("bc.type"), viper.GetInt("bc.bootRetryInterval"), viper.GetInt("bc.bootMaxRetry"),
-		viper.GetInt("kg.increment"), "historic", viper.GetBool("t.skipSync"), viper.GetInt("bc.uniqueNodeIdentifier"), viper.GetBool("bc.checkDb"),
-		viper.GetBool("bc.performBeaconBlockProcessing"), viper.GetBool("bc.performBeaconStateProcessing"))
-	if err != nil {
-		StopApplicationPreBoot(err, Db)
-	}
-
-	if viper.GetBool("pm.metrics") {
-		addr := viper.GetString("pm.address") + ":" + strconv.Itoa(viper.GetInt("pm.port"))
-		serveProm(addr)
-	}
-
-	hpContext, hpCancel := context.WithCancel(context.Background())
-
-	errG, _ := errgroup.WithContext(context.Background())
-	errG.Go(func() error {
-		errs := Bc.CaptureHistoric(hpContext, viper.GetInt("bc.maxHistoricProcessWorker"), beaconclient.Slot(viper.GetUint64("bc.minimumSlot")))
-		if len(errs) != 0 {
-			if len(errs) != 0 {
-				log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing historic events")
-				return fmt.Errorf("Application ended because there were too many error when attempting to process historic")
-			}
-		}
-		return nil
-	})
-
-	kgContext, kgCancel := context.WithCancel(context.Background())
-	if viper.GetBool("kg.processKnownGaps") {
-		go func() {
-			errG := new(errgroup.Group)
-			errG.Go(func() error {
-				errs := Bc.ProcessKnownGaps(kgContext, viper.GetInt("kg.maxKnownGapsWorker"), beaconclient.Slot(viper.GetUint64("kg.minimumSlot")))
-				if len(errs) != 0 {
-					log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps")
-					return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps")
-				}
-				return nil
-			})
-			if err := errG.Wait(); err != nil {
-				loghelper.LogError(err).Error("Error with knownGaps processing")
-			}
-		}()
-	}
-
-	// Shutdown when the time is right.
-	err = shutdown.ShutdownHistoricProcessing(ctx, kgCancel, hpCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc)
-	if err != nil {
-		loghelper.LogError(err).Error("Ungracefully Shutdown ipld-eth-beacon-indexer!")
-	} else {
-		log.Info("Gracefully shutdown ipld-eth-beacon-indexer")
-	}
-}
-
 func init() {
 	captureCmd.AddCommand(historicCmd)
 
@ -116,12 +45,3 @@ func init() {
 	// is called directly, e.g.:
 	// historicCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
 }
-
-// Stop the application during its initial boot phases.
-func StopApplicationPreBoot(startErr error, db sql.Database) {
-	loghelper.LogError(startErr).Error("Unable to Start application")
-	if db != nil {
-		db.Close()
-	}
-	os.Exit(1)
-}
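The startHistoricProcessing function removed above leans on errgroup to run its long-lived jobs and surface their first error. A stripped-down sketch of that pattern with a stand-in worker (doWork is ours, not the indexer's CaptureHistoric):

package main

import (
    "context"
    "fmt"

    "golang.org/x/sync/errgroup"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    errG, ctx := errgroup.WithContext(ctx)
    errG.Go(func() error {
        // Stand-in for Bc.CaptureHistoric(hpContext, workers, minimumSlot).
        if errs := doWork(ctx); len(errs) != 0 {
            return fmt.Errorf("too many errors while processing: %v", errs)
        }
        return nil
    })
    // Wait blocks until every goroutine returns, yielding the first error.
    if err := errG.Wait(); err != nil {
        fmt.Println("processing failed:", err)
    }
}

// doWork simulates a batch job that returns the errors it accumulated.
func doWork(ctx context.Context) []error { return nil }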
11  cmd/root.go

@ -32,7 +32,7 @@ var (
 // rootCmd represents the base command when called without any subcommands
 var rootCmd = &cobra.Command{
-	Use:   "ipld-eth-beacon-indexer",
+	Use:   "ipld-ethcl-indexer",
 	Short: "This application will keep track of all BeaconState's and SignedBeaconBlock's on the Beacon Chain.",
 	Long: `This is an application that will capture the BeaconState's and SignedBeaconBlock's on the Beacon Chain.
 It can either do this will keeping track of head, or backfilling historic data.`,
@ -47,7 +47,6 @@ It can either do this will keeping track of head, or backfilling historic data.`
 func Execute() {
 	err := rootCmd.Execute()
 	if err != nil {
-		fmt.Println("Err when executing rootCmd", err)
 		os.Exit(1)
 	}
 }
@ -127,9 +126,9 @@ func init() {
 	// will be global for your application.
 
 	// Optional Flags
-	rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.ipld-eth-beacon-indexer.yaml)")
+	rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.ipld-ethcl-indexer.yaml)")
 	rootCmd.PersistentFlags().String("log.level", log.InfoLevel.String(), "log level (trace, debug, info, warn, error, fatal, panic)")
-	rootCmd.PersistentFlags().String("log.file", "ipld-eth-beacon-indexer.log", "file path for logging")
+	rootCmd.PersistentFlags().String("log.file", "ipld-ethcl-indexer.log", "file path for logging")
 	rootCmd.PersistentFlags().Bool("log.output", true, "Should we log to STDOUT")
 	rootCmd.PersistentFlags().String("log.format", "json", "json or text")
 
@ -160,10 +159,10 @@ func initConfig() {
 	home, err := os.UserHomeDir()
 	cobra.CheckErr(err)
 
-	// Search config in home directory with name ".ipld-eth-beacon-indexer" (without extension).
+	// Search config in home directory with name ".ipld-ethcl-indexer" (without extension).
 	viper.AddConfigPath(home)
 	viper.SetConfigType("yaml")
-	viper.SetConfigName(".ipld-eth-beacon-indexer")
+	viper.SetConfigName(".ipld-ethcl-indexer")
 }
 
 	viper.AutomaticEnv() // read in environment variables that match
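Both sides of this file rely on the cobra-to-viper layering: a persistent flag is registered once, bound with BindPFlag, and can then be overridden by an environment variable or a config file entry. A small illustrative sketch (the command and key names here are ours, not the repo's):

package main

import (
    "fmt"

    "github.com/spf13/cobra"
    "github.com/spf13/viper"
)

func main() {
    cmd := &cobra.Command{
        Use: "demo",
        Run: func(cmd *cobra.Command, args []string) {
            // viper resolves: explicit flag > env var > config file > flag default.
            fmt.Println("log.level =", viper.GetString("log.level"))
        },
    }
    cmd.PersistentFlags().String("log.level", "info", "log level")
    if err := viper.BindPFlag("log.level", cmd.PersistentFlags().Lookup("log.level")); err != nil {
        panic(err)
    }
    viper.AutomaticEnv() // read in environment variables that match
    _ = cmd.Execute()
}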
cmd/version.go

@ -20,7 +20,7 @@ import (
 
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
-	v "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/version"
+	v "github.com/vulcanize/ipld-ethcl-indexer/pkg/version"
 )
 
 var (
@ -47,7 +47,7 @@ to quickly create a Cobra application.`,
 		Patch: Patch,
 		Meta:  Meta,
 	}
-	log.Infof("ipld-eth-beacon-indexer version: %s", version.GetVersionWithMeta())
+	log.Infof("ipld-ethcl-indexer version: %s", version.GetVersionWithMeta())
 	fmt.Println(version.GetVersionWithMeta())
 	},
 }
@ -1,40 +0,0 @@
-{
-  "db": {
-    "address": "ipld-eth-beacon-db",
-    "password": "password",
-    "port": 5432,
-    "username": "vdbm",
-    "name": "vulcanize_testing",
-    "driver": "PGX"
-  },
-  "bc": {
-    "address": "lighthouse",
-    "port": 5052,
-    "type": "lighthouse",
-    "bootRetryInterval": 30,
-    "bootMaxRetry": 5,
-    "maxHistoricProcessWorker": 2,
-    "connectionProtocol": "http",
-    "uniqueNodeIdentifier": 100,
-    "checkDb": true
-  },
-  "t": {
-    "skipSync": true
-  },
-  "log": {
-    "level": "debug",
-    "output": true,
-    "file": "./ipld-eth-beacon-indexer.log",
-    "format": "json"
-  },
-  "kg": {
-    "increment": 10000,
-    "processKnownGaps": true,
-    "maxKnownGapsWorker": 2
-  },
-  "pm": {
-    "address": "localhost",
-    "port": 9000,
-    "metrics": true
-  }
-}
@ -1 +0,0 @@
-../ipld-eth-beacon-config.json
@ -1,40 +0,0 @@
-{
-  "db": {
-    "address": "localhost",
-    "password": "secret12",
-    "port": 45432,
-    "username": "postgres",
-    "name": "postgres",
-    "driver": "PGX"
-  },
-  "bc": {
-    "address": "localhost",
-    "port": 8001,
-    "type": "lighthouse",
-    "bootRetryInterval": 30,
-    "bootMaxRetry": 5,
-    "maxHistoricProcessWorker": 2,
-    "connectionProtocol": "http",
-    "uniqueNodeIdentifier": 100,
-    "checkDb": true
-  },
-  "t": {
-    "skipSync": true
-  },
-  "log": {
-    "level": "debug",
-    "output": true,
-    "file": "./ipld-eth-beacon-indexer.log",
-    "format": "json"
-  },
-  "kg": {
-    "increment": 10000,
-    "processKnownGaps": true,
-    "maxKnownGapsWorker": 2
-  },
-  "pm": {
-    "address": "localhost",
-    "port": 9000,
-    "metrics": true
-  }
-}
@ -1,29 +1,40 @@
 #!/bin/bash
 
 sleep 10
-echo "Starting ipld-eth-beacon-indexer"
+echo "Starting ipld-ethcl-indexer"
 
-cat /root/ipld-eth-beacon-config-docker.json | envsubst > /root/ipld-eth-beacon-config.json
-
-echo /root/ipld-eth-beacon-indexer capture ${CAPTURE_MODE} --config /root/ipld-eth-beacon-config.json > /root/ipld-eth-beacon-indexer.output
-env
-
-if [ ${CAPTURE_MODE} == "boot" ]; then
-  /root/ipld-eth-beacon-indexer capture ${CAPTURE_MODE} --config /root/ipld-eth-beacon-config.json > /root/ipld-eth-beacon-indexer.output
-  rv=$?
-
-  if [ $rv != 0 ]; then
-    echo "ipld-eth-beacon-indexer boot failed"
-  else
-    echo "ipld-eth-beacon-indexer boot succeeded"
-  fi
-
-  echo $rv > /root/HEALTH
-  echo $rv
-  cat /root/ipld-eth-beacon-indexer.output
-
-  tail -f /dev/null
-else
-  exec /root/ipld-eth-beacon-indexer capture ${CAPTURE_MODE} --config /root/ipld-eth-beacon-config.json > /dev/null &
-  tail -F ipld-eth-beacon-indexer.log
-fi
+echo /root/ipld-ethcl-indexer capture ${CAPTURE_MODE} --db.address $DB_ADDRESS \
+  --db.password $DB_PASSWORD \
+  --db.port $DB_PORT \
+  --db.username $DB_USER \
+  --db.name $DB_NAME \
+  --db.driver $DB_DRIVER \
+  --bc.address $BC_ADDRESS \
+  --bc.port $BC_PORT \
+  --log.level $LOG_LEVEL\
+  --t.skipSync=$SKIP_SYNC \
+  --kg.increment $KNOWN_GAP_INCREMENT
+
+/root/ipld-ethcl-indexer capture ${CAPTURE_MODE} --db.address $DB_ADDRESS \
+  --db.password $DB_PASSWORD \
+  --db.port $DB_PORT \
+  --db.username $DB_USER \
+  --db.name $DB_NAME \
+  --db.driver $DB_DRIVER \
+  --bc.address $BC_ADDRESS \
+  --bc.port $BC_PORT \
+  --log.level $LOG_LEVEL \
+  --t.skipSync=$SKIP_SYNC \
+  --kg.increment $KNOWN_GAP_INCREMENT
+
+rv=$?
+
+if [ $rv != 0 ]; then
+  echo "ipld-ethcl-indexer startup failed"
+  echo 1 > /root/HEALTH
+else
+  echo "ipld-ethcl-indexer startup succeeded"
+  echo 0 > /root/HEALTH
+fi
+
+tail -f /dev/null
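The main-side entrypoint renders its JSON config with envsubst and hands it to the binary via --config. A hedged sketch of how such a file can be loaded with viper and its nested keys read (the repo's actual wiring lives in cmd/root.go's initConfig; the path below is simply the one the script writes):

package main

import (
    "fmt"

    "github.com/spf13/viper"
)

func main() {
    // Written by envsubst in the entrypoint script above.
    viper.SetConfigFile("/root/ipld-eth-beacon-config.json")
    if err := viper.ReadInConfig(); err != nil {
        panic(err)
    }
    // Nested JSON keys are addressed with dotted paths, e.g. the "db" block.
    fmt.Println("db.address =", viper.GetString("db.address"))
    fmt.Println("bc.port    =", viper.GetInt("bc.port"))
}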
117  go.mod

@ -1,113 +1,110 @@
-module github.com/vulcanize/ipld-eth-beacon-indexer
+module github.com/vulcanize/ipld-ethcl-indexer
 
 go 1.18
 
 require (
 	github.com/ipfs/go-ipfs-blockstore v1.2.0
 	github.com/ipfs/go-ipfs-ds-help v1.1.0
-	github.com/jackc/pgconn v1.13.0
-	github.com/multiformats/go-multihash v0.2.1
+	github.com/jackc/pgconn v1.12.0
+	github.com/multiformats/go-multihash v0.1.0
 	github.com/onsi/ginkgo/v2 v2.1.4
 	github.com/onsi/gomega v1.19.0
-	github.com/pkg/errors v0.9.1
-	github.com/prometheus/client_golang v1.13.0
-	github.com/protolambda/zrnt v0.28.0
-	github.com/protolambda/ztyp v0.2.2
-	github.com/r3labs/sse/v2 v2.8.1
-	github.com/sirupsen/logrus v1.9.0
+	github.com/r3labs/sse v0.0.0-20210224172625-26fe804710bc
+	github.com/sirupsen/logrus v1.8.1
 )
 
 require (
-	github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
-	github.com/benbjohnson/clock v1.3.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
-	github.com/deckarep/golang-set v1.8.0 // indirect
-	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
-	github.com/ethereum/go-ethereum v1.10.25 // indirect
-	github.com/go-ole/go-ole v1.2.1 // indirect
-	github.com/go-stack/stack v1.8.0 // indirect
-	github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
+	github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
+	github.com/ethereum/go-ethereum v1.10.17 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect
 	github.com/google/uuid v1.3.0 // indirect
-	github.com/gorilla/websocket v1.4.2 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.0 // indirect
 	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
-	github.com/holiman/uint256 v1.2.0 // indirect
+	github.com/herumi/bls-eth-go-binary v0.0.0-20210917013441-d37c07cfda4e // indirect
 	github.com/ipfs/bbloom v0.0.4 // indirect
 	github.com/ipfs/go-block-format v0.0.3 // indirect
-	github.com/ipfs/go-cid v0.3.2 // indirect
-	github.com/ipfs/go-datastore v0.6.0 // indirect
+	github.com/ipfs/go-cid v0.1.0 // indirect
+	github.com/ipfs/go-datastore v0.5.0 // indirect
 	github.com/ipfs/go-ipfs-util v0.0.2 // indirect
-	github.com/ipfs/go-ipld-format v0.4.0 // indirect
+	github.com/ipfs/go-ipld-format v0.3.0 // indirect
 	github.com/ipfs/go-log v1.0.5 // indirect
-	github.com/ipfs/go-log/v2 v2.5.1 // indirect
+	github.com/ipfs/go-log/v2 v2.5.0 // indirect
 	github.com/ipfs/go-metrics-interface v0.0.1 // indirect
 	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
 	github.com/jackc/pgio v1.0.0 // indirect
 	github.com/jackc/pgpassfile v1.0.0 // indirect
-	github.com/jackc/pgproto3/v2 v2.3.1 // indirect
+	github.com/jackc/pgproto3/v2 v2.3.0 // indirect
 	github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
-	github.com/jackc/pgtype v1.12.0 // indirect
-	github.com/jackc/puddle v1.3.0 // indirect
+	github.com/jackc/pgtype v1.11.0 // indirect
+	github.com/jackc/puddle v1.2.1 // indirect
 	github.com/jbenet/goprocess v0.1.4 // indirect
-	github.com/kilic/bls12-381 v0.1.0 // indirect
-	github.com/klauspost/cpuid/v2 v2.1.1 // indirect
+	github.com/klauspost/cpuid/v2 v2.0.12 // indirect
 	github.com/lib/pq v1.10.5 // indirect
-	github.com/mattn/go-isatty v0.0.16 // indirect
+	github.com/mattn/go-isatty v0.0.14 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+	github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect
+	github.com/minio/highwayhash v1.0.1 // indirect
 	github.com/minio/sha256-simd v1.0.0 // indirect
+	github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
 	github.com/mr-tron/base58 v1.2.0 // indirect
-	github.com/multiformats/go-base32 v0.1.0 // indirect
+	github.com/multiformats/go-base32 v0.0.4 // indirect
 	github.com/multiformats/go-base36 v0.1.0 // indirect
-	github.com/multiformats/go-multibase v0.1.1 // indirect
+	github.com/multiformats/go-multibase v0.0.3 // indirect
 	github.com/multiformats/go-varint v0.0.6 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/prometheus/client_golang v1.12.1 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.37.0 // indirect
-	github.com/prometheus/procfs v0.8.0 // indirect
-	github.com/protolambda/bls12-381-util v0.0.0-20210720105258-a772f2aac13e // indirect
-	github.com/rogpeppe/go-internal v1.8.0 // indirect
-	github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect
+	github.com/prometheus/common v0.32.1 // indirect
+	github.com/prometheus/procfs v0.7.3 // indirect
+	github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7 // indirect
+	github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220303211031-f753e083138c // indirect
+	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
-	github.com/tklauser/go-sysconf v0.3.5 // indirect
-	github.com/tklauser/numcpus v0.2.2 // indirect
-	go.uber.org/atomic v1.10.0 // indirect
+	github.com/supranational/blst v0.3.5 // indirect
+	github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e // indirect
+	github.com/urfave/cli/v2 v2.3.0 // indirect
+	go.opencensus.io v0.23.0 // indirect
+	go.uber.org/atomic v1.9.0 // indirect
 	go.uber.org/multierr v1.8.0 // indirect
-	go.uber.org/zap v1.23.0 // indirect
-	golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect
-	golang.org/x/net v0.0.0-20220907135653-1e95f45603a7 // indirect
-	golang.org/x/tools v0.1.10 // indirect
-	google.golang.org/protobuf v1.28.1 // indirect
+	go.uber.org/zap v1.21.0 // indirect
+	golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f // indirect
+	golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 // indirect
+	google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3 // indirect
+	google.golang.org/grpc v1.46.0 // indirect
+	google.golang.org/protobuf v1.28.0 // indirect
 	gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect
-	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
 	lukechampine.com/blake3 v1.1.7 // indirect
 )
 
 require (
+	github.com/ferranbt/fastssz v0.0.0-20220303160658-88bb965b6747 // indirect
 	github.com/fsnotify/fsnotify v1.5.4 // indirect
-	github.com/georgysavva/scany v1.2.0
+	github.com/georgysavva/scany v0.3.0
 	github.com/hashicorp/hcl v1.0.0 // indirect
-	github.com/inconshreveable/mousetrap v1.0.1 // indirect
-	github.com/jackc/pgx/v4 v4.17.2
+	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/jackc/pgx/v4 v4.16.0
 	github.com/jarcoal/httpmock v1.2.0
 	github.com/magiconair/properties v1.8.6 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/pelletier/go-toml v1.9.5 // indirect
-	github.com/pelletier/go-toml/v2 v2.0.5 // indirect
-	github.com/spf13/afero v1.9.2 // indirect
-	github.com/spf13/cast v1.5.0 // indirect
-	github.com/spf13/cobra v1.5.0
+	github.com/pelletier/go-toml/v2 v2.0.0 // indirect
+	github.com/prysmaticlabs/prysm v1.4.2-0.20220504145118-df695346a53c
+	github.com/spf13/afero v1.8.2 // indirect
+	github.com/spf13/cast v1.4.1 // indirect
+	github.com/spf13/cobra v1.4.0
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/spf13/viper v1.13.0
-	github.com/subosito/gotenv v1.4.1 // indirect
-	golang.org/x/sync v0.0.0-20220907140024-f12130a52804
-	golang.org/x/sys v0.0.0-20220907062415-87db552b00fd // indirect
+	github.com/spf13/viper v1.11.0
+	github.com/subosito/gotenv v1.2.0 // indirect
+	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
+	golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 // indirect
 	golang.org/x/text v0.3.7 // indirect
-	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/ini.v1 v1.66.4 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
+	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
 )
internal/boot/boot.go

@ -18,18 +18,19 @@ package boot
 import (
 	"context"
 	"fmt"
-	"strings"
 	"time"
 
 	log "github.com/sirupsen/logrus"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql/postgres"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/beaconclient"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql/postgres"
 )
 
 var (
-	DB sql.Database               = &postgres.DB{}
-	BC *beaconclient.BeaconClient = &beaconclient.BeaconClient{}
+	maxRetry      = 5  // Max times to try to connect to the DB or BC at boot.
+	retryInterval = 30 // The time to wait between each try.
+	DB sql.Database               = &postgres.DB{}
+	BC *beaconclient.BeaconClient = &beaconclient.BeaconClient{}
 )
 
 // This function will perform some boot operations. If any steps fail, the application will fail to start.
@ -41,18 +42,14 @@ var (
 // 2. Connect to the database.
 //
 // 3. Make sure the node is synced, unless disregardSync is true.
-func BootApplication(ctx context.Context, dbHostname string, dbPort int, dbName string, dbUsername string, dbPassword string, driverName string,
-	bcAddress string, bcPort int, bcConnectionProtocol string, bcKgTableIncrement int, disregardSync bool, uniqueNodeIdentifier int, checkDb bool, performBeaconBlockProcessing bool, performBeaconStateProcessing bool) (*beaconclient.BeaconClient, sql.Database, error) {
+func BootApplication(ctx context.Context, dbHostname string, dbPort int, dbName string, dbUsername string, dbPassword string, driverName string, bcAddress string, bcPort int, bcConnectionProtocol string, disregardSync bool) (*beaconclient.BeaconClient, sql.Database, error) {
 	log.Info("Booting the Application")
 
 	log.Debug("Creating the Beacon Client")
-	Bc, err := beaconclient.CreateBeaconClient(ctx, bcConnectionProtocol, bcAddress, bcPort, bcKgTableIncrement, uniqueNodeIdentifier, checkDb, performBeaconBlockProcessing, performBeaconStateProcessing)
-	if err != nil {
-		return Bc, nil, err
-	}
+	BC = beaconclient.CreateBeaconClient(ctx, bcConnectionProtocol, bcAddress, bcPort)
 
 	log.Debug("Checking Beacon Client")
-	err = Bc.CheckBeaconClient()
+	err := BC.CheckBeaconClient()
 	if err != nil {
 		return nil, nil, err
 	}
@ -63,92 +60,40 @@ func BootApplication(ctx context.Context, dbHostname string, dbPort int, dbName
 		return nil, nil, err
 	}
 
-	Bc.Db = DB
+	BC.Db = DB
 
 	var status bool
 	if !disregardSync {
-		status, err = Bc.CheckHeadSync()
+		status, err = BC.CheckHeadSync()
 		if err != nil {
 			log.Error("Unable to get the nodes sync status")
-			return Bc, DB, err
+			return BC, DB, err
 		}
 		if status {
 			log.Error("The node is still syncing..")
 			err = fmt.Errorf("The node is still syncing.")
-			return Bc, DB, err
+			return BC, DB, err
 		}
 	} else {
 		log.Warn("We are not checking to see if the node has synced to head.")
 	}
-	return Bc, DB, nil
+	return BC, DB, nil
 }
 
 // Add retry logic to ensure that we are give the Beacon Client and the DB time to start.
-func BootApplicationWithRetry(ctx context.Context, dbHostname string, dbPort int, dbName string, dbUsername string, dbPassword string, driverName string,
-	bcAddress string, bcPort int, bcConnectionProtocol string, bcType string, bcRetryInterval int, bcMaxRetry int, bcKgTableIncrement int,
-	startUpMode string, disregardSync bool, uniqueNodeIdentifier int, checkDb bool, performBeaconBlockProcessing bool, performBeaconStateProcessing bool) (*beaconclient.BeaconClient, sql.Database, error) {
+func BootApplicationWithRetry(ctx context.Context, dbHostname string, dbPort int, dbName string, dbUsername string, dbPassword string, driverName string, bcAddress string, bcPort int, bcConnectionProtocol string, disregardSync bool) (*beaconclient.BeaconClient, sql.Database, error) {
 	var err error
-
-	if bcMaxRetry < 0 {
-		i := 0
-		for {
-			BC, DB, err = BootApplication(ctx, dbHostname, dbPort, dbName, dbUsername, dbPassword, driverName,
-				bcAddress, bcPort, bcConnectionProtocol, bcKgTableIncrement, disregardSync, uniqueNodeIdentifier, checkDb,
-				performBeaconBlockProcessing, performBeaconStateProcessing)
-			if err != nil {
-				log.WithFields(log.Fields{
-					"retryNumber": i,
-					"err":         err,
-				}).Warn("Unable to boot application. Going to try again")
-				time.Sleep(time.Duration(bcRetryInterval) * time.Second)
-				i = i + 1
-				continue
-			}
-			break
-		}
-	} else {
-		for i := 0; i < bcMaxRetry; i++ {
-			BC, DB, err = BootApplication(ctx, dbHostname, dbPort, dbName, dbUsername, dbPassword, driverName,
-				bcAddress, bcPort, bcConnectionProtocol, bcKgTableIncrement, disregardSync, uniqueNodeIdentifier, checkDb,
-				performBeaconBlockProcessing, performBeaconStateProcessing)
-			if err != nil {
-				log.WithFields(log.Fields{
-					"retryNumber": i,
-					"err":         err,
-				}).Warn("Unable to boot application. Going to try again")
-				time.Sleep(time.Duration(bcRetryInterval) * time.Second)
-				continue
-			}
-			break
-		}
-	}
-
-	switch strings.ToLower(startUpMode) {
-	case "head":
-		BC.PerformHeadTracking = true
-	case "historic":
-		log.Debug("Performing additional boot steps for historical processing")
-		BC.PerformHistoricalProcessing = true
-		// This field is not currently used.
-		// The idea is, that if we are doing historially processing and we get a slot
-		// greater than this slot, then we would rerun this function.
-		// this would ensure that we have the slots necessary for processing
-		// within the beacon server.
-
-		// We can implement this feature if we notice any errors.
-		headSlot, err := BC.GetLatestSlotInBeaconServer(bcType)
-		if err != nil {
-			return BC, DB, err
-		}
-		BC.UpdateLatestSlotInBeaconServer(int64(headSlot))
-	// Add another switch case for bcType if its ever needed.
-	case "boot":
-		log.Debug("Running application in boot mode.")
-	default:
-		log.WithFields(log.Fields{
-			"startUpMode": startUpMode,
-		}).Error("The startUpMode provided is not handled.")
+	for i := 0; i < maxRetry; i++ {
+		BC, DB, err = BootApplication(ctx, dbHostname, dbPort, dbName, dbUsername, dbPassword, driverName, bcAddress, bcPort, bcConnectionProtocol, disregardSync)
+		if err != nil {
+			log.WithFields(log.Fields{
+				"retryNumber": i,
+				"err":         err,
+			}).Warn("Unable to boot application. Going to try again")
+			time.Sleep(time.Duration(retryInterval) * time.Second)
+			continue
+		}
+		break
 	}
 
 	return BC, DB, err
 }
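The two retry branches in the main-side BootApplicationWithRetry differ only in their stop condition. As a sketch of a possible consolidation (ours, not the repo's code), a negative maxRetry can mean "retry forever" inside a single loop:

package main

import (
    "errors"
    "fmt"
    "time"
)

// bootOnce is a stand-in for BootApplication.
func bootOnce() error { return errors.New("beacon node not ready") }

// bootWithRetry retries bootOnce; maxRetry < 0 retries without bound.
func bootWithRetry(maxRetry, retryIntervalSec int) error {
    var err error
    for i := 0; maxRetry < 0 || i < maxRetry; i++ {
        if err = bootOnce(); err == nil {
            return nil
        }
        fmt.Printf("retryNumber=%d err=%v; going to try again\n", i, err)
        time.Sleep(time.Duration(retryIntervalSec) * time.Second)
    }
    return err
}

func main() {
    if err := bootWithRetry(2, 1); err != nil {
        fmt.Println("boot failed:", err)
    }
}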
internal/boot/boot_test.go

@ -20,73 +20,51 @@ import (
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
+	"github.com/vulcanize/ipld-ethcl-indexer/internal/boot"
 )
 
 var _ = Describe("Boot", func() {
 	var (
 		dbAddress            string = "localhost"
 		dbPort               int    = 8076
 		dbName               string = "vulcanize_testing"
 		dbUsername           string = "vdbm"
 		dbPassword           string = "password"
 		dbDriver             string = "PGX"
 		bcAddress            string = "localhost"
 		bcPort               int    = 5052
 		bcConnectionProtocol string = "http"
-		bcType                string = "lighthouse"
-		bcBootRetryInterval   int    = 1
-		bcBootMaxRetry        int    = 5
-		bcKgTableIncrement    int    = 10
-		bcUniqueIdentifier    int    = 100
-		bcCheckDb             bool   = false
-		bcProcessBeaconBlocks bool   = true
-		bcProcessBeaconState  bool   = true
 	)
 	Describe("Booting the application", Label("integration"), func() {
-		Context("When the DB and BC are both up and running, we skip checking for a synced head, and we are processing head", func() {
+		Context("When the DB and BC are both up and running, and we skip checking for a synced head", func() {
 			It("Should connect successfully", func() {
-				_, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "head", true, bcUniqueIdentifier, bcCheckDb, bcProcessBeaconBlocks, bcProcessBeaconState)
-				defer db.Close()
-				Expect(err).ToNot(HaveOccurred())
-			})
-		})
-		Context("When the DB and BC are both up and running, we skip checking for a synced head, and we are processing historic ", func() {
-			It("Should connect successfully", func() {
-				_, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "historic", true, bcUniqueIdentifier, bcCheckDb, bcProcessBeaconBlocks, bcProcessBeaconState)
+				_, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, true)
 				defer db.Close()
 				Expect(err).ToNot(HaveOccurred())
 			})
 		})
 		Context("When the DB and BC are both up and running, and we check for a synced head", func() {
 			It("Should not connect successfully", func() {
-				_, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "head", false, bcUniqueIdentifier, bcCheckDb, bcProcessBeaconBlocks, bcProcessBeaconState)
-				defer db.Close()
-				Expect(err).To(HaveOccurred())
-			})
-		})
-		Context("When the DB and BC are both up and running, we skip checking for a synced head, but the unique identifier is 0", func() {
-			It("Should not connect successfully", func() {
-				_, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "head", false, 0, bcCheckDb, bcProcessBeaconBlocks, bcProcessBeaconState)
+				_, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, false)
 				defer db.Close()
 				Expect(err).To(HaveOccurred())
 			})
 		})
 		Context("When the DB is running but not the BC", func() {
 			It("Should not connect successfully", func() {
-				_, _, err := boot.BootApplication(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, "hi", 100, bcConnectionProtocol, bcKgTableIncrement, true, bcUniqueIdentifier, bcCheckDb, bcProcessBeaconBlocks, bcProcessBeaconState)
+				_, _, err := boot.BootApplication(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, "hi", 100, bcConnectionProtocol, true)
 				Expect(err).To(HaveOccurred())
 			})
 		})
 		Context("When the BC is running but not the DB", func() {
 			It("Should not connect successfully", func() {
-				_, _, err := boot.BootApplication(context.Background(), "hi", 10, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcKgTableIncrement, true, bcUniqueIdentifier, bcCheckDb, bcProcessBeaconBlocks, bcProcessBeaconState)
+				_, _, err := boot.BootApplication(context.Background(), "hi", 10, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, true)
 				Expect(err).To(HaveOccurred())
 			})
 		})
 		Context("When neither the BC or DB are running", func() {
 			It("Should not connect successfully", func() {
-				_, _, err := boot.BootApplication(context.Background(), "hi", 10, dbName, dbUsername, dbPassword, dbDriver, "hi", 100, bcConnectionProtocol, bcKgTableIncrement, true, bcUniqueIdentifier, bcCheckDb, bcProcessBeaconBlocks, bcProcessBeaconState)
+				_, _, err := boot.BootApplication(context.Background(), "hi", 10, dbName, dbUsername, dbPassword, dbDriver, "hi", 100, bcConnectionProtocol, true)
 				Expect(err).To(HaveOccurred())
 			})
 		})
internal/shutdown/shutdown.go

@ -20,16 +20,25 @@ import (
 	"os"
 	"time"
 
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/gracefulshutdown"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/beaconclient"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/gracefulshutdown"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
 )
 
 // Shutdown all the internal services for the application.
-func ShutdownServices(ctx context.Context, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient, shutdownOperations map[string]gracefulshutdown.Operation) error {
-	//successCh, errCh := gracefulshutdown.Shutdown(ctx, notifierCh, waitTime, )
-	successCh, errCh := gracefulshutdown.Shutdown(ctx, notifierCh, waitTime, shutdownOperations)
+func ShutdownServices(ctx context.Context, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error {
+	successCh, errCh := gracefulshutdown.Shutdown(ctx, notifierCh, waitTime, map[string]gracefulshutdown.Operation{
+		// Combining DB shutdown with BC because BC needs DB open to cleanly shutdown.
+		"beaconClient": func(ctx context.Context) error {
+			defer DB.Close()
+			err := BC.StopHeadTracking()
+			if err != nil {
+				loghelper.LogError(err).Error("Unable to trigger shutdown of head tracking")
+			}
+			return err
+		},
+	})
 
 	select {
 	case <-successCh:
@ -38,82 +47,3 @@ func ShutdownServices(ctx context.Context, notifierCh chan os.Signal, waitTime t
 		return err
 	}
 }
-
-// Wrapper function for shutting down the head tracking process.
-func ShutdownHeadTracking(ctx context.Context, kgCancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error {
-	return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{
-		// Combining DB shutdown with BC because BC needs DB open to cleanly shutdown.
-		"beaconClient": func(ctx context.Context) error {
-			defer DB.Close()
-			err := BC.StopHeadTracking()
-			if err != nil {
-				loghelper.LogError(err).Error("Unable to trigger shutdown of head tracking")
-			}
-			if BC.KnownGapsProcess != (beaconclient.KnownGapsProcessing{}) {
-				err = BC.StopKnownGapsProcessing(kgCancel)
-				if err != nil {
-					loghelper.LogError(err).Error("Unable to stop processing known gaps")
-				}
-			}
-			return err
-		},
-	})
-}
-
-// Wrapper function for shutting down the head tracking process.
-func ShutdownHistoricProcessing(ctx context.Context, kgCancel, hpCancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error {
-	return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{
-		// Combining DB shutdown with BC because BC needs DB open to cleanly shutdown.
-		"beaconClient": func(ctx context.Context) error {
-			defer DB.Close()
-			err := BC.StopHistoric(hpCancel)
-			if err != nil {
-				loghelper.LogError(err).Error("Unable to stop processing historic")
-			}
-			if BC.KnownGapsProcess != (beaconclient.KnownGapsProcessing{}) {
-				err = BC.StopKnownGapsProcessing(kgCancel)
-				if err != nil {
-					loghelper.LogError(err).Error("Unable to stop processing known gaps")
-				}
-			}
-			return err
-		},
-	})
-}
-
-// Shutdown the head and historical processing
-func ShutdownFull(ctx context.Context, kgCancel, hpCancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error {
-	return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{
-		// Combining DB shutdown with BC because BC needs DB open to cleanly shutdown.
-		"beaconClient": func(ctx context.Context) error {
-			defer DB.Close()
-			err := BC.StopHistoric(hpCancel)
-			if err != nil {
-				loghelper.LogError(err).Error("Unable to stop processing historic")
-			}
-			if BC.KnownGapsProcess != (beaconclient.KnownGapsProcessing{}) {
-				err = BC.StopKnownGapsProcessing(kgCancel)
-				if err != nil {
-					loghelper.LogError(err).Error("Unable to stop processing known gaps")
-				}
-			}
-			err = BC.StopHeadTracking()
-			if err != nil {
-				loghelper.LogError(err).Error("Unable to trigger shutdown of head tracking")
-			}
-
-			return err
-		},
-	})
-}
-
-// Wrapper function for shutting down the application in boot mode.
-func ShutdownBoot(ctx context.Context, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error {
-	return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{
-		// Combining DB shutdown with BC because BC needs DB open to cleanly shutdown.
-		"Database": func(ctx context.Context) error {
-			return DB.Close()
-		},
-	})
-}
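The gracefulshutdown package itself is not part of this diff. From the call sites above, an Operation is a func(context.Context) error, and Shutdown waits for a signal, runs each named operation under a waitTime-bounded context, and reports on a success or error channel. A self-contained sketch under those assumptions (all names here are ours):

package main

import (
    "context"
    "fmt"
    "os"
    "os/signal"
    "time"
)

type Operation func(ctx context.Context) error

// shutdown waits for a signal on notifierCh, then runs every operation with a
// waitTime-bounded context, closing successCh or sending on errCh.
func shutdown(ctx context.Context, notifierCh chan os.Signal, waitTime time.Duration, ops map[string]Operation) (<-chan struct{}, <-chan error) {
    successCh := make(chan struct{})
    errCh := make(chan error)
    go func() {
        signal.Notify(notifierCh, os.Interrupt)
        <-notifierCh // block until asked to shut down
        ctx, cancel := context.WithTimeout(ctx, waitTime)
        defer cancel()
        for name, op := range ops {
            if err := op(ctx); err != nil {
                errCh <- fmt.Errorf("%s: %w", name, err)
                return
            }
        }
        close(successCh)
    }()
    return successCh, errCh
}

func main() {
    notifier := make(chan os.Signal, 1)
    ops := map[string]Operation{
        "database": func(ctx context.Context) error { fmt.Println("closing DB"); return nil },
    }
    successCh, errCh := shutdown(context.Background(), notifier, 5*time.Second, ops)
    notifier <- os.Interrupt // simulate Ctrl-C
    select {
    case <-successCh:
        fmt.Println("graceful shutdown")
    case err := <-errCh:
        fmt.Println("shutdown error:", err)
    }
}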
internal/shutdown/shutdown_test.go

@ -26,57 +26,46 @@ import (
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
-	"github.com/r3labs/sse/v2"
+	"github.com/r3labs/sse"
 	log "github.com/sirupsen/logrus"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/internal/shutdown"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/gracefulshutdown"
-)
-
-var (
-	dbAddress              string        = "localhost"
-	dbPort                 int           = 8076
-	dbName                 string        = "vulcanize_testing"
-	dbUsername             string        = "vdbm"
-	dbPassword             string        = "password"
-	dbDriver               string        = "PGX"
-	bcAddress              string        = "localhost"
-	bcPort                 int           = 5052
-	bcConnectionProtocol   string        = "http"
-	bcType                 string        = "lighthouse"
-	bcBootRetryInterval    int           = 1
-	bcBootMaxRetry         int           = 5
-	bcKgTableIncrement     int           = 10
-	bcUniqueIdentifier     int           = 100
-	bcCheckDb              bool          = false
-	bcProcessBeaconBlocks  bool          = true
-	bcProcessBeaconState   bool          = true
-	maxWaitSecondsShutdown time.Duration = time.Duration(1) * time.Second
-	DB                     sql.Database
-	BC                     *beaconclient.BeaconClient
-	err                    error
-	ctx                    context.Context
-	notifierCh             chan os.Signal
+	"github.com/vulcanize/ipld-ethcl-indexer/internal/boot"
+	"github.com/vulcanize/ipld-ethcl-indexer/internal/shutdown"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/beaconclient"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/gracefulshutdown"
 )
 
 var _ = Describe("Shutdown", func() {
+	var (
+		dbAddress              string        = "localhost"
+		dbPort                 int           = 8076
+		dbName                 string        = "vulcanize_testing"
+		dbUsername             string        = "vdbm"
+		dbPassword             string        = "password"
+		dbDriver               string        = "PGX"
+		bcAddress              string        = "localhost"
+		bcPort                 int           = 5052
+		bcConnectionProtocol   string        = "http"
+		maxWaitSecondsShutdown time.Duration = time.Duration(1) * time.Second
+		DB                     sql.Database
+		BC                     *beaconclient.BeaconClient
+		err                    error
+		ctx                    context.Context
+		notifierCh             chan os.Signal
+	)
 	BeforeEach(func() {
 		ctx = context.Background()
-		BC, DB, err = boot.BootApplicationWithRetry(ctx, dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress,
-			bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "head", true, bcUniqueIdentifier, bcCheckDb, bcProcessBeaconBlocks, bcProcessBeaconState)
+		BC, DB, err = boot.BootApplicationWithRetry(ctx, dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, true)
 		notifierCh = make(chan os.Signal, 1)
 		Expect(err).To(BeNil())
 	})
 
-	Describe("Run Shutdown Function for head tracking,", Label("integration"), func() {
+	Describe("Run Shutdown Function,", Label("integration"), func() {
 		Context("When Channels are empty,", func() {
 			It("Should Shutdown Successfully.", func() {
 				go func() {
-					_, cancel := context.WithCancel(context.Background())
 					log.Debug("Starting shutdown chan")
-					err = shutdown.ShutdownHeadTracking(ctx, cancel, notifierCh, maxWaitSecondsShutdown, DB, BC)
+					err = shutdown.ShutdownServices(ctx, notifierCh, maxWaitSecondsShutdown, DB, BC)
 					log.Debug("We have completed the shutdown...")
 					Expect(err).ToNot(HaveOccurred())
 				}()
@ -87,9 +76,8 @@ var _ = Describe("Shutdown", func() {
 			shutdownCh := make(chan bool)
 			//log.SetLevel(log.DebugLevel)
 			go func() {
-				_, cancel := context.WithCancel(context.Background())
 				log.Debug("Starting shutdown chan")
-				err = shutdown.ShutdownHeadTracking(ctx, cancel, notifierCh, maxWaitSecondsShutdown, DB, BC)
+				err = shutdown.ShutdownServices(ctx, notifierCh, maxWaitSecondsShutdown, DB, BC)
 				log.Debug("We have completed the shutdown...")
 				Expect(err).ToNot(HaveOccurred())
 				shutdownCh <- true
@ -122,8 +110,7 @@ var _ = Describe("Shutdown", func() {
 			//log.SetLevel(log.DebugLevel)
 			go func() {
 				log.Debug("Starting shutdown chan")
-				_, cancel := context.WithCancel(context.Background())
-				err = shutdown.ShutdownHeadTracking(ctx, cancel, notifierCh, maxWaitSecondsShutdown, DB, BC)
+				err = shutdown.ShutdownServices(ctx, notifierCh, maxWaitSecondsShutdown, DB, BC)
 				log.Debug("We have completed the shutdown...")
 				Expect(err).To(MatchError(gracefulshutdown.TimeoutErr(maxWaitSecondsShutdown.String())))
 				shutdownCh <- true
@@ -1,44 +0,0 @@
-{
-  "db": {
-    "address": "${POSTGRES_HOST}",
-    "password": "${POSTGRES_PASSWORD}",
-    "port": ${POSTGRES_PORT},
-    "username": "${POSTGRES_USER}",
-    "name": "${POSTGRES_DB}",
-    "driver": "PGX"
-  },
-  "bc": {
-    "address": "${LIGHTHOUSE_HOST}",
-    "port": ${LIGHTHOUSE_PORT},
-    "type": "lighthouse",
-    "bootRetryInterval": 30,
-    "bootMaxRetry": 5,
-    "maxHistoricProcessWorker": ${BC_MAX_HISTORIC_PROCESS_WORKER},
-    "connectionProtocol": "${LIGHTHOUSE_PROTOCOL}",
-    "uniqueNodeIdentifier": ${BC_UNIQUE_NODE_IDENTIFIER},
-    "checkDb": ${BC_CHECK_DB},
-    "performBeaconStateProcessing": ${BC_BEACON_STATE_PROCESSING_ENABLED},
-    "performBeaconBlockProcessing": ${BC_BEACON_BLOCK_PROCESSING_ENABLED},
-    "minimumSlot": ${BC_MINIMUM_SLOT}
-  },
-  "t": {
-    "skipSync": true
-  },
-  "log": {
-    "level": "${LOG_LEVEL}",
-    "output": true,
-    "file": "./ipld-eth-beacon-indexer.log",
-    "format": "json"
-  },
-  "kg": {
-    "increment": ${KG_INCREMENT},
-    "processKnownGaps": ${KG_PROCESS_KNOWN_GAPS_ENABLED},
-    "maxKnownGapsWorker": ${KG_MAX_KNOWN_GAPS_WORKER},
-    "minimumSlot": ${KG_MINIMUM_SLOT}
-  },
-  "pm": {
-    "address": "${PROM_HOST}",
-    "port": ${PROM_PORT},
-    "metrics": ${PROM_METRICS_ENABLED}
-  }
-}
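The removed file is a config template: the ${VAR} placeholders are filled in from the environment before the JSON is consumed. A minimal sketch of that substitution step in Go, assuming os.ExpandEnv-style expansion (the repo's actual tooling may differ, and the template here is trimmed down):

package main

import (
	"fmt"
	"os"
)

func main() {
	// A trimmed-down stand-in for the full boot config template.
	tmpl := `{"db": {"address": "${POSTGRES_HOST}", "port": ${POSTGRES_PORT}}}`
	os.Setenv("POSTGRES_HOST", "localhost")
	os.Setenv("POSTGRES_PORT", "5432")
	// os.ExpandEnv replaces ${VAR} with the corresponding environment value.
	fmt.Println(os.ExpandEnv(tmpl))
	// Output: {"db": {"address": "localhost", "port": 5432}}
}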
main.go (2 changed lines)
@@ -19,7 +19,7 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>
 */
 package main
 
-import "github.com/vulcanize/ipld-eth-beacon-indexer/cmd"
+import "github.com/vulcanize/ipld-ethcl-indexer/cmd"
 
 func main() {
 	cmd.Execute()
@@ -18,11 +18,10 @@ package beaconclient
 import (
 	"context"
 	"fmt"
-	"github.com/r3labs/sse/v2"
+	"github.com/r3labs/sse"
 	log "github.com/sirupsen/logrus"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
-	"math/rand"
-	"time"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
 )
 
 // TODO: Use prysms config values instead of hardcoding them here.
@@ -32,46 +31,42 @@ var (
 	bcReorgTopicEndpoint = "/eth/v1/events?topics=chain_reorg" // Endpoint used to subscribe to the head of the chain
 	BcBlockQueryEndpoint = "/eth/v2/beacon/blocks/"            // Endpoint to query individual Blocks
 	BcStateQueryEndpoint = "/eth/v2/debug/beacon/states/"      // Endpoint to query individual States
-	BcSyncStatusEndpoint = "/eth/v1/node/syncing"              // The endpoint to check to see if the beacon server is still trying to sync to head.
-	LhDbInfoEndpoint     = "/lighthouse/database/info"         // The endpoint for the LIGHTHOUSE server to get the database information.
+	BcSyncStatusEndpoint = "/eth/v1/node/syncing"
 	BcBlockRootEndpoint  = func(slot string) string {
 		return "/eth/v1/beacon/blocks/" + slot + "/root"
 	}
-	bcSlotsPerEpoch uint64 = 32 // Number of slots in a single Epoch
+	bcSlotsPerEpoch = 32 // Number of slots in a single Epoch
 	//bcSlotPerHistoricalVector = 8192 // The number of slots in a historic vector.
 	//bcFinalizedTopicEndpoint = "/eth/v1/events?topics=finalized_checkpoint" // Endpoint used to subscribe to the head of the chain
 )
 
+// A structure utilized for keeping track of various metrics. Currently, mostly used in testing.
+type BeaconClientMetrics struct {
+	HeadTrackingInserts   uint64 // Number of head events we successfully wrote to the DB.
+	HeadTrackingReorgs    uint64 // Number of reorg events we successfully wrote to the DB.
+	HeadTrackingKnownGaps uint64 // Number of known_gaps we successfully wrote to the DB.
+	HeadError             uint64 // Number of errors that occurred when decoding the head message.
+	HeadReorgError        uint64 // Number of errors that occurred when decoding the reorg message.
+}
+
 // A struct that capture the Beacon Server that the Beacon Client will be interacting with and querying.
 type BeaconClient struct {
 	Context        context.Context // A context generic context with multiple uses.
 	ServerEndpoint string          // What is the endpoint of the beacon server.
-	Db                           sql.Database         // Database object used for reads and writes.
-	Metrics                      *BeaconClientMetrics // An object used to keep track of certain BeaconClient Metrics.
-	KnownGapTableIncrement       int                  // The max number of slots within a single known_gaps table entry.
-	UniqueNodeIdentifier         int                  // The unique identifier within the cluster of this individual node.
-	KnownGapsProcess             KnownGapsProcessing  // object keeping track of knowngaps processing
-	CheckDb                      bool                 // Should we check the DB to see if the slot exists before processing it?
-	PerformBeaconStateProcessing bool                 // Should we process BeaconStates?
-	PerformBeaconBlockProcessing bool                 // Should we process BeaconBlocks?
+	PerformHistoricalProcessing bool                 // Should we perform historical processing?
+	Db                          sql.Database         // Database object used for reads and writes.
+	Metrics                     *BeaconClientMetrics // An object used to keep track of certain BeaconClient Metrics.
+	KnownGapTableIncrement      int                  // The max number of slots within a single known_gaps table entry.
 
 	// Used for Head Tracking
 	PerformHeadTracking bool // Should we track head?
-	StartingSlot        Slot // If we're performing head tracking. What is the first slot we processed.
-	PreviousSlot        Slot // Whats the previous slot we processed
+	StartingSlot        int  // If we're performing head tracking. What is the first slot we processed.
+	PreviousSlot        int  // Whats the previous slot we processed
 	PreviousBlockRoot   string // Whats the previous block root, used to check the next blocks parent.
+	CheckKnownGaps      bool   // Should we check for gaps at start up.
 	HeadTracking        *SseEvents[Head]       // Track the head block
 	ReOrgTracking       *SseEvents[ChainReorg] // Track all Reorgs
 	//FinalizationTracking *SseEvents[FinalizedCheckpoint] // Track all finalization checkpoints
-
-	// Used for Historical Processing
-
-	// The latest available slot within the Beacon Server. We can't query any slot greater than this.
-	// This value is lazily updated. Therefore at times it will be outdated.
-	LatestSlotInBeaconServer    int64
-	PerformHistoricalProcessing bool               // Should we perform historical processing?
-	HistoricalProcess           HistoricProcessing // object keeping track of historical processing
 }
 
 // A struct to keep track of relevant the head event topic.
@@ -80,7 +75,7 @@ type SseEvents[P ProcessedEvents] struct {
 	MessagesCh chan *sse.Event // Contains all the messages from the SSE Channel
 	ErrorCh    chan *SseError  // Contains any errors while SSE streaming occurred
 	ProcessCh  chan *P         // Used to capture processed data in its proper struct.
-	sseClient  *sse.Client     // sse.Client object that is used to interact with the SSE stream
+	SseClient  *sse.Client     // sse.Client object that is used to interact with the SSE stream
 }
 
 // An object to capture any errors when turning an SSE message to JSON.
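A minimal sketch of how a generic event container like SseEvents might be consumed, fanning raw SSE payloads into typed results; the Head payload and field names here are illustrative stand-ins, not the package's exact definitions:

package main

import "fmt"

// Illustrative stand-ins for the package's generic event plumbing.
type Head struct{ Slot string }

type SseEvents[P any] struct {
	MessagesCh chan []byte // raw SSE payloads
	ProcessCh  chan *P     // decoded, typed events
}

func main() {
	ev := SseEvents[Head]{
		MessagesCh: make(chan []byte, 1),
		ProcessCh:  make(chan *Head, 1),
	}
	ev.MessagesCh <- []byte("100")
	// A decoder goroutine would normally unmarshal JSON here.
	msg := <-ev.MessagesCh
	ev.ProcessCh <- &Head{Slot: string(msg)}
	fmt.Println("processed head at slot", (<-ev.ProcessCh).Slot)
}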
@@ -90,33 +85,20 @@ type SseError struct {
 }
 
 // A Function to create the BeaconClient.
-func CreateBeaconClient(ctx context.Context, connectionProtocol string, bcAddress string, bcPort int,
-	bcKgTableIncrement int, uniqueNodeIdentifier int, checkDb bool, performBeaconBlockProcessing bool, performBeaconStateProcessing bool) (*BeaconClient, error) {
-	if uniqueNodeIdentifier == 0 {
-		uniqueNodeIdentifier := rand.Int()
-		log.WithField("randomUniqueNodeIdentifier", uniqueNodeIdentifier).Warn("No uniqueNodeIdentifier provided, we are going to use a randomly generated one.")
-	}
-
-	metrics, err := CreateBeaconClientMetrics()
-	if err != nil {
-		return nil, err
-	}
-
+func CreateBeaconClient(ctx context.Context, connectionProtocol string, bcAddress string, bcPort int) *BeaconClient {
 	endpoint := fmt.Sprintf("%s://%s:%d", connectionProtocol, bcAddress, bcPort)
 	log.Info("Creating the BeaconClient")
 	return &BeaconClient{
 		Context:        ctx,
 		ServerEndpoint: endpoint,
-		KnownGapTableIncrement:       bcKgTableIncrement,
-		HeadTracking:                 createSseEvent[Head](endpoint, BcHeadTopicEndpoint),
-		ReOrgTracking:                createSseEvent[ChainReorg](endpoint, bcReorgTopicEndpoint),
-		Metrics:                      metrics,
-		UniqueNodeIdentifier:         uniqueNodeIdentifier,
-		CheckDb:                      checkDb,
-		PerformBeaconBlockProcessing: performBeaconBlockProcessing,
-		PerformBeaconStateProcessing: performBeaconStateProcessing,
+		HeadTracking:   createSseEvent[Head](endpoint, BcHeadTopicEndpoint),
+		ReOrgTracking:  createSseEvent[ChainReorg](endpoint, bcReorgTopicEndpoint),
+		Metrics: &BeaconClientMetrics{
+			HeadTrackingInserts: 0,
+			HeadTrackingReorgs:  0,
+		},
 		//FinalizationTracking: createSseEvent[FinalizedCheckpoint](endpoint, bcFinalizedTopicEndpoint),
-	}, nil
+	}
 }
 
 // Create all the channels to handle a SSE events
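For illustration, a sketch of calling the main-branch constructor with its expanded signature as shown in the hunk above; the argument values are placeholders, and the example assumes this revision of the module is importable:

package main

import (
	"context"
	"log"

	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
)

func main() {
	// Placeholder values; in the indexer these come from the boot config.
	bc, err := beaconclient.CreateBeaconClient(
		context.Background(),
		"http", "localhost", 5052, // connection protocol, address, port
		100,   // bcKgTableIncrement: max slots per known_gaps entry
		0,     // uniqueNodeIdentifier: 0 triggers a random identifier
		false, // checkDb before processing a slot
		true,  // performBeaconBlockProcessing
		true,  // performBeaconStateProcessing
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("beacon client pointed at", bc.ServerEndpoint)
}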
@@ -127,40 +109,10 @@ func createSseEvent[P ProcessedEvents](baseEndpoint string, path string) *SseEve
 		MessagesCh: make(chan *sse.Event, 1),
 		ErrorCh:    make(chan *SseError),
 		ProcessCh:  make(chan *P),
+		SseClient: func(endpoint string) *sse.Client {
+			log.WithFields(log.Fields{"endpoint": endpoint}).Info("Creating SSE client")
+			return sse.NewClient(endpoint)
+		}(endpoint),
 	}
 	return sseEvents
 }
 
-func (se *SseEvents[P]) Connect() error {
-	if nil == se.sseClient {
-		se.initClient()
-	}
-	return se.sseClient.SubscribeChanRaw(se.MessagesCh)
-}
-
-func (se *SseEvents[P]) Disconnect() {
-	if nil == se.sseClient {
-		return
-	}
-
-	log.WithFields(log.Fields{"endpoint": se.Endpoint}).Info("Disconnecting and destroying SSE client")
-	se.sseClient.Unsubscribe(se.MessagesCh)
-	se.sseClient.Connection.CloseIdleConnections()
-	se.sseClient = nil
-}
-
-func (se *SseEvents[P]) initClient() {
-	if nil != se.sseClient {
-		se.Disconnect()
-	}
-
-	log.WithFields(log.Fields{"endpoint": se.Endpoint}).Info("Creating SSE client")
-	client := sse.NewClient(se.Endpoint)
-	client.ReconnectNotify = func(err error, duration time.Duration) {
-		log.WithFields(log.Fields{"endpoint": se.Endpoint}).Debug("Reconnecting SSE client")
-	}
-	client.OnDisconnect(func(c *sse.Client) {
-		log.WithFields(log.Fields{"endpoint": se.Endpoint}).Debug("SSE client disconnected")
-	})
-	se.sseClient = client
-}
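The main branch creates the SSE client lazily on first Connect and destroys it on Disconnect, so a later Connect rebuilds it. A minimal self-contained sketch of that lazy-lifecycle pattern, with illustrative names in place of the real sse.Client:

package main

import "log"

// conn is a stand-in for *sse.Client.
type conn struct{ endpoint string }

type events struct {
	endpoint string
	c        *conn
}

// Connect creates the client on demand, mirroring the lazy initClient pattern.
func (e *events) Connect() {
	if e.c == nil {
		log.Println("creating client for", e.endpoint)
		e.c = &conn{endpoint: e.endpoint}
	}
}

// Disconnect drops the client so a later Connect builds a fresh one.
func (e *events) Disconnect() {
	if e.c == nil {
		return
	}
	log.Println("destroying client for", e.endpoint)
	e.c = nil
}

func main() {
	e := &events{endpoint: "http://localhost:5052/eth/v1/events?topics=head"}
	e.Connect()
	e.Disconnect()
	e.Connect() // rebuilt transparently after a disconnect
}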
@@ -21,13 +21,16 @@ import (
 	"time"
 
 	log "github.com/sirupsen/logrus"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
 )
 
 // This function will perform all the heavy lifting for tracking the head of the chain.
-func (bc *BeaconClient) CaptureHead() {
+func (bc *BeaconClient) CaptureHead(knownGapsTableIncrement int) {
+	bc.KnownGapTableIncrement = knownGapsTableIncrement
 	log.Info("We are tracking the head of the chain.")
+	//bc.tempHelper()
 	go bc.handleHead()
+	//go bc.handleFinalizedCheckpoint()
 	go bc.handleReorg()
 	bc.captureEventTopic()
 }
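The known-gaps increment moved from the CaptureHead call into client construction, so head tracking on main starts without arguments. A hedged sketch of the call-site migration, assuming this revision of the module is importable:

package main

import (
	"context"

	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
)

func startHeadTracking(bc *beaconclient.BeaconClient) {
	// main branch: the increment was already supplied to CreateBeaconClient
	// (bcKgTableIncrement), so CaptureHead takes no arguments.
	bc.CaptureHead()
	// v0.1.0 equivalent (old signature): bc.CaptureHead(100)
}

func main() {
	bc, err := beaconclient.CreateBeaconClient(context.Background(), "http", "localhost", 5052, 100, 0, false, true, true)
	if err != nil {
		return
	}
	startHeadTracking(bc) // blocks while capturing the event topics
}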
@@ -50,7 +53,7 @@ func (bc *BeaconClient) StopHeadTracking() error {
 // This function closes the SSE subscription, but waits until the MessagesCh is empty
 func (se *SseEvents[ProcessedEvents]) finishProcessingChannel(finish chan<- bool) {
 	loghelper.LogEndpoint(se.Endpoint).Info("Received a close event.")
-	se.Disconnect()
+	se.SseClient.Unsubscribe(se.MessagesCh)
 	for len(se.MessagesCh) != 0 || len(se.ProcessCh) != 0 {
 		time.Sleep(time.Duration(shutdownWaitInterval) * time.Millisecond)
 	}
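finishProcessingChannel drains buffered channels before signalling completion. A minimal runnable sketch of that drain-then-signal pattern, with an illustrative 10ms poll interval in place of shutdownWaitInterval:

package main

import (
	"fmt"
	"time"
)

func drainThenSignal(messages chan string, finish chan<- bool) {
	// Poll until all buffered work has been consumed downstream.
	for len(messages) != 0 {
		time.Sleep(10 * time.Millisecond)
	}
	finish <- true
}

func main() {
	messages := make(chan string, 2)
	messages <- "head event"
	messages <- "reorg event"
	finish := make(chan bool)
	go drainThenSignal(messages, finish)
	go func() { // downstream consumer
		for range messages {
		}
	}()
	<-finish
	fmt.Println("channels drained, safe to shut down")
}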
(File diff suppressed because it is too large.)
@@ -1,179 +0,0 @@
-// VulcanizeDB
-// Copyright © 2022 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-// This file will call all the functions to start and stop capturing the head of the beacon chain.
-
-package beaconclient
-
-import (
-	"context"
-	"fmt"
-
-	log "github.com/sirupsen/logrus"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
-	"golang.org/x/sync/errgroup"
-)
-
-// This function will perform all the heavy lifting for tracking the head of the chain.
-func (bc *BeaconClient) CaptureHistoric(ctx context.Context, maxWorkers int, minimumSlot Slot) []error {
-	log.Info("We are starting the historical processing service.")
-	bc.HistoricalProcess = HistoricProcessing{db: bc.Db, metrics: bc.Metrics, uniqueNodeIdentifier: bc.UniqueNodeIdentifier}
-	errs := handleBatchProcess(ctx, maxWorkers, bc.HistoricalProcess, bc.SlotProcessingDetails(), bc.Metrics.IncrementHistoricSlotProcessed, minimumSlot)
-	log.Debug("Exiting Historical")
-	return errs
-}
-
-// This function will perform all the necessary clean up tasks for stopping historical processing.
-func (bc *BeaconClient) StopHistoric(cancel context.CancelFunc) error {
-	log.Info("We are stopping the historical processing service.")
-	cancel()
-	err := bc.HistoricalProcess.releaseDbLocks()
-	if err != nil {
-		loghelper.LogError(err).WithField("uniqueIdentifier", bc.UniqueNodeIdentifier).Error("We were unable to remove the locks from the eth_beacon.historic_processing table. Manual Intervention is needed!")
-	}
-	return nil
-}
-
-// An interface to enforce any batch processing. Currently there are two use cases for this.
-//
-// 1. Historic Processing
-//
-// 2. Known Gaps Processing
-type BatchProcessing interface {
-	getSlotRange(context.Context, chan<- slotsToProcess, Slot) []error // Write the slots to process in a channel, return an error if you cant get the next slots to write.
-	handleProcessingErrors(context.Context, <-chan batchHistoricError) // Custom logic to handle errors.
-	removeTableEntry(context.Context, <-chan slotsToProcess) error     // With the provided start and end slot, remove the entry from the database.
-	releaseDbLocks() error                                             // Update the checked_out column to false for whatever table is being updated.
-}
-
-/// ^^^
-// Might be better to remove the interface and create a single struct that historicalProcessing
-// and knownGapsProcessing can use. The struct would contain all the SQL strings that they need.
-// And the only difference in logic for processing would be within the error handling.
-// Which can be a function we pass into handleBatchProcess()
-
-// A struct to pass around indicating a table entry for slots to process.
-type slotsToProcess struct {
-	startSlot Slot // The start slot
-	endSlot   Slot // The end slot
-}
-
-type batchHistoricError struct {
-	err        error  // The error that occurred when attempting to a slot
-	errProcess string // The process that caused the error.
-	slot       Slot   // The slot which the error is for.
-}
-
-// Wrapper function for the BatchProcessing interface.
-// This function will take the structure that needs batch processing.
-// It follows a generic format.
-// Get new entries from any given table.
-// 1. Add it to the slotsCh.
-//
-// 2. Run the maximum specified workers to handle individual slots. We need a maximum because we don't want
-// To store too many SSZ objects in memory.
-//
-// 3. Process the slots and send the err to the ErrCh. Each structure can define how it wants its own errors handled.
-//
-// 4. Remove the slot entry from the DB.
-//
-// 5. Handle any errors.
-func handleBatchProcess(ctx context.Context, maxWorkers int, bp BatchProcessing, spd SlotProcessingDetails, incrementTracker func(uint64), minimumSlot Slot) []error {
-	slotsCh := make(chan slotsToProcess)
-	workCh := make(chan Slot)
-	processedCh := make(chan slotsToProcess)
-	errCh := make(chan batchHistoricError)
-	finalErrCh := make(chan []error, 1)
-
-	// Checkout Rows with same node Identifier.
-	err := bp.releaseDbLocks()
-	if err != nil {
-		loghelper.LogError(err).Error(("We are unable to un-checkout entries at the start!"))
-	}
-
-	// Start workers
-	for w := 1; w <= maxWorkers; w++ {
-		log.WithFields(log.Fields{"maxWorkers": maxWorkers}).Debug("Starting batch processing workers")
-		go processSlotRangeWorker(ctx, workCh, errCh, spd, incrementTracker)
-	}
-
-	// Process all ranges and send each individual slot to the worker.
-	go func() {
-		for {
-			select {
-			case <-ctx.Done():
-				return
-			case slots := <-slotsCh:
-				if slots.startSlot > slots.endSlot {
-					log.Error("We received a batch process request where the startSlot is greater than the end slot.")
-					errCh <- batchHistoricError{
-						err:        fmt.Errorf("We received a startSlot where the start was greater than the end."),
-						errProcess: "RangeOrder",
-						slot:       slots.startSlot,
-					}
-					errCh <- batchHistoricError{
-						err:        fmt.Errorf("We received a endSlot where the start was greater than the end."),
-						errProcess: "RangeOrder",
-						slot:       slots.endSlot,
-					}
-				} else if slots.startSlot == slots.endSlot {
-					log.WithField("slot", slots.startSlot).Debug("Added new slot to workCh")
-					workCh <- slots.startSlot
-					processedCh <- slots
-				} else {
-					for i := slots.startSlot; i <= slots.endSlot; i++ {
-						workCh <- i
-						log.WithField("slot", i).Debug("Added new slot to workCh")
-					}
-					processedCh <- slots
-				}
-			}
-		}
-	}()
-
-	// Remove entries, end the application if a row cannot be removed..
-	go func() {
-		errG := new(errgroup.Group)
-		errG.Go(func() error {
-			return bp.removeTableEntry(ctx, processedCh)
-		})
-		if err := errG.Wait(); err != nil {
-			finalErrCh <- []error{err}
-		}
-	}()
-	// Process errors from slot processing.
-	go bp.handleProcessingErrors(ctx, errCh)
-
-	// Get slots from the DB.
-	go func() {
-		errs := bp.getSlotRange(ctx, slotsCh, minimumSlot) // Periodically adds new entries....
-		if errs != nil {
-			finalErrCh <- errs
-		}
-		finalErrCh <- nil
-		log.Debug("We are stopping the processing of adding new entries")
-	}()
-	log.Debug("Waiting for shutdown signal from channel")
-	select {
-	case <-ctx.Done():
-		log.Debug("Received shutdown signal from channel")
-		return nil
-	case errs := <-finalErrCh:
-		log.Debug("Finishing the batchProcess")
-		return errs
-	}
-}
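The numbered comment above handleBatchProcess describes a fan-out pipeline: ranges arrive on one channel, are expanded into individual slots for a bounded worker pool, and each completed range is acknowledged downstream. A minimal self-contained sketch of that shape; the names and the trivial "work" are illustrative, not the indexer's implementation:

package main

import (
	"fmt"
	"sync"
)

type span struct{ start, end int }

func main() {
	spans := make(chan span)
	work := make(chan int)
	done := make(chan span, 1)

	// Bounded worker pool: caps how many slots are in flight at once.
	var wg sync.WaitGroup
	for w := 0; w < 2; w++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for slot := range work {
				fmt.Printf("worker %d processed slot %d\n", id, slot)
			}
		}(w)
	}

	// Expand each incoming range into individual slots, then acknowledge it.
	go func() {
		for s := range spans {
			for i := s.start; i <= s.end; i++ {
				work <- i
			}
			done <- s
		}
		close(work)
	}()

	spans <- span{start: 100, end: 103}
	close(spans)
	ack := <-done
	wg.Wait()
	fmt.Printf("range %d-%d acknowledged\n", ack.start, ack.end)
}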
@@ -1,306 +0,0 @@
-package beaconclient_test
-
-import (
-	"context"
-	"fmt"
-	"sync/atomic"
-	"time"
-
-	"github.com/jarcoal/httpmock"
-	. "github.com/onsi/ginkgo/v2"
-	. "github.com/onsi/gomega"
-	log "github.com/sirupsen/logrus"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
-)
-
-var (
-	kgCheckCheckedOutStmt = `SELECT * FROM eth_beacon.known_gaps WHERE checked_out=true `
-	hpCheckCheckedOutStmt = `SELECT * FROM eth_beacon.historic_process WHERE checked_out=true `
-)
-
-var _ = Describe("Capturehistoric", func() {
-
-	Describe("Run the application in historic mode", Label("unit", "behavioral", "historical"), func() {
-		Context("Phase0 + Altairs: When we need to process a multiple blocks in a multiple entries in the eth_beacon.historic_process table.", Label("deb"), func() {
-			It("Successfully Process the Blocks", func() {
-				bc := setUpTest(BeaconNodeTester.TestConfig, "99")
-				BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
-				defer httpmock.DeactivateAndReset()
-				BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 101, 10)
-				BeaconNodeTester.runHistoricalProcess(bc, 2, 2, 0, 0, 0)
-				// Run Two seperate processes
-				BeaconNodeTester.writeEventToHistoricProcess(bc, 2375703, 2375703, 10)
-				BeaconNodeTester.runHistoricalProcess(bc, 2, 3, 0, 0, 0)
-
-				time.Sleep(2 * time.Second)
-				validatePopularBatchBlocks(bc)
-			})
-		})
-		Context("When the start block is greater than the endBlock", func() {
-			It("Should Add two entries to the knownGaps table", func() {
-				bc := setUpTest(BeaconNodeTester.TestConfig, "99")
-				BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
-				defer httpmock.DeactivateAndReset()
-				BeaconNodeTester.writeEventToHistoricProcess(bc, 101, 100, 10)
-				BeaconNodeTester.runHistoricalProcess(bc, 2, 0, 0, 2, 0)
-			})
-		})
-		Context("Processing the Genesis block", Label("genesis"), func() {
-			It("Should Process properly", func() {
-				bc := setUpTest(BeaconNodeTester.TestConfig, "100")
-				BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
-				defer httpmock.DeactivateAndReset()
-				BeaconNodeTester.writeEventToHistoricProcess(bc, 0, 0, 10)
-				BeaconNodeTester.runHistoricalProcess(bc, 2, 1, 0, 0, 0)
-				validateSlot(bc, BeaconNodeTester.TestEvents["0"].HeadMessage, 0, "proposed")
-				if bc.PerformBeaconBlockProcessing {
-					validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["0"].HeadMessage, BeaconNodeTester.TestEvents["0"].CorrectParentRoot, BeaconNodeTester.TestEvents["0"].CorrectEth1DataBlockHash, BeaconNodeTester.TestEvents["0"].CorrectSignedBeaconBlockMhKey, BeaconNodeTester.TestEvents["0"].CorrectExecutionPayloadHeader)
-				}
-				if bc.PerformBeaconStateProcessing {
-					validateBeaconState(bc, BeaconNodeTester.TestEvents["0"].HeadMessage, BeaconNodeTester.TestEvents["0"].CorrectBeaconStateMhKey)
-				}
-			})
-		})
-		Context("When there is a skipped slot", func() {
-			It("Should process the slot properly.", func() {
-				bc := setUpTest(BeaconNodeTester.TestConfig, "3797055")
-				BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
-				defer httpmock.DeactivateAndReset()
-				BeaconNodeTester.writeEventToHistoricProcess(bc, 3797056, 3797056, 10)
-				BeaconNodeTester.runHistoricalProcess(bc, 2, 1, 0, 0, 0)
-				validateSlot(bc, BeaconNodeTester.TestEvents["3797056"].HeadMessage, 118658, "skipped")
-			})
-		})
-	})
-	Describe("Running the Application to process Known Gaps", Label("unit", "behavioral", "knownGaps"), func() {
-		Context("Phase0 + Altairs: When we need to process a multiple blocks in a multiple entries in the eth_beacon.known_gaps table.", func() {
-			It("Successfully Process the Blocks", func() {
-				bc := setUpTest(BeaconNodeTester.TestConfig, "99")
-				BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
-				defer httpmock.DeactivateAndReset()
-				BeaconNodeTester.writeEventToKnownGaps(bc, 100, 101)
-				BeaconNodeTester.runKnownGapsProcess(bc, 2, 2, 0, 0, 0)
-				// Run Two seperate processes
-				BeaconNodeTester.writeEventToKnownGaps(bc, 2375703, 2375703)
-				BeaconNodeTester.runKnownGapsProcess(bc, 2, 3, 0, 0, 0)
-
-				time.Sleep(2 * time.Second)
-				validatePopularBatchBlocks(bc)
-			})
-		})
-		Context("When the start block is greater than the endBlock", func() {
-			It("Should Add two entries to the knownGaps table", func() {
-				bc := setUpTest(BeaconNodeTester.TestConfig, "104")
-				BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
-				defer httpmock.DeactivateAndReset()
-				BeaconNodeTester.writeEventToKnownGaps(bc, 101, 100)
-				BeaconNodeTester.runKnownGapsProcess(bc, 2, 2, 0, 2, 0)
-			})
-		})
-		Context("When theres a reprocessing error", Label("reprocessingError", "flaky"), func() {
-			It("Should update the reprocessing error.", func() {
-				bc := setUpTest(BeaconNodeTester.TestConfig, "99")
-				BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
-				defer httpmock.DeactivateAndReset()
-				// We dont have an entry in the BeaconNodeTester for this slot
-				BeaconNodeTester.writeEventToHistoricProcess(bc, 105, 105, 10)
-				BeaconNodeTester.runHistoricalProcess(bc, 2, 0, 0, 1, 0)
-				BeaconNodeTester.runKnownGapsProcess(bc, 2, 0, 0, 1, 1)
-			})
-		})
-	})
-	Describe("Running the application in Historic, Head, and KnownGaps mode", Label("unit", "historical", "full"), func() {
-		Context("When it recieves a head, historic and known Gaps message (in order)", func() {
-			It("Should process them all successfully.", func() {
-				bc := setUpTest(BeaconNodeTester.TestConfig, "2375702")
-				BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
-				defer httpmock.DeactivateAndReset()
-				// Head
-				BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0)
-
-				// Historical
-				BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 100, 10)
-				BeaconNodeTester.runHistoricalProcess(bc, 2, 2, 0, 0, 0)
-
-				// Known Gaps
-				BeaconNodeTester.writeEventToKnownGaps(bc, 101, 101)
-				BeaconNodeTester.runKnownGapsProcess(bc, 2, 3, 0, 0, 0)
-
-				time.Sleep(2 * time.Second)
-				validatePopularBatchBlocks(bc)
-			})
-		})
-		Context("When it recieves a historic, head and known Gaps message (in order)", func() {
-			It("Should process them all successfully.", func() {
-				bc := setUpTest(BeaconNodeTester.TestConfig, "2375702")
-				BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
-				defer httpmock.DeactivateAndReset()
-				// Historical
-				BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 100, 10)
-				BeaconNodeTester.runHistoricalProcess(bc, 2, 1, 0, 0, 0)
-
-				// Head
-				BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0)
-
-				// Known Gaps
-				BeaconNodeTester.writeEventToKnownGaps(bc, 101, 101)
-				BeaconNodeTester.runKnownGapsProcess(bc, 2, 3, 0, 0, 0)
-
-				time.Sleep(2 * time.Second)
-				validatePopularBatchBlocks(bc)
-			})
-		})
-		Context("When it recieves a known Gaps, historic and head message (in order)", func() {
-			It("Should process them all successfully.", func() {
-				bc := setUpTest(BeaconNodeTester.TestConfig, "2375702")
-				BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
-				defer httpmock.DeactivateAndReset()
-				// Known Gaps
-				BeaconNodeTester.writeEventToKnownGaps(bc, 101, 101)
-				BeaconNodeTester.runKnownGapsProcess(bc, 2, 1, 0, 0, 0)
-
-				// Historical
-				BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 100, 10)
-				BeaconNodeTester.runHistoricalProcess(bc, 2, 2, 0, 0, 0)
-
-				// Head
-				BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0)
-
-				time.Sleep(2 * time.Second)
-				validatePopularBatchBlocks(bc)
-			})
-		})
-	})
-})
-
-// This function will write an even to the eth_beacon.known_gaps table
-func (tbc TestBeaconNode) writeEventToKnownGaps(bc *beaconclient.BeaconClient, startSlot, endSlot int) {
-	log.Debug("We are writing the necessary events to batch process")
-	insertKnownGapsStmt := `INSERT INTO eth_beacon.known_gaps (start_slot, end_slot)
-	VALUES ($1, $2);`
-	res, err := bc.Db.Exec(context.Background(), insertKnownGapsStmt, startSlot, endSlot)
-	Expect(err).ToNot(HaveOccurred())
-	rows, err := res.RowsAffected()
-	if rows != 1 {
-		Fail("We didnt write...")
-	}
-	Expect(err).ToNot(HaveOccurred())
-}
-
-// This function will write an even to the eth_beacon.known_gaps table
-func (tbc TestBeaconNode) writeEventToHistoricProcess(bc *beaconclient.BeaconClient, startSlot, endSlot, priority int) {
-	log.Debug("We are writing the necessary events to batch process")
-	insertHistoricProcessingStmt := `INSERT INTO eth_beacon.historic_process (start_slot, end_slot, priority)
-	VALUES ($1, $2, $3);`
-	res, err := bc.Db.Exec(context.Background(), insertHistoricProcessingStmt, startSlot, endSlot, priority)
-	Expect(err).ToNot(HaveOccurred())
-	rows, err := res.RowsAffected()
-	if rows != 1 {
-		Fail("We didnt write...")
-	}
-	Expect(err).ToNot(HaveOccurred())
-}
-
-// Start the CaptureHistoric function, and check for the correct inserted slots.
-func (tbc TestBeaconNode) runHistoricalProcess(bc *beaconclient.BeaconClient, maxWorkers int, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) {
-	ctx, cancel := context.WithCancel(context.Background())
-	go bc.CaptureHistoric(ctx, maxWorkers, 0)
-	validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError)
-	log.Debug("Calling the stop function for historical processing..")
-	err := bc.StopHistoric(cancel)
-	time.Sleep(5 * time.Second)
-	Expect(err).ToNot(HaveOccurred())
-	validateAllRowsCheckedOut(bc.Db, hpCheckCheckedOutStmt)
-}
-
-// Wrapper function that processes knownGaps
-func (tbc TestBeaconNode) runKnownGapsProcess(bc *beaconclient.BeaconClient, maxWorkers int, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) {
-	ctx, cancel := context.WithCancel(context.Background())
-	go bc.ProcessKnownGaps(ctx, maxWorkers, 0)
-	validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError)
-	err := bc.StopKnownGapsProcessing(cancel)
-	time.Sleep(5 * time.Second)
-	Expect(err).ToNot(HaveOccurred())
-	validateAllRowsCheckedOut(bc.Db, kgCheckCheckedOutStmt)
-}
-
-func validateMetrics(bc *beaconclient.BeaconClient, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) {
-	curRetry := 0
-	value := atomic.LoadUint64(&bc.Metrics.SlotInserts)
-	for value != expectedInserts {
-		time.Sleep(1 * time.Second)
-		curRetry = curRetry + 1
-		if curRetry == maxRetry {
-			Fail(fmt.Sprintf("Too many retries have occurred. The number of inserts expected %d, the number that actually occurred, %d", expectedInserts, atomic.LoadUint64(&bc.Metrics.SlotInserts)))
-		}
-		value = atomic.LoadUint64(&bc.Metrics.SlotInserts)
-	}
-	curRetry = 0
-	value = atomic.LoadUint64(&bc.Metrics.KnownGapsInserts)
-	for value != expectedKnownGaps {
-		time.Sleep(1 * time.Second)
-		curRetry = curRetry + 1
-		if curRetry == maxRetry {
-			Fail(fmt.Sprintf("Too many retries have occurred. The number of knownGaps expected %d, the number that actually occurred, %d", expectedKnownGaps, atomic.LoadUint64(&bc.Metrics.KnownGapsInserts)))
-		}
-		value = atomic.LoadUint64(&bc.Metrics.KnownGapsInserts)
-	}
-	curRetry = 0
-	value = atomic.LoadUint64(&bc.Metrics.KnownGapsReprocessError)
-	for value != expectedKnownGapsReprocessError {
-		time.Sleep(1 * time.Second)
-		curRetry = curRetry + 1
-		if curRetry == maxRetry {
-			Fail(fmt.Sprintf("Too many retries have occurred. The number of knownGapsReprocessingErrors expected %d, the number that actually occurred, %d", expectedKnownGapsReprocessError, value))
-		}
-		log.Debug("&bc.Metrics.KnownGapsReprocessError: ", &bc.Metrics.KnownGapsReprocessError)
-		value = atomic.LoadUint64(&bc.Metrics.KnownGapsReprocessError)
-	}
-	curRetry = 0
-	value = atomic.LoadUint64(&bc.Metrics.ReorgInserts)
-	for value != expectedReorgs {
-		time.Sleep(1 * time.Second)
-		curRetry = curRetry + 1
-		if curRetry == maxRetry {
-			Fail(fmt.Sprintf("Too many retries have occurred. The number of Reorgs expected %d, the number that actually occurred, %d", expectedReorgs, atomic.LoadUint64(&bc.Metrics.ReorgInserts)))
-		}
-		value = atomic.LoadUint64(&bc.Metrics.ReorgInserts)
-	}
-}
-
-// A wrapper function to validate a few popular blocks
-func validatePopularBatchBlocks(bc *beaconclient.BeaconClient) {
-	validateSlot(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, "proposed")
-	if bc.PerformBeaconBlockProcessing {
-		validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectParentRoot, BeaconNodeTester.TestEvents["100"].CorrectEth1DataBlockHash, BeaconNodeTester.TestEvents["100"].CorrectSignedBeaconBlockMhKey, BeaconNodeTester.TestEvents["100"].CorrectExecutionPayloadHeader)
-	}
-	if bc.PerformBeaconStateProcessing {
-		validateBeaconState(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectBeaconStateMhKey)
-	}
-
-	validateSlot(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, 3, "proposed")
-	if bc.PerformBeaconBlockProcessing {
-		validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, BeaconNodeTester.TestEvents["100"].HeadMessage.Block, BeaconNodeTester.TestEvents["101"].CorrectEth1DataBlockHash, BeaconNodeTester.TestEvents["101"].CorrectSignedBeaconBlockMhKey, BeaconNodeTester.TestEvents["101"].CorrectExecutionPayloadHeader)
-	}
-	if bc.PerformBeaconStateProcessing {
-		validateBeaconState(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, BeaconNodeTester.TestEvents["101"].CorrectBeaconStateMhKey)
-	}
-
-	validateSlot(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, "proposed")
-	if bc.PerformBeaconBlockProcessing {
-		validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectParentRoot, BeaconNodeTester.TestEvents["2375703"].CorrectEth1DataBlockHash, BeaconNodeTester.TestEvents["2375703"].CorrectSignedBeaconBlockMhKey, BeaconNodeTester.TestEvents["2375703"].CorrectExecutionPayloadHeader)
-	}
-	if bc.PerformBeaconStateProcessing {
-		validateBeaconState(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectBeaconStateMhKey)
-	}
-}
-
-// Make sure all rows have checked_out as false.
-func validateAllRowsCheckedOut(db sql.Database, checkStmt string) {
-	res, err := db.Exec(context.Background(), checkStmt)
-	Expect(err).ToNot(HaveOccurred())
-	rows, err := res.RowsAffected()
-	Expect(err).ToNot(HaveOccurred())
-	Expect(rows).To(Equal(int64(0)))
-}
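validateMetrics above polls atomic counters until they reach an expected value, failing after a bounded number of retries. A minimal self-contained sketch of that polling pattern, with the one-second sleep shortened for the example:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// waitForCount polls an atomic counter until it reaches want, giving up
// after maxRetry checks. Mirrors the shape of validateMetrics.
func waitForCount(counter *uint64, want uint64, maxRetry int) error {
	for retry := 0; atomic.LoadUint64(counter) != want; retry++ {
		if retry == maxRetry {
			return fmt.Errorf("expected %d, observed %d", want, atomic.LoadUint64(counter))
		}
		time.Sleep(10 * time.Millisecond) // 1s in the real tests; shortened here
	}
	return nil
}

func main() {
	var inserts uint64
	go func() {
		time.Sleep(30 * time.Millisecond)
		atomic.AddUint64(&inserts, 2) // pretend two slots were written
	}()
	if err := waitForCount(&inserts, 2, 100); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("metric reached expected value")
}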
@@ -1,214 +0,0 @@
-// VulcanizeDB
-// Copyright © 2022 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-package beaconclient
-
-import (
-	"bufio"
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io"
-	"net/http"
-	"strconv"
-	"strings"
-	"sync/atomic"
-
-	log "github.com/sirupsen/logrus"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
-)
-
-var (
-	MissingBeaconServerType error = fmt.Errorf("The beacon server type provided is not handled.")
-	LighthouseMissingSlots  error = fmt.Errorf("Anchor is not nil. This means lighthouse has not backfilled all the slots from Genesis to head.")
-)
-
-// The sync response when checking if the node is synced.
-type Sync struct {
-	Data SyncData `json:"data"`
-}
-
-// The sync data
-type SyncData struct {
-	IsSync       bool   `json:"is_syncing"`
-	HeadSlot     string `json:"head_slot"`
-	SyncDistance string `json:"sync_distance"`
-}
-
-// This function will check to see if we are synced up with the head of chain.
-// {"data":{"is_syncing":true,"head_slot":"62528","sync_distance":"3734299"}}
-func (bc BeaconClient) CheckHeadSync() (bool, error) {
-	syncStatus, err := bc.QueryHeadSync()
-	if err != nil {
-		return true, nil
-	}
-	return syncStatus.Data.IsSync, nil
-}
-
-func (bc BeaconClient) QueryHeadSync() (Sync, error) {
-	var syncStatus Sync
-	bcSync := bc.ServerEndpoint + BcSyncStatusEndpoint
-	resp, err := http.Get(bcSync)
-
-	if err != nil {
-		loghelper.LogEndpoint(bcSync).Error("Unable to check the sync status")
-		return syncStatus, err
-	}
-
-	if resp.StatusCode < 200 || resp.StatusCode > 299 {
-		loghelper.LogEndpoint(bcSync).WithFields(log.Fields{"returnCode": resp.StatusCode}).Error("Error when getting the sync status")
-		return syncStatus, fmt.Errorf("Querying the sync status returned a non 2xx status code, code provided: %d", resp.StatusCode)
-	}
-
-	defer resp.Body.Close()
-	var body bytes.Buffer
-	buf := bufio.NewWriter(&body)
-	_, err = io.Copy(buf, resp.Body)
-
-	if err != nil {
-		return syncStatus, err
-	}
-
-	if err := json.Unmarshal(body.Bytes(), &syncStatus); err != nil {
-		loghelper.LogEndpoint(bcSync).WithFields(log.Fields{
-			"rawMessage": body.String(),
-			"err":        err,
-		}).Error("Unable to unmarshal sync status")
-		return syncStatus, err
-	}
-	return syncStatus, nil
-}
-
-// The response when checking the lighthouse nodes DB info: /lighthouse/database/info
-type LighthouseDatabaseInfo struct {
-	SchemaVersion int        `json:"schema_version"`
-	Config        LhDbConfig `json:"config"`
-	Split         LhDbSplit  `json:"split"`
-	Anchor        LhDbAnchor `json:"anchor"`
-}
-
-// The config field within the DatabaseInfo response.
-type LhDbConfig struct {
-	SlotsPerRestorePoint              int  `json:"slots_per_restore_point"`
-	SlotsPerRestorePointSetExplicitly bool `json:"slots_per_restore_point_set_explicitly"`
-	BlockCacheSize                    int  `json:"block_cache_size"`
-	CompactOnInit                     bool `json:"compact_on_init"`
-	CompactOnPrune                    bool `json:"compact_on_prune"`
-}
-
-// The split field within the DatabaseInfo response.
-type LhDbSplit struct {
-	Slot      string `json:"slot"`
-	StateRoot string `json:"state_root"`
-}
-
-// The anchor field within the DatabaseInfo response.
-type LhDbAnchor struct {
-	AnchorSlot        string `json:"anchor_slot"`
-	OldestBlockSlot   string `json:"oldest_block_slot"`
-	OldestBlockParent string `json:"oldest_block_parent"`
-	StateUpperLimit   string `json:"state_upper_limit"`
-	StateLowerLimit   string `json:"state_lower_limit"`
-}
-
-// This function will notify us what the head slot is.
-func (bc BeaconClient) queryHeadSlotInBeaconServer() (int, error) {
-	syncStatus, err := bc.QueryHeadSync()
-	if err != nil {
-		return 0, nil
-	}
-	headSlot, err := strconv.Atoi(syncStatus.Data.HeadSlot)
-	if err != nil {
-		return 0, nil
-	}
-	return headSlot, nil
-}
-
-// return the lighthouse Database Info
-func (bc BeaconClient) queryLighthouseDbInfo() (LighthouseDatabaseInfo, error) {
-	var dbInfo LighthouseDatabaseInfo
-
-	lhDbInfo := bc.ServerEndpoint + LhDbInfoEndpoint
-	resp, err := http.Get(lhDbInfo)
-
-	if err != nil {
-		loghelper.LogEndpoint(lhDbInfo).Error("Unable to get the lighthouse database information")
-		return dbInfo, err
-	}
-
-	if resp.StatusCode < 200 || resp.StatusCode > 299 {
-		loghelper.LogEndpoint(lhDbInfo).WithFields(log.Fields{"returnCode": resp.StatusCode}).Error("Error when getting the lighthouse database information")
-		return dbInfo, fmt.Errorf("Querying the lighthouse database information returned a non 2xx status code, code provided: %d", resp.StatusCode)
-	}
-
-	defer resp.Body.Close()
-	var body bytes.Buffer
-	buf := bufio.NewWriter(&body)
-	_, err = io.Copy(buf, resp.Body)
-	if err != nil {
-		return dbInfo, err
-	}
-
-	if err := json.Unmarshal(body.Bytes(), &dbInfo); err != nil {
-		loghelper.LogEndpoint(lhDbInfo).WithFields(log.Fields{
-			"rawMessage": body.String(),
-			"err":        err,
-		}).Error("Unable to unmarshal the lighthouse database information")
-		return dbInfo, err
-	}
-	return dbInfo, nil
-}
-
-// This function will tell us what the latest slot is that the beacon server has available. This is important as
-// it will ensure us that we have all slots prior to the given slot.
-func (bc BeaconClient) GetLatestSlotInBeaconServer(beaconServerType string) (int, error) {
-	switch strings.ToLower(beaconServerType) {
-	case "lighthouse":
-		headSlot, err := bc.queryHeadSlotInBeaconServer()
-		if err != nil {
-			return 0, err
-		}
-		lhDb, err := bc.queryLighthouseDbInfo()
-		if err != nil {
-			return 0, err
-		}
-		if lhDb.Anchor == (LhDbAnchor{}) {
-			//atomic.StoreInt64(&bc.LatestSlotInBeaconServer, int64(headSlot))
-			log.WithFields(log.Fields{
-				"headSlot": headSlot,
-			}).Info("Anchor is nil, the lighthouse client has all the nodes from genesis to head.")
-			return headSlot, nil
-		} else {
-			log.WithFields(log.Fields{
-				"lhDb.Anchor": lhDb.Anchor,
-			}).Info(LighthouseMissingSlots.Error())
-			log.Info("We will add a feature down the road to wait for anchor to be null, if its needed.")
-			return 0, LighthouseMissingSlots
-		}
-	default:
-		log.WithFields(log.Fields{"BeaconServerType": beaconServerType}).Error(MissingBeaconServerType.Error())
-		return 0, MissingBeaconServerType
-	}
-}
-
-// A wrapper function for updating the latest slot.
-func (bc BeaconClient) UpdateLatestSlotInBeaconServer(headSlot int64) {
-	curr := atomic.LoadInt64(&bc.LatestSlotInBeaconServer)
-	log.WithFields(log.Fields{
-		"Previous Latest Slot": curr,
-		"New Latest Slot":      headSlot,
-	}).Debug("Swapping Head Slot")
-	atomic.SwapInt64(&bc.LatestSlotInBeaconServer, int64(headSlot))
-}
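The sync payload documented in the comment above decodes directly into the Sync/SyncData structs. A self-contained sketch using that exact example payload (struct shapes copied from the file):

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the Sync/SyncData shapes from checksyncstatus.go.
type Sync struct {
	Data SyncData `json:"data"`
}

type SyncData struct {
	IsSync       bool   `json:"is_syncing"`
	HeadSlot     string `json:"head_slot"`
	SyncDistance string `json:"sync_distance"`
}

func main() {
	// Example payload taken from the comment in the source file.
	raw := []byte(`{"data":{"is_syncing":true,"head_slot":"62528","sync_distance":"3734299"}}`)
	var s Sync
	if err := json.Unmarshal(raw, &s); err != nil {
		panic(err)
	}
	fmt.Printf("syncing=%v head=%s distance=%s\n", s.Data.IsSync, s.Data.HeadSlot, s.Data.SyncDistance)
}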
pkg/beaconclient/checksyncstatus.go (new file, 72 lines)
@@ -0,0 +1,72 @@
+// VulcanizeDB
+// Copyright © 2022 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+package beaconclient
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+
+	log "github.com/sirupsen/logrus"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
+)
+
+// The sync response
+type Sync struct {
+	Data SyncData `json:"data"`
+}
+
+// The sync data
+type SyncData struct {
+	IsSync       bool   `json:"is_syncing"`
+	HeadSlot     string `json:"head_slot"`
+	SyncDistance string `json:"sync_distance"`
+}
+
+// This function will check to see if we are synced up with the head of chain.
+//{"data":{"is_syncing":true,"head_slot":"62528","sync_distance":"3734299"}}
+func (bc BeaconClient) CheckHeadSync() (bool, error) {
+	bcSync := bc.ServerEndpoint + BcSyncStatusEndpoint
+	resp, err := http.Get(bcSync)
+
+	if err != nil {
+		loghelper.LogEndpoint(bcSync).Error("Unable to check the sync status")
+		return true, err
+	}
+
+	if resp.StatusCode < 200 || resp.StatusCode > 299 {
+		loghelper.LogEndpoint(bcSync).WithFields(log.Fields{"returnCode": resp.StatusCode}).Error("Error when getting the sync status")
+		return true, fmt.Errorf("Querying the sync status returned a non 2xx status code, code provided: %d", resp.StatusCode)
+	}
+
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return true, err
+	}
+
+	var syncStatus Sync
+	if err := json.Unmarshal(body, &syncStatus); err != nil {
+		loghelper.LogEndpoint(bcSync).WithFields(log.Fields{
+			"rawMessage": string(body),
+			"err":        err,
+		}).Error("Unable to unmarshal sync status")
+		return true, err
+	}
+
+	return syncStatus.Data.IsSync, nil
+}
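CheckHeadSync is the primitive behind waiting for the beacon node before indexing begins. A minimal runnable sketch of such a boot-style wait loop; checkHeadSync is a hypothetical stand-in for bc.CheckHeadSync(), and the one-second backoff echoes the bootRetryInterval in the removed config:

package main

import (
	"fmt"
	"time"
)

// checkHeadSync is a hypothetical stand-in for bc.CheckHeadSync().
func checkHeadSync() (bool, error) { return false, nil }

func main() {
	// Wait until the beacon node reports it is no longer syncing.
	for {
		syncing, err := checkHeadSync()
		if err != nil || syncing {
			time.Sleep(time.Second) // retry with a fixed backoff
			continue
		}
		break
	}
	fmt.Println("beacon node is synced; safe to start indexing")
}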
@@ -1,441 +0,0 @@
package beaconclient

import (
	"bytes"
	"errors"
	"github.com/protolambda/zrnt/eth2/beacon/altair"
	"github.com/protolambda/zrnt/eth2/beacon/bellatrix"
	"github.com/protolambda/zrnt/eth2/beacon/common"
	"github.com/protolambda/zrnt/eth2/beacon/phase0"
	"github.com/protolambda/zrnt/eth2/configs"
	"github.com/protolambda/ztyp/codec"
	"github.com/protolambda/ztyp/tree"
	log "github.com/sirupsen/logrus"
	"strconv"
)

type Eth1Data common.Eth1Data
type Root common.Root
type Signature common.BLSSignature
type Slot uint64
type Epoch uint64
type ExecutionPayloadHeader common.ExecutionPayloadHeader

func ParseSlot(v string) (Slot, error) {
	slotNum, err := strconv.ParseUint(v, 10, 64)
	return Slot(slotNum), err
}

func (s *Slot) Format() string {
	return strconv.FormatUint(uint64(*s), 10)
}

func (s *Slot) Number() uint64 {
	return uint64(*s)
}

func (s *Slot) Plus(v uint64) Slot {
	return Slot(v + s.Number())
}

func (s *Slot) PlusInt(v int) Slot {
	return s.Plus(uint64(v))
}

func (e *Epoch) Format() string {
	return strconv.FormatUint(uint64(*e), 10)
}

type BeaconBlock struct {
	spec      *common.Spec
	bellatrix *bellatrix.BeaconBlock
	altair    *altair.BeaconBlock
	phase0    *phase0.BeaconBlock
}

type BeaconBlockBody struct {
	spec      *common.Spec
	bellatrix *bellatrix.BeaconBlockBody
	altair    *altair.BeaconBlockBody
	phase0    *phase0.BeaconBlockBody
}

type BeaconState struct {
	spec      *common.Spec
	bellatrix *bellatrix.BeaconState
	altair    *altair.BeaconState
	phase0    *phase0.BeaconState
}

type SignedBeaconBlock struct {
	spec      *common.Spec
	bellatrix *bellatrix.SignedBeaconBlock
	altair    *altair.SignedBeaconBlock
	phase0    *phase0.SignedBeaconBlock
}

func (s *SignedBeaconBlock) UnmarshalSSZ(ssz []byte) error {
	spec := chooseSpec(s.spec)

	var bellatrix bellatrix.SignedBeaconBlock
	err := bellatrix.Deserialize(spec, makeDecodingReader(ssz))
	if nil == err {
		s.bellatrix = &bellatrix
		s.altair = nil
		s.phase0 = nil
		log.Info("Unmarshalled Bellatrix SignedBeaconBlock")
		return nil
	}

	var altair altair.SignedBeaconBlock
	err = altair.Deserialize(spec, makeDecodingReader(ssz))
	if nil == err {
		s.bellatrix = nil
		s.altair = &altair
		s.phase0 = nil
		log.Info("Unmarshalled Altair SignedBeaconBlock")
		return nil
	}

	var phase0 phase0.SignedBeaconBlock
	err = phase0.Deserialize(spec, makeDecodingReader(ssz))
	if nil == err {
		s.bellatrix = nil
		s.altair = nil
		s.phase0 = &phase0
		log.Info("Unmarshalled Phase0 SignedBeaconBlock")
		return nil
	}

	s.bellatrix = nil
	s.altair = nil
	s.phase0 = nil

	log.Warning("Unable to unmarshal SignedBeaconBlock")
	return err
}

func (s *SignedBeaconBlock) MarshalSSZ() ([]byte, error) {
	spec := chooseSpec(s.spec)
	var err error
	var buf bytes.Buffer
	encodingWriter := codec.NewEncodingWriter(&buf)

	if s.IsBellatrix() {
		err = s.bellatrix.Serialize(spec, encodingWriter)
	}
	if s.IsAltair() {
		err = s.altair.Serialize(spec, encodingWriter)
	}
	if s.IsPhase0() {
		err = s.phase0.Serialize(spec, encodingWriter)
	}

	if err != nil {
		return nil, err
	}

	return buf.Bytes(), err
}

func (s *SignedBeaconBlock) IsBellatrix() bool {
	return s.bellatrix != nil
}

func (s *SignedBeaconBlock) IsAltair() bool {
	return s.altair != nil
}

func (s *SignedBeaconBlock) IsPhase0() bool {
	return s.phase0 != nil
}

func (s *SignedBeaconBlock) GetBellatrix() *bellatrix.SignedBeaconBlock {
	return s.bellatrix
}

func (s *SignedBeaconBlock) GetAltair() *altair.SignedBeaconBlock {
	return s.altair
}

func (s *SignedBeaconBlock) GetPhase0() *phase0.SignedBeaconBlock {
	return s.phase0
}

func (s *SignedBeaconBlock) Signature() Signature {
	if s.IsBellatrix() {
		return Signature(s.bellatrix.Signature)
	}

	if s.IsAltair() {
		return Signature(s.altair.Signature)
	}

	if s.IsPhase0() {
		return Signature(s.phase0.Signature)
	}

	return Signature{}
}

func (s *SignedBeaconBlock) Block() *BeaconBlock {
	if s.IsBellatrix() {
		return &BeaconBlock{bellatrix: &s.bellatrix.Message, spec: s.spec}
	}

	if s.IsAltair() {
		return &BeaconBlock{altair: &s.altair.Message, spec: s.spec}
	}

	if s.IsPhase0() {
		return &BeaconBlock{phase0: &s.phase0.Message, spec: s.spec}
	}

	return nil
}

func (b *BeaconBlock) IsBellatrix() bool {
	return b.bellatrix != nil
}

func (b *BeaconBlock) IsAltair() bool {
	return b.altair != nil
}

func (b *BeaconBlock) IsPhase0() bool {
	return b.phase0 != nil
}

func (s *BeaconBlock) GetBellatrix() *bellatrix.BeaconBlock {
	return s.bellatrix
}

func (s *BeaconBlock) GetAltair() *altair.BeaconBlock {
	return s.altair
}

func (s *BeaconBlock) GetPhase0() *phase0.BeaconBlock {
	return s.phase0
}

func (b *BeaconBlock) ParentRoot() Root {
	if b.IsBellatrix() {
		return Root(b.bellatrix.ParentRoot)
	}

	if b.IsAltair() {
		return Root(b.altair.ParentRoot)
	}

	if b.IsPhase0() {
		return Root(b.phase0.ParentRoot)
	}

	return Root{}
}

func (b *BeaconBlock) StateRoot() Root {
	if b.IsBellatrix() {
		return Root(b.bellatrix.StateRoot)
	}

	if b.IsAltair() {
		return Root(b.altair.StateRoot)
	}

	if b.IsPhase0() {
		return Root(b.phase0.StateRoot)
	}

	return Root{}
}

func (b *BeaconBlock) Body() *BeaconBlockBody {
	if b.IsBellatrix() {
		return &BeaconBlockBody{bellatrix: &b.bellatrix.Body, spec: b.spec}
	}

	if b.IsAltair() {
		return &BeaconBlockBody{altair: &b.altair.Body, spec: b.spec}
	}

	if b.IsPhase0() {
		return &BeaconBlockBody{phase0: &b.phase0.Body, spec: b.spec}
	}

	return nil
}

func (b *BeaconBlockBody) IsBellatrix() bool {
	return b.bellatrix != nil
}

func (b *BeaconBlockBody) IsAltair() bool {
	return b.altair != nil
}

func (b *BeaconBlockBody) IsPhase0() bool {
	return b.phase0 != nil
}

func (b *BeaconBlockBody) Eth1Data() Eth1Data {
	if b.IsBellatrix() {
		return Eth1Data(b.bellatrix.Eth1Data)
	}

	if b.IsAltair() {
		return Eth1Data(b.altair.Eth1Data)
	}

	if b.IsPhase0() {
		return Eth1Data(b.phase0.Eth1Data)
	}

	return Eth1Data{}
}

func (b *BeaconBlockBody) ExecutionPayloadHeader() *ExecutionPayloadHeader {
	if b.IsBellatrix() {
		payloadHeader := b.bellatrix.ExecutionPayload.Header(chooseSpec(b.spec))
		return (*ExecutionPayloadHeader)(payloadHeader)
	}

	return nil
}

func (b *BeaconBlock) HashTreeRoot() Root {
	spec := chooseSpec(b.spec)
	hashFn := tree.GetHashFn()

	if b.IsBellatrix() {
		return Root(b.bellatrix.HashTreeRoot(spec, hashFn))
	}

	if b.IsAltair() {
		return Root(b.altair.HashTreeRoot(spec, hashFn))
	}

	if b.IsPhase0() {
		return Root(b.phase0.HashTreeRoot(spec, hashFn))
	}

	return Root{}
}

func (s *BeaconState) UnmarshalSSZ(ssz []byte) error {
	spec := chooseSpec(s.spec)

	var bellatrix bellatrix.BeaconState
	err := bellatrix.Deserialize(spec, makeDecodingReader(ssz))
	if nil == err {
		s.bellatrix = &bellatrix
		s.altair = nil
		s.phase0 = nil
		log.Info("Unmarshalled Bellatrix BeaconState")
		return nil
	}

	var altair altair.BeaconState
	err = altair.Deserialize(spec, makeDecodingReader(ssz))
	if nil == err {
		s.bellatrix = nil
		s.altair = &altair
		s.phase0 = nil
		log.Info("Unmarshalled Altair BeaconState")
		return nil
	}

	var phase0 phase0.BeaconState
	err = phase0.Deserialize(spec, makeDecodingReader(ssz))
	if nil == err {
		s.bellatrix = nil
		s.altair = nil
		s.phase0 = &phase0
		log.Info("Unmarshalled Phase0 BeaconState")
		return nil
	}

	s.bellatrix = nil
	s.altair = nil
	s.phase0 = nil

	log.Warning("Unable to unmarshal BeaconState")
	return err
}

func (s *BeaconState) MarshalSSZ() ([]byte, error) {
	spec := chooseSpec(s.spec)
	var err error
	var buf bytes.Buffer
	encodingWriter := codec.NewEncodingWriter(&buf)

	if s.IsBellatrix() {
		err = s.bellatrix.Serialize(spec, encodingWriter)
	} else if s.IsAltair() {
		err = s.altair.Serialize(spec, encodingWriter)
	} else if s.IsPhase0() {
		err = s.phase0.Serialize(spec, encodingWriter)
	} else {
		err = errors.New("BeaconState not set")
	}

	if nil != err {
		return nil, err
	}

	return buf.Bytes(), nil
}

func (s *BeaconState) IsBellatrix() bool {
	return s.bellatrix != nil
}

func (s *BeaconState) IsAltair() bool {
	return s.altair != nil
}

func (s *BeaconState) IsPhase0() bool {
	return s.phase0 != nil
}

func (s *BeaconState) HashTreeRoot() Root {
	spec := chooseSpec(s.spec)
	hashFn := tree.GetHashFn()

	if s.IsBellatrix() {
		return Root(s.bellatrix.HashTreeRoot(spec, hashFn))
	}

	if s.IsAltair() {
		return Root(s.altair.HashTreeRoot(spec, hashFn))
	}

	if s.IsPhase0() {
		return Root(s.phase0.HashTreeRoot(spec, hashFn))
	}

	return Root{}
}

func (s *BeaconState) GetBellatrix() *bellatrix.BeaconState {
	return s.bellatrix
}

func (s *BeaconState) GetAltair() *altair.BeaconState {
	return s.altair
}

func (s *BeaconState) GetPhase0() *phase0.BeaconState {
	return s.phase0
}

func chooseSpec(spec *common.Spec) *common.Spec {
	if nil == spec {
		return configs.Mainnet
	}
	return spec
}

func makeDecodingReader(ssz []byte) *codec.DecodingReader {
	return codec.NewDecodingReader(bytes.NewReader(ssz), uint64(len(ssz)))
}
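The decoders in this deleted file detect the fork by trial: Bellatrix is attempted first, then Altair, then Phase0. A hedged usage sketch (the caller and rawSsz are hypothetical; only the types and methods above are from the source):

	// Sketch: decode SSZ bytes from a beacon node without knowing the fork.
	func decodeAnyFork(rawSsz []byte) (*SignedBeaconBlock, error) {
		var block SignedBeaconBlock
		if err := block.UnmarshalSSZ(rawSsz); err != nil {
			return nil, err // no known fork could decode the payload
		}
		if block.IsBellatrix() {
			// Only Bellatrix blocks expose an execution payload header.
			if header := block.Block().Body().ExecutionPayloadHeader(); header != nil {
				log.Info("payload block number: ", header.BlockNumber)
			}
		}
		return &block, nil
	}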
@@ -18,69 +18,46 @@ package beaconclient
 import (
 	"context"
 	"fmt"
-	"github.com/jackc/pgx/v4"
+	"strconv"
 
 	log "github.com/sirupsen/logrus"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
-	"golang.org/x/sync/errgroup"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
 )
 
 var (
-	// Statement to upsert to the eth_beacon.slots table.
+	// Statement to upsert to the ethcl.slots table.
 	UpsertSlotsStmt string = `
-INSERT INTO eth_beacon.slots (epoch, slot, block_root, state_root, status)
+INSERT INTO ethcl.slots (epoch, slot, block_root, state_root, status)
 VALUES ($1, $2, $3, $4, $5) ON CONFLICT (slot, block_root) DO NOTHING`
-	// Statement to upsert to the eth_beacon.signed_blocks table.
+	// Statement to upsert to the ethcl.signed_beacon_blocks table.
 	UpsertSignedBeaconBlockStmt string = `
-INSERT INTO eth_beacon.signed_block (slot, block_root, parent_block_root, eth1_data_block_hash, mh_key)
+INSERT INTO ethcl.signed_beacon_block (slot, block_root, parent_block_root, eth1_block_hash, mh_key)
 VALUES ($1, $2, $3, $4, $5) ON CONFLICT (slot, block_root) DO NOTHING`
-	UpsertSignedBeaconBlockWithPayloadStmt string = `
-INSERT INTO eth_beacon.signed_block (slot, block_root, parent_block_root, eth1_data_block_hash, mh_key,
-	payload_block_number, payload_timestamp, payload_block_hash,
-	payload_parent_hash, payload_state_root, payload_receipts_root,
-	payload_transactions_root)
-VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) ON CONFLICT (slot, block_root) DO NOTHING`
-	// Statement to upsert to the eth_beacon.state table.
+	// Statement to upsert to the ethcl.beacon_state table.
 	UpsertBeaconState string = `
-INSERT INTO eth_beacon.state (slot, state_root, mh_key)
+INSERT INTO ethcl.beacon_state (slot, state_root, mh_key)
 VALUES ($1, $2, $3) ON CONFLICT (slot, state_root) DO NOTHING`
 	// Statement to upsert to the public.blocks table.
 	UpsertBlocksStmt string = `
 INSERT INTO public.blocks (key, data)
 VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`
-	UpdateForkedStmt string = `UPDATE eth_beacon.slots
+	UpdateForkedStmt string = `UPDATE ethcl.slots
 	SET status='forked'
 	WHERE slot=$1 AND block_root<>$2
 	RETURNING block_root;`
-	UpdateProposedStmt string = `UPDATE eth_beacon.slots
+	UpdateProposedStmt string = `UPDATE ethcl.slots
 	SET status='proposed'
 	WHERE slot=$1 AND block_root=$2
 	RETURNING block_root;`
 	CheckProposedStmt string = `SELECT slot, block_root
-	FROM eth_beacon.slots
+	FROM ethcl.slots
 	WHERE slot=$1 AND block_root=$2;`
-	// Check to see if the slot and block_root exist in eth_beacon.signed_block
-	CheckSignedBeaconBlockStmt string = `SELECT slot, block_root
-	FROM eth_beacon.signed_block
-	WHERE slot=$1 AND block_root=$2`
-	// Check to see if the slot and state_root exist in eth_beacon.state
-	CheckBeaconStateStmt string = `SELECT slot, state_root
-	FROM eth_beacon.state
-	WHERE slot=$1 AND state_root=$2`
-	// Used to get a single slot from the table if it exists
-	QueryBySlotStmt string = `SELECT slot
-	FROM eth_beacon.slots
-	WHERE slot=$1`
 	// Statement to insert known_gaps. We don't pass in timestamp, we let the server take care of that one.
 	UpsertKnownGapsStmt string = `
-INSERT INTO eth_beacon.known_gaps (start_slot, end_slot, checked_out, reprocessing_error, entry_error, entry_process)
+INSERT INTO ethcl.known_gaps (start_slot, end_slot, checked_out, reprocessing_error, entry_error, entry_process)
 VALUES ($1, $2, $3, $4, $5, $6) on CONFLICT (start_slot, end_slot) DO NOTHING`
-	UpsertKnownGapsErrorStmt string = `
-	UPDATE eth_beacon.known_gaps
-	SET reprocessing_error=$3, priority=priority+1
-	WHERE start_slot=$1 AND end_slot=$2;`
-	// Get the highest slot if one exists
-	QueryHighestSlotStmt string = "SELECT COALESCE(MAX(slot), 0) FROM eth_beacon.slots"
+	QueryHighestSlotStmt string = "SELECT COALESCE(MAX(slot), 0) FROM ethcl.slots"
 )
 
 // Put all functionality to prepare the write object
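Every upsert above relies on ON CONFLICT ... DO NOTHING, so re-indexing the same slot is idempotent: a replay is silently dropped on the unique key instead of failing. A hedged sketch of exercising one statement (values and helper are illustrative only; db is the package's sql.Database):

	// Sketch: running this twice leaves exactly one row behind, because of
	// ON CONFLICT (slot, block_root) DO NOTHING in UpsertSlotsStmt.
	func upsertSlotTwice(db sql.Database) error {
		ctx := context.Background()
		for i := 0; i < 2; i++ {
			if _, err := db.Exec(ctx, UpsertSlotsStmt,
				"4", "128", "0xblockRoot", "0xstateRoot", "proposed"); err != nil {
				return err
			}
		}
		return nil
	}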
@@ -88,33 +65,24 @@ VALUES ($1, $2, $3, $4, $5, $6) on CONFLICT (start_slot, end_slot) DO NOTHING`
 // Remove any of it from the processslot file.
 type DatabaseWriter struct {
 	Db                   sql.Database
-	Tx                   sql.Tx
-	Ctx                  context.Context
 	Metrics              *BeaconClientMetrics
 	DbSlots              *DbSlots
 	DbSignedBeaconBlock  *DbSignedBeaconBlock
 	DbBeaconState        *DbBeaconState
-	rawBeaconState       *[]byte
-	rawSignedBeaconBlock *[]byte
+	rawBeaconState       []byte
+	rawSignedBeaconBlock []byte
 }
 
-func CreateDatabaseWrite(db sql.Database, slot Slot, stateRoot string, blockRoot string, parentBlockRoot string,
-	eth1DataBlockHash string, payloadHeader *ExecutionPayloadHeader, status string, rawSignedBeaconBlock *[]byte, rawBeaconState *[]byte, metrics *BeaconClientMetrics) (*DatabaseWriter, error) {
-	ctx := context.Background()
-	tx, err := db.Begin(ctx)
-	if err != nil {
-		loghelper.LogError(err).Error("We are unable to Begin a SQL transaction")
-	}
+func CreateDatabaseWrite(db sql.Database, slot int, stateRoot string, blockRoot string, parentBlockRoot string,
+	eth1BlockHash string, status string, rawSignedBeaconBlock []byte, rawBeaconState []byte, metrics *BeaconClientMetrics) (*DatabaseWriter, error) {
 	dw := &DatabaseWriter{
 		Db:                   db,
-		Tx:                   tx,
-		Ctx:                  ctx,
 		rawBeaconState:       rawBeaconState,
 		rawSignedBeaconBlock: rawSignedBeaconBlock,
 		Metrics:              metrics,
 	}
 	dw.prepareSlotsModel(slot, stateRoot, blockRoot, status)
-	err = dw.prepareSignedBeaconBlockModel(slot, blockRoot, parentBlockRoot, eth1DataBlockHash, payloadHeader)
+	err := dw.prepareSignedBeaconBlockModel(slot, blockRoot, parentBlockRoot, eth1BlockHash)
 	if err != nil {
 		return nil, err
 	}
@@ -127,11 +95,11 @@ func CreateDatabaseWrite(db sql.Database, slot Slot, stateRoot string, blockRoot
 
 // Write functions to write each all together...
 // Should I do one atomic write?
-// Create the model for the eth_beacon.slots table
-func (dw *DatabaseWriter) prepareSlotsModel(slot Slot, stateRoot string, blockRoot string, status string) {
+// Create the model for the ethcl.slots table
+func (dw *DatabaseWriter) prepareSlotsModel(slot int, stateRoot string, blockRoot string, status string) {
 	dw.DbSlots = &DbSlots{
 		Epoch:     calculateEpoch(slot, bcSlotsPerEpoch),
-		Slot:      slot.Number(),
+		Slot:      strconv.Itoa(slot),
 		StateRoot: stateRoot,
 		BlockRoot: blockRoot,
 		Status:    status,
@@ -140,46 +108,31 @@ func (dw *DatabaseWriter) prepareSlotsModel(slot Slot, stateRoot string, blockRo
 }
 
-// Create the model for the eth_beacon.signed_block table.
-func (dw *DatabaseWriter) prepareSignedBeaconBlockModel(slot Slot, blockRoot string, parentBlockRoot string, eth1DataBlockHash string,
-	payloadHeader *ExecutionPayloadHeader) error {
+// Create the model for the ethcl.signed_beacon_block table.
+func (dw *DatabaseWriter) prepareSignedBeaconBlockModel(slot int, blockRoot string, parentBlockRoot string, eth1BlockHash string) error {
 	mhKey, err := MultihashKeyFromSSZRoot([]byte(dw.DbSlots.BlockRoot))
 	if err != nil {
 		return err
 	}
 	dw.DbSignedBeaconBlock = &DbSignedBeaconBlock{
-		Slot:              slot.Number(),
+		Slot:          strconv.Itoa(slot),
 		BlockRoot:         blockRoot,
 		ParentBlock:       parentBlockRoot,
-		Eth1DataBlockHash: eth1DataBlockHash,
+		Eth1BlockHash: eth1BlockHash,
 		MhKey:             mhKey,
-		ExecutionPayloadHeader: nil,
 	}
 
-	if nil != payloadHeader {
-		dw.DbSignedBeaconBlock.ExecutionPayloadHeader = &DbExecutionPayloadHeader{
-			BlockNumber:      uint64(payloadHeader.BlockNumber),
-			Timestamp:        uint64(payloadHeader.Timestamp),
-			BlockHash:        toHex(payloadHeader.BlockHash),
-			ParentHash:       toHex(payloadHeader.ParentHash),
-			StateRoot:        toHex(payloadHeader.StateRoot),
-			ReceiptsRoot:     toHex(payloadHeader.ReceiptsRoot),
-			TransactionsRoot: toHex(payloadHeader.TransactionsRoot),
-		}
-	}
-
 	log.Debug("dw.DbSignedBeaconBlock: ", dw.DbSignedBeaconBlock)
 	return nil
 }
 
-// Create the model for the eth_beacon.state table.
-func (dw *DatabaseWriter) prepareBeaconStateModel(slot Slot, stateRoot string) error {
+// Create the model for the ethcl.beacon_state table.
+func (dw *DatabaseWriter) prepareBeaconStateModel(slot int, stateRoot string) error {
 	mhKey, err := MultihashKeyFromSSZRoot([]byte(dw.DbSlots.StateRoot))
 	if err != nil {
 		return err
 	}
 	dw.DbBeaconState = &DbBeaconState{
-		Slot:      slot.Number(),
+		Slot:      strconv.Itoa(slot),
 		StateRoot: stateRoot,
 		MhKey:     mhKey,
 	}
@@ -187,72 +140,49 @@ func (dw *DatabaseWriter) prepareBeaconStateModel(slot Slot, stateRoot string) e
 	return nil
 }
 
-// Add all the data for a given slot to a SQL transaction.
-// Originally it wrote to each table individually.
-func (dw *DatabaseWriter) transactFullSlot() error {
+// Write all the data for a given slot.
+func (dw *DatabaseWriter) writeFullSlot() error {
 	// If an error occurs, write to knownGaps table.
 	log.WithFields(log.Fields{
 		"slot": dw.DbSlots.Slot,
 	}).Debug("Starting to write to the DB.")
-	err := dw.transactSlots()
+	err := dw.writeSlots()
 	if err != nil {
-		loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("We couldn't write to the eth_beacon.slots table...")
 		return err
 	}
-	log.Debug("We finished writing to the eth_beacon.slots table.")
 	if dw.DbSlots.Status != "skipped" {
-		//errG, _ := errgroup.WithContext(context.Background())
-		//errG.Go(func() error {
-		//	return dw.transactSignedBeaconBlocks()
-		//})
-		//errG.Go(func() error {
-		//	return dw.transactBeaconState()
-		//})
-		//if err := errG.Wait(); err != nil {
-		//	loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("We couldn't write to the eth_beacon block or state table...")
-		//	return err
-		//}
-		// Might want to seperate writing to public.blocks so we can do this concurrently...
-		// Cant concurrently write because we are using a transaction.
-		err := dw.transactSignedBeaconBlocks()
+		err = dw.writeSignedBeaconBlocks()
 		if err != nil {
-			loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("We couldn't write to the eth_beacon block table...")
 			return err
 		}
-		err = dw.transactBeaconState()
+		err = dw.writeBeaconState()
 		if err != nil {
-			loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("We couldn't write to the eth_beacon state table...")
 			return err
 		}
 	}
-	dw.Metrics.IncrementSlotInserts(1)
+	dw.Metrics.IncrementHeadTrackingInserts(1)
 	return nil
 }
 
-// Add data for the eth_beacon.slots table to a transaction. For now this is only one function.
+// Write the information for the generic slots table. For now this is only one function.
 // But in the future if we need to incorporate any FK's or perform any actions to write to the
 // slots table we can do it all here.
-func (dw *DatabaseWriter) transactSlots() error {
+func (dw *DatabaseWriter) writeSlots() error {
 	return dw.upsertSlots()
 }
 
-// Upsert to the eth_beacon.slots table.
+// Upsert to the ethcl.slots table.
 func (dw *DatabaseWriter) upsertSlots() error {
-	_, err := dw.Tx.Exec(dw.Ctx, UpsertSlotsStmt, dw.DbSlots.Epoch, dw.DbSlots.Slot, dw.DbSlots.BlockRoot, dw.DbSlots.StateRoot, dw.DbSlots.Status)
+	_, err := dw.Db.Exec(context.Background(), UpsertSlotsStmt, dw.DbSlots.Epoch, dw.DbSlots.Slot, dw.DbSlots.BlockRoot, dw.DbSlots.StateRoot, dw.DbSlots.Status)
 	if err != nil {
-		loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("Unable to write to the slot to the eth_beacon.slots table")
+		loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("Unable to write to the slot to the ethcl.slots table")
		return err
 	}
 	return nil
 }
 
-// Add the information for the signed_block to a transaction.
-func (dw *DatabaseWriter) transactSignedBeaconBlocks() error {
-	if nil == dw.rawSignedBeaconBlock || len(*dw.rawSignedBeaconBlock) == 0 {
-		log.Warn("Skipping writing of empty BeaconBlock.")
-		return nil
-	}
-
+// Write the information for the signed_beacon_block.
+func (dw *DatabaseWriter) writeSignedBeaconBlocks() error {
 	err := dw.upsertPublicBlocks(dw.DbSignedBeaconBlock.MhKey, dw.rawSignedBeaconBlock)
 	if err != nil {
 		return err
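Note the design shift visible above: main routes every per-slot write through a single transaction (the Tx/Ctx fields added to DatabaseWriter), while v0.1.0 executed each upsert directly against the pool; the commented-out errgroup block records why concurrent writes were abandoned once a shared transaction was adopted. A sketch of the transactional pattern, assuming the package's sql wrappers behave like pgx (Rollback after a successful Commit is a harmless no-op):

	// Hypothetical helper, not from the source: write a slot all-or-nothing.
	func writeSlotAtomically(db sql.Database, dw *DatabaseWriter) error {
		ctx := context.Background()
		tx, err := db.Begin(ctx) // one transaction per slot
		if err != nil {
			return err
		}
		// Deferred rollback undoes everything if any statement below fails.
		defer tx.Rollback(ctx)
		if _, err := tx.Exec(ctx, UpsertSlotsStmt, dw.DbSlots.Epoch, dw.DbSlots.Slot,
			dw.DbSlots.BlockRoot, dw.DbSlots.StateRoot, dw.DbSlots.Status); err != nil {
			return err
		}
		// ...the block and state upserts would share the same tx here...
		return tx.Commit(ctx)
	}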
@@ -265,8 +195,8 @@ func (dw *DatabaseWriter) transactSignedBeaconBlocks() error {
 }
 
 // Upsert to public.blocks.
-func (dw *DatabaseWriter) upsertPublicBlocks(key string, data *[]byte) error {
-	_, err := dw.Tx.Exec(dw.Ctx, UpsertBlocksStmt, key, *data)
+func (dw *DatabaseWriter) upsertPublicBlocks(key string, data []byte) error {
+	_, err := dw.Db.Exec(context.Background(), UpsertBlocksStmt, key, data)
 	if err != nil {
 		loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("Unable to write to the slot to the public.blocks table")
 		return err
@@ -274,50 +204,18 @@ func (dw *DatabaseWriter) upsertPublicBlocks(key string, data *[]byte) error {
 	return nil
 }
 
-// Upsert to the eth_beacon.signed_block table.
+// Upsert to the ethcl.signed_beacon_block table.
 func (dw *DatabaseWriter) upsertSignedBeaconBlock() error {
-	block := dw.DbSignedBeaconBlock
-	var err error
-	if nil != block.ExecutionPayloadHeader {
-		_, err = dw.Tx.Exec(dw.Ctx,
-			UpsertSignedBeaconBlockWithPayloadStmt,
-			block.Slot,
-			block.BlockRoot,
-			block.ParentBlock,
-			block.Eth1DataBlockHash,
-			block.MhKey,
-			block.ExecutionPayloadHeader.BlockNumber,
-			block.ExecutionPayloadHeader.Timestamp,
-			block.ExecutionPayloadHeader.BlockHash,
-			block.ExecutionPayloadHeader.ParentHash,
-			block.ExecutionPayloadHeader.StateRoot,
-			block.ExecutionPayloadHeader.ReceiptsRoot,
-			block.ExecutionPayloadHeader.TransactionsRoot,
-		)
-	} else {
-		_, err = dw.Tx.Exec(dw.Ctx,
-			UpsertSignedBeaconBlockStmt,
-			block.Slot,
-			block.BlockRoot,
-			block.ParentBlock,
-			block.Eth1DataBlockHash,
-			block.MhKey,
-		)
-	}
+	_, err := dw.Db.Exec(context.Background(), UpsertSignedBeaconBlockStmt, dw.DbSignedBeaconBlock.Slot, dw.DbSignedBeaconBlock.BlockRoot, dw.DbSignedBeaconBlock.ParentBlock, dw.DbSignedBeaconBlock.Eth1BlockHash, dw.DbSignedBeaconBlock.MhKey)
 	if err != nil {
-		loghelper.LogSlotError(dw.DbSlots.Slot, err).WithFields(log.Fields{"block_root": block.BlockRoot}).Error("Unable to write to the slot to the eth_beacon.signed_block table")
+		loghelper.LogSlotError(dw.DbSlots.Slot, err).WithFields(log.Fields{"block_root": dw.DbSignedBeaconBlock.BlockRoot}).Error("Unable to write to the slot to the ethcl.signed_beacon_block table")
 		return err
 	}
 	return nil
 }
 
-// Add the information for the state to a transaction.
-func (dw *DatabaseWriter) transactBeaconState() error {
-	if nil == dw.rawBeaconState || len(*dw.rawBeaconState) == 0 {
-		log.Warn("Skipping writing of empty BeaconState.")
-		return nil
-	}
-
+// Write the information for the beacon_state.
+func (dw *DatabaseWriter) writeBeaconState() error {
 	err := dw.upsertPublicBlocks(dw.DbBeaconState.MhKey, dw.rawBeaconState)
 	if err != nil {
 		return err
@@ -329,197 +227,168 @@ func (dw *DatabaseWriter) transactBeaconState() error {
 	return nil
 }
 
-// Upsert to the eth_beacon.state table.
+// Upsert to the ethcl.beacon_state table.
 func (dw *DatabaseWriter) upsertBeaconState() error {
-	_, err := dw.Tx.Exec(dw.Ctx, UpsertBeaconState, dw.DbBeaconState.Slot, dw.DbBeaconState.StateRoot, dw.DbBeaconState.MhKey)
+	_, err := dw.Db.Exec(context.Background(), UpsertBeaconState, dw.DbBeaconState.Slot, dw.DbBeaconState.StateRoot, dw.DbBeaconState.MhKey)
 	if err != nil {
-		loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("Unable to write to the slot to the eth_beacon.state table")
+		loghelper.LogSlotError(dw.DbSlots.Slot, err).Error("Unable to write to the slot to the ethcl.beacon_state table")
 		return err
 	}
 	return nil
 }
 
-// Update a given slot to be marked as forked within a transaction. Provide the slot and the latest latestBlockRoot.
+// Update a given slot to be marked as forked. Provide the slot and the latest latestBlockRoot.
 // We will mark all entries for the given slot that don't match the provided latestBlockRoot as forked.
-func transactReorgs(tx sql.Tx, ctx context.Context, slot Slot, latestBlockRoot string, metrics *BeaconClientMetrics) {
-	forkCount, err := updateForked(tx, ctx, slot, latestBlockRoot)
-	if err != nil {
-		loghelper.LogReorgError(slot.Number(), latestBlockRoot, err).Error("We ran into some trouble while updating all forks.")
-		transactKnownGaps(tx, ctx, 1, slot, slot, err, "reorg", metrics)
-	}
-	proposedCount, err := updateProposed(tx, ctx, slot, latestBlockRoot)
-	if err != nil {
-		loghelper.LogReorgError(slot.Number(), latestBlockRoot, err).Error("We ran into some trouble while trying to update the proposed slot.")
-		transactKnownGaps(tx, ctx, 1, slot, slot, err, "reorg", metrics)
-	}
+func writeReorgs(db sql.Database, slot string, latestBlockRoot string, metrics *BeaconClientMetrics) {
+	slotNum, strErr := strconv.Atoi(slot)
+	if strErr != nil {
+		loghelper.LogReorgError(slot, latestBlockRoot, strErr).Error("We can't convert the slot to an int...")
+	}
+
+	forkCount, err := updateForked(db, slot, latestBlockRoot)
+	if err != nil {
+		loghelper.LogReorgError(slot, latestBlockRoot, err).Error("We ran into some trouble while updating all forks.")
+		writeKnownGaps(db, 1, slotNum, slotNum, err, "reorg", metrics)
+	}
+	proposedCount, err := updateProposed(db, slot, latestBlockRoot)
+	if err != nil {
+		loghelper.LogReorgError(slot, latestBlockRoot, err).Error("We ran into some trouble while trying to update the proposed slot.")
+		writeKnownGaps(db, 1, slotNum, slotNum, err, "reorg", metrics)
+	}
 
 	if forkCount > 0 {
-		loghelper.LogReorg(slot.Number(), latestBlockRoot).WithFields(log.Fields{
+		loghelper.LogReorg(slot, latestBlockRoot).WithFields(log.Fields{
 			"forkCount": forkCount,
 		}).Info("Updated rows that were forked.")
 	} else {
-		loghelper.LogReorg(slot.Number(), latestBlockRoot).WithFields(log.Fields{
+		loghelper.LogReorg(slot, latestBlockRoot).WithFields(log.Fields{
 			"forkCount": forkCount,
 		}).Warn("There were no forked rows to update.")
 	}
 
 	if proposedCount == 1 {
-		loghelper.LogReorg(slot.Number(), latestBlockRoot).WithFields(log.Fields{
+		loghelper.LogReorg(slot, latestBlockRoot).WithFields(log.Fields{
 			"proposedCount": proposedCount,
 		}).Info("Updated the row that should have been marked as proposed.")
 	} else if proposedCount > 1 {
-		loghelper.LogReorg(slot.Number(), latestBlockRoot).WithFields(log.Fields{
+		loghelper.LogReorg(slot, latestBlockRoot).WithFields(log.Fields{
 			"proposedCount": proposedCount,
 		}).Error("Too many rows were marked as proposed!")
-		transactKnownGaps(tx, ctx, 1, slot, slot, fmt.Errorf("Too many rows were marked as unproposed."), "reorg", metrics)
+		writeKnownGaps(db, 1, slotNum, slotNum, err, "reorg", metrics)
 	} else if proposedCount == 0 {
-		transactKnownGaps(tx, ctx, 1, slot, slot, fmt.Errorf("Unable to find properly proposed row in DB"), "reorg", metrics)
-		loghelper.LogReorg(slot.Number(), latestBlockRoot).Info("Updated the row that should have been marked as proposed.")
-	}
-
-	metrics.IncrementReorgsInsert(1)
-}
-
-// Wrapper function that will create a transaction and execute the function.
-func writeReorgs(db sql.Database, slot Slot, latestBlockRoot string, metrics *BeaconClientMetrics) {
-	ctx := context.Background()
-	tx, err := db.Begin(ctx)
-	if err != nil {
-		loghelper.LogReorgError(slot.Number(), latestBlockRoot, err).Fatal("Unable to create a new transaction for reorgs")
-	}
-	defer func() {
-		err := tx.Rollback(ctx)
-		if err != nil && err != pgx.ErrTxClosed {
-			loghelper.LogError(err).Error("We were unable to Rollback a transaction for reorgs")
-		}
-	}()
-	transactReorgs(tx, ctx, slot, latestBlockRoot, metrics)
-	if err = tx.Commit(ctx); err != nil {
-		loghelper.LogReorgError(slot.Number(), latestBlockRoot, err).Fatal("Unable to execute the transaction for reorgs")
-	}
-}
+		var count int
+		err := db.QueryRow(context.Background(), CheckProposedStmt, slot, latestBlockRoot).Scan(count)
+		if err != nil {
+			loghelper.LogReorgError(slot, latestBlockRoot, err).Error("Unable to query proposed rows after reorg.")
+			writeKnownGaps(db, 1, slotNum, slotNum, err, "reorg", metrics)
+		} else if count != 1 {
+			loghelper.LogReorg(slot, latestBlockRoot).WithFields(log.Fields{
+				"proposedCount": count,
+			}).Warn("The proposed block was not marked as proposed...")
+			writeKnownGaps(db, 1, slotNum, slotNum, err, "reorg", metrics)
+		} else {
+			loghelper.LogReorg(slot, latestBlockRoot).Info("Updated the row that should have been marked as proposed.")
+		}
+	}
+
+	metrics.IncrementHeadTrackingReorgs(1)
+}
 
 // Update the slots table by marking the old slot's as forked.
-func updateForked(tx sql.Tx, ctx context.Context, slot Slot, latestBlockRoot string) (int64, error) {
-	res, err := tx.Exec(ctx, UpdateForkedStmt, slot, latestBlockRoot)
+func updateForked(db sql.Database, slot string, latestBlockRoot string) (int64, error) {
+	res, err := db.Exec(context.Background(), UpdateForkedStmt, slot, latestBlockRoot)
 	if err != nil {
-		loghelper.LogReorgError(slot.Number(), latestBlockRoot, err).Error("We are unable to update the eth_beacon.slots table with the forked slots")
+		loghelper.LogReorgError(slot, latestBlockRoot, err).Error("We are unable to update the ethcl.slots table with the forked slots")
 		return 0, err
 	}
 	count, err := res.RowsAffected()
 	if err != nil {
-		loghelper.LogReorgError(slot.Number(), latestBlockRoot, err).Error("Unable to figure out how many entries were marked as forked.")
+		loghelper.LogReorgError(slot, latestBlockRoot, err).Error("Unable to figure out how many entries were marked as forked.")
 		return 0, err
 	}
 	return count, err
 }
 
-// Mark a slot as proposed.
-func updateProposed(tx sql.Tx, ctx context.Context, slot Slot, latestBlockRoot string) (int64, error) {
-	res, err := tx.Exec(ctx, UpdateProposedStmt, slot, latestBlockRoot)
+func updateProposed(db sql.Database, slot string, latestBlockRoot string) (int64, error) {
+	res, err := db.Exec(context.Background(), UpdateProposedStmt, slot, latestBlockRoot)
 	if err != nil {
-		loghelper.LogReorgError(slot.Number(), latestBlockRoot, err).Error("We are unable to update the eth_beacon.slots table with the proposed slot.")
+		loghelper.LogReorgError(slot, latestBlockRoot, err).Error("We are unable to update the ethcl.slots table with the proposed slot.")
 		return 0, err
 	}
 	count, err := res.RowsAffected()
 	if err != nil {
-		loghelper.LogReorgError(slot.Number(), latestBlockRoot, err).Error("Unable to figure out how many entries were marked as proposed")
+		loghelper.LogReorgError(slot, latestBlockRoot, err).Error("Unable to figure out how many entries were marked as proposed")
 		return 0, err
 	}
 
 	return count, err
 }
 
-// A wrapper function to call upsertKnownGaps. This function will break down the range of known_gaps into
+// A wrapper function to call upsertKnownGaps. This function will break down the range of known_gaos into
 // smaller chunks. For example, instead of having an entry of 1-101, if we increment the entries by 10 slots, we would
 // have 10 entries as follows: 1-10, 11-20, etc...
-func transactKnownGaps(tx sql.Tx, ctx context.Context, tableIncrement int, startSlot Slot, endSlot Slot, entryError error, entryProcess string, metric *BeaconClientMetrics) {
-	var entryErrorMsg string
-	if entryError == nil {
-		entryErrorMsg = ""
-	} else {
-		entryErrorMsg = entryError.Error()
-	}
-	if endSlot.Number()-startSlot.Number() <= uint64(tableIncrement) {
+func writeKnownGaps(db sql.Database, tableIncrement int, startSlot int, endSlot int, entryError error, entryProcess string, metric *BeaconClientMetrics) {
+	if endSlot-startSlot <= tableIncrement {
 		kgModel := DbKnownGaps{
-			StartSlot:         startSlot.Number(),
-			EndSlot:           endSlot.Number(),
+			StartSlot:         strconv.Itoa(startSlot),
+			EndSlot:           strconv.Itoa(endSlot),
 			CheckedOut:        false,
 			ReprocessingError: "",
-			EntryError:        entryErrorMsg,
+			EntryError:        entryError.Error(),
 			EntryProcess:      entryProcess,
 		}
-		upsertKnownGaps(tx, ctx, kgModel, metric)
+		upsertKnownGaps(db, kgModel, metric)
 	} else {
-		totalSlots := endSlot.Number() - startSlot.Number()
+		totalSlots := endSlot - startSlot
 		var chunks int
-		chunks = int(totalSlots / uint64(tableIncrement))
-		if totalSlots%uint64(tableIncrement) != 0 {
+		chunks = totalSlots / tableIncrement
+		if totalSlots%tableIncrement != 0 {
 			chunks = chunks + 1
 		}
 
 		for i := 0; i < chunks; i++ {
-			var tempStart, tempEnd Slot
-			tempStart = startSlot.PlusInt(i * tableIncrement)
+			var tempStart, tempEnd int
+			tempStart = startSlot + (i * tableIncrement)
 			if i+1 == chunks {
 				tempEnd = endSlot
 			} else {
-				tempEnd = startSlot.PlusInt((i + 1) * tableIncrement)
+				tempEnd = startSlot + ((i + 1) * tableIncrement)
 			}
 			kgModel := DbKnownGaps{
-				StartSlot:         tempStart.Number(),
-				EndSlot:           tempEnd.Number(),
+				StartSlot:         strconv.Itoa(tempStart),
+				EndSlot:           strconv.Itoa(tempEnd),
 				CheckedOut:        false,
 				ReprocessingError: "",
-				EntryError:        entryErrorMsg,
+				EntryError:        entryError.Error(),
 				EntryProcess:      entryProcess,
 			}
-			upsertKnownGaps(tx, ctx, kgModel, metric)
+			upsertKnownGaps(db, kgModel, metric)
 		}
 	}
 }
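A quick check of the chunking arithmetic above, as a standalone sketch mirroring the loop (note that the ranges the code actually produces share their boundaries, 1-11, 11-21, ..., 91-101, rather than the 1-10, 11-20 the comment suggests):

	// Sketch: reproduce the chunk boundaries writeKnownGaps would emit.
	// For startSlot=1, endSlot=101, tableIncrement=10: totalSlots=100,
	// chunks=10, ranges [1,11], [11,21], ..., [91,101].
	func chunkRanges(startSlot, endSlot, tableIncrement int) [][2]int {
		totalSlots := endSlot - startSlot
		chunks := totalSlots / tableIncrement
		if totalSlots%tableIncrement != 0 {
			chunks++ // a final short chunk covers the remainder
		}
		out := make([][2]int, 0, chunks)
		for i := 0; i < chunks; i++ {
			tempStart := startSlot + i*tableIncrement
			tempEnd := startSlot + (i+1)*tableIncrement
			if i+1 == chunks {
				tempEnd = endSlot
			}
			out = append(out, [2]int{tempStart, tempEnd})
		}
		return out
	}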
-// Wrapper function, instead of adding the knownGaps entries to a transaction, it will
-// create the transaction and write it.
-func writeKnownGaps(db sql.Database, tableIncrement int, startSlot Slot, endSlot Slot, entryError error, entryProcess string, metric *BeaconClientMetrics) {
-	ctx := context.Background()
-	tx, err := db.Begin(ctx)
-	if err != nil {
-		loghelper.LogSlotRangeError(startSlot.Number(), endSlot.Number(), err).Fatal("Unable to create a new transaction for knownGaps")
-	}
-	defer func() {
-		err := tx.Rollback(ctx)
-		if err != nil && err != pgx.ErrTxClosed {
-			loghelper.LogError(err).Error("We were unable to Rollback a transaction for reorgs")
-		}
-	}()
-	transactKnownGaps(tx, ctx, tableIncrement, startSlot, endSlot, entryError, entryProcess, metric)
-	if err = tx.Commit(ctx); err != nil {
-		loghelper.LogSlotRangeError(startSlot.Number(), endSlot.Number(), err).Fatal("Unable to execute the transaction for knownGaps")
-	}
-}
-
-// A function to upsert a single entry to the eth_beacon.known_gaps table.
-func upsertKnownGaps(tx sql.Tx, ctx context.Context, knModel DbKnownGaps, metric *BeaconClientMetrics) {
-	_, err := tx.Exec(ctx, UpsertKnownGapsStmt, knModel.StartSlot, knModel.EndSlot,
+// A function to upsert a single entry to the ethcl.known_gaps table.
+func upsertKnownGaps(db sql.Database, knModel DbKnownGaps, metric *BeaconClientMetrics) {
+	_, err := db.Exec(context.Background(), UpsertKnownGapsStmt, knModel.StartSlot, knModel.EndSlot,
 		knModel.CheckedOut, knModel.ReprocessingError, knModel.EntryError, knModel.EntryProcess)
 	if err != nil {
 		log.WithFields(log.Fields{
 			"err":       err,
 			"startSlot": knModel.StartSlot,
 			"endSlot":   knModel.EndSlot,
-		}).Fatal("We are unable to write to the eth_beacon.known_gaps table!!! We will stop the application because of that.")
+		}).Fatal("We are unable to write to the ethcl.known_gaps table!!! We will stop the application because of that.")
 	}
 	log.WithFields(log.Fields{
 		"startSlot": knModel.StartSlot,
 		"endSlot":   knModel.EndSlot,
-	}).Warn("A new gap has been added to the eth_beacon.known_gaps table.")
-	metric.IncrementKnownGapsInserts(1)
+	}).Warn("A new gap has been added to the ethcl.known_gaps table.")
+	metric.IncrementHeadTrackingKnownGaps(1)
 }
 
 // A function to write the gap between the highest slot in the DB and the first processed slot.
-func writeStartUpGaps(db sql.Database, tableIncrement int, firstSlot Slot, metric *BeaconClientMetrics) {
-	var maxSlot Slot
+func writeStartUpGaps(db sql.Database, tableIncrement int, firstSlot int, metric *BeaconClientMetrics) {
+	var maxSlot int
 	err := db.QueryRow(context.Background(), QueryHighestSlotStmt).Scan(&maxSlot)
 	if err != nil {
 		loghelper.LogError(err).Fatal("Unable to get the max block from the DB. We must close the application or we might have undetected gaps.")
@@ -531,121 +400,12 @@ func writeStartUpGaps(db sql.Database, tableIncrement int, firstSlot Slot, metri
 		}).Fatal("Unable to get convert max block from DB to int. We must close the application or we might have undetected gaps.")
 	}
 	if maxSlot != firstSlot-1 {
-		if maxSlot < firstSlot-1 {
-			if maxSlot == 0 {
-				writeKnownGaps(db, tableIncrement, maxSlot, firstSlot-1, fmt.Errorf(""), "startup", metric)
-			} else {
-				writeKnownGaps(db, tableIncrement, maxSlot+1, firstSlot-1, fmt.Errorf(""), "startup", metric)
-			}
-		} else {
-			log.WithFields(log.Fields{
-				"maxSlot":   maxSlot,
-				"firstSlot": firstSlot,
-			}).Warn("The maxSlot in the DB is greater than or equal to the first Slot we are processing.")
-		}
+		writeKnownGaps(db, tableIncrement, maxSlot+1, firstSlot-1, fmt.Errorf(""), "startup", metric)
 	}
 }
 
-// A function to update a knownGap range with a reprocessing error.
-func updateKnownGapErrors(db sql.Database, startSlot Slot, endSlot Slot, reprocessingErr error, metric *BeaconClientMetrics) error {
-	res, err := db.Exec(context.Background(), UpsertKnownGapsErrorStmt, startSlot, endSlot, reprocessingErr.Error())
-	if err != nil {
-		loghelper.LogSlotRangeError(startSlot.Number(), endSlot.Number(), err).Error("Unable to update reprocessing_error")
-		return err
-	}
-	row, err := res.RowsAffected()
-	if err != nil {
-		loghelper.LogSlotRangeError(startSlot.Number(), endSlot.Number(), err).Error("Unable to count rows affected when trying to update reprocessing_error.")
-		return err
-	}
-	if row != 1 {
-		loghelper.LogSlotRangeError(startSlot.Number(), endSlot.Number(), err).WithFields(log.Fields{
-			"rowCount": row,
-		}).Error("The rows affected by the upsert for reprocessing_error is not 1.")
-		metric.IncrementKnownGapsReprocessError(1)
-		return err
-	}
-	metric.IncrementKnownGapsReprocessError(1)
-	return nil
-}
-
 // A quick helper function to calculate the epoch.
-func calculateEpoch(slot Slot, slotPerEpoch uint64) uint64 {
-	return slot.Number() / slotPerEpoch
-}
+func calculateEpoch(slot int, slotPerEpoch int) string {
+	epoch := slot / slotPerEpoch
+	return strconv.Itoa(epoch)
+}
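A worked check of the helper above, assuming the main-branch signature and mainnet's 32 slots per epoch (the constant here is illustrative; the code uses bcSlotsPerEpoch):

	// Sketch: integer division floors, so slots 96-127 all map to epoch 3.
	func epochOf(slot Slot) uint64 {
		return calculateEpoch(slot, 32) // e.g. epochOf(100) == 3
	}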
-// A helper function to check to see if the slot is processed.
-func isSlotProcessed(db sql.Database, checkProcessStmt string, slot Slot) (bool, error) {
-	processRow, err := db.Exec(context.Background(), checkProcessStmt, slot)
-	if err != nil {
-		return false, err
-	}
-	row, err := processRow.RowsAffected()
-	if err != nil {
-		return false, err
-	}
-	if row > 0 {
-		return true, nil
-	}
-	return false, nil
-}
-
-// Check to see if this slot is in the DB. Check eth_beacon.slots, eth_beacon.signed_block
-// and eth_beacon.state. If the slot exists, return true
-func IsSlotInDb(ctx context.Context, db sql.Database, slot Slot, blockRoot string, stateRoot string) (bool, error) {
-	var (
-		isInBeaconState       bool
-		isInSignedBeaconBlock bool
-	)
-	errG, _ := errgroup.WithContext(context.Background())
-	errG.Go(func() error {
-		select {
-		case <-ctx.Done():
-			return nil
-		default:
-			var err error
-			isInBeaconState, err = checkSlotAndRoot(db, CheckBeaconStateStmt, slot, stateRoot)
-			if err != nil {
-				loghelper.LogError(err).Error("Unable to check if the slot and stateroot exist in eth_beacon.state")
-			}
-			return err
-		}
-	})
-	errG.Go(func() error {
-		select {
-		case <-ctx.Done():
-			return nil
-		default:
-			var err error
-			isInSignedBeaconBlock, err = checkSlotAndRoot(db, CheckSignedBeaconBlockStmt, slot, blockRoot)
-			if err != nil {
-				loghelper.LogError(err).Error("Unable to check if the slot and block_root exist in eth_beacon.signed_block")
-			}
-			return err
-		}
-	})
-	if err := errG.Wait(); err != nil {
-		return false, err
-	}
-	if isInBeaconState && isInSignedBeaconBlock {
-		return true, nil
-	}
-	return false, nil
-}
-
-// Provide a statement, slot, and root, and this function will check to see
-// if the slot and root exist in the table.
-func checkSlotAndRoot(db sql.Database, statement string, slot Slot, root string) (bool, error) {
-	processRow, err := db.Exec(context.Background(), statement, slot, root)
-	if err != nil {
-		return false, err
-	}
-	row, err := processRow.RowsAffected()
-	if err != nil {
-		return false, err
-	}
-	if row > 0 {
-		return true, nil
-	}
-	return false, nil
-}
@ -1,225 +0,0 @@
|
|||||||
package beaconclient_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/hex"
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
|
||||||
"github.com/ethereum/go-ethereum/ethclient"
|
|
||||||
. "github.com/onsi/ginkgo/v2"
|
|
||||||
. "github.com/onsi/gomega"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
zcommon "github.com/protolambda/zrnt/eth2/beacon/common"
|
|
||||||
"github.com/protolambda/zrnt/eth2/configs"
|
|
||||||
"github.com/protolambda/ztyp/tree"
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
|
|
||||||
"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
|
|
||||||
"math/big"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var _ = Describe("e2emerge", Label("e2e"), func() {
|
|
||||||
e2eConfig := TestConfig
|
|
||||||
e2eConfig.port = getEnvInt("TEST_E2E_LIGHTHOUSE_PORT", 5052)
|
|
||||||
e2eConfig.performBeaconStateProcessing = false
|
|
||||||
e2eConfig.performBeaconBlockProcessing = true
|
|
||||||
|
|
||||||
level, _ := log.ParseLevel("debug")
|
|
||||||
log.SetLevel(level)
|
|
||||||
|
|
||||||
Describe("Run the application against a Merge testnet", func() {
|
|
||||||
Context("When we send a TX to geth", func() {
|
|
||||||
It("We should see the TX included in the ExecutionPayload of a BeaconBlock", func() {
|
|
||||||
bc := setUpTest(e2eConfig, "0")
|
|
||||||
go bc.CaptureHead()
|
|
||||||
|
|
||||||
tx, _ := sendTestTx()
|
|
||||||
beaconBlock := waitForTxToBeIndexed(bc.Db, tx)
|
|
||||||
Expect(beaconBlock).ToNot(BeNil())
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
type SentTx struct {
|
|
||||||
hash string
|
|
||||||
raw []byte
|
|
||||||
blockNo uint64
|
|
||||||
blockHash string
|
|
||||||
txIndex uint
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tx *SentTx) RawHex() string {
|
|
||||||
return "0x" + hex.EncodeToString(tx.raw)
|
|
||||||
}
|
|
||||||
|
|
||||||
func waitForTxToBeIndexed(db sql.Database, tx *SentTx) *beaconclient.DbSignedBeaconBlock {
|
|
||||||
var beaconBlock *beaconclient.DbSignedBeaconBlock = nil
|
|
||||||
for i := 0; i < 30; i++ {
|
|
||||||
time.Sleep(time.Second)
|
|
||||||
record := lookForTxInDb(db, tx)
|
|
||||||
if nil != record {
|
|
||||||
beaconBlock = record
|
|
||||||
log.Debugf("Found ETH1 TX %s in SignedBeaconBlock %d/%s", tx.hash, beaconBlock.Slot, beaconBlock.BlockRoot)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return beaconBlock
|
|
||||||
}
|
|
||||||
|
|
||||||
func lookForTxInDb(db sql.Database, tx *SentTx) *beaconclient.DbSignedBeaconBlock {
|
|
||||||
sqlStatement := `SELECT * FROM eth_beacon.signed_block WHERE
|
|
||||||
payload_block_number = $1 AND
|
|
||||||
payload_block_hash = $2 AND
|
|
||||||
payload_transactions_root = $3`
|
|
||||||
|
|
||||||
// We can pre-calculate the root and query on it because we are only sending a single TX at a time.
|
|
||||||
// Otherwise we would need to lookup the root by block num+hash, then do a proof that its txs
|
|
||||||
// root includes our TX.
|
|
||||||
var ptxs = zcommon.PayloadTransactions{tx.raw}
|
|
||||||
txRoot := ptxs.HashTreeRoot(configs.Mainnet, tree.GetHashFn())
|
|
||||||
|
|
||||||
var slot uint64
|
|
||||||
var blockRoot, parentBlock, eth1DataBlockHash, mhKey string
|
|
||||||
|
|
||||||
var blockNumber, timestamp uint64
|
|
||||||
var blockHash, parentHash, stateRoot, receiptsRoot, transactionsRoot string
|
|
||||||
|
|
||||||
err := db.
|
|
||||||
QueryRow(context.Background(), sqlStatement, tx.blockNo, tx.blockHash, "0x"+hex.EncodeToString(txRoot[:])).
|
|
||||||
Scan(&slot, &blockRoot, &parentBlock, ð1DataBlockHash, &mhKey,
|
|
||||||
&blockNumber, ×tamp, &blockHash, &parentHash, &stateRoot,
|
|
||||||
&receiptsRoot, &transactionsRoot)
|
|
||||||
if nil != err {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return &beaconclient.DbSignedBeaconBlock{
|
|
||||||
Slot: slot,
|
|
||||||
BlockRoot: blockRoot,
|
|
||||||
ParentBlock: parentBlock,
|
|
||||||
Eth1DataBlockHash: eth1DataBlockHash,
|
|
||||||
MhKey: mhKey,
|
|
||||||
ExecutionPayloadHeader: &beaconclient.DbExecutionPayloadHeader{
|
|
||||||
BlockNumber: blockNumber,
|
|
||||||
Timestamp: timestamp,
|
|
||||||
BlockHash: blockHash,
|
|
||||||
ParentHash: parentHash,
|
|
||||||
StateRoot: stateRoot,
|
|
||||||
ReceiptsRoot: receiptsRoot,
|
|
||||||
TransactionsRoot: transactionsRoot,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func sendTestTx() (*SentTx, error) {
|
|
||||||
ctx := context.Background()
|
|
||||||
eth, err := createClient()
|
|
||||||
Expect(err).ToNot(HaveOccurred())
|
|
||||||
|
|
||||||
//TODO: Pull from test config / env.
|
|
||||||
tx, err := sendTransaction(
|
|
||||||
ctx,
|
|
||||||
eth,
|
|
||||||
getEnvStr("TEST_E2E_FROM_ADDR", "0xe6ce22afe802caf5ff7d3845cec8c736ecc8d61f"),
|
|
||||||
getEnvStr("TEST_E2E_TO_ADDR", "0xe22AD83A0dE117bA0d03d5E94Eb4E0d80a69C62a"),
|
|
||||||
int64(getEnvInt("TEST_E2E_AMOUNT", 10)),
|
|
||||||
getEnvStr("TEST_E2E_SIGNING_KEY", "0x888814df89c4358d7ddb3fa4b0213e7331239a80e1f013eaa7b2deca2a41a218"),
|
|
||||||
)
|
|
||||||
Expect(err).ToNot(HaveOccurred())
|
|
||||||
|
|
||||||
txBin, err := tx.MarshalBinary()
|
|
||||||
Expect(err).ToNot(HaveOccurred())
|
|
||||||
|
|
||||||
for i := 0; i <= 30; i++ {
|
|
||||||
time.Sleep(time.Second)
|
|
||||||
receipt, _ := eth.TransactionReceipt(ctx, tx.Hash())
|
|
||||||
if nil != receipt {
|
|
||||||
sentTx := &SentTx{
|
|
||||||
hash: tx.Hash().String(),
|
|
||||||
raw: txBin,
|
|
||||||
blockNo: receipt.BlockNumber.Uint64(),
|
|
||||||
blockHash: receipt.BlockHash.String(),
|
|
||||||
txIndex: receipt.TransactionIndex,
|
|
||||||
}
|
|
||||||
log.Debugf("Sent ETH1 TX %s (Block No: %d, Block Hash: %s)", sentTx.hash, sentTx.blockNo, sentTx.blockHash)
|
|
||||||
return sentTx, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
err = errors.New("Timed out waiting for TX.")
|
|
||||||
Expect(err).ToNot(HaveOccurred())
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func createClient() (*ethclient.Client, error) {
|
|
||||||
return ethclient.Dial(getEnvStr("TEST_E2E_GETH_URL", "http://localhost:8545"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// sendTransaction sends a transaction with 1 ETH to a specified address.
|
|
||||||
func sendTransaction(ctx context.Context, eth *ethclient.Client, fromAddr string, toAddr string, amount int64, signingKey string) (*types.Transaction, error) {
|
|
||||||
var (
|
|
||||||
from = common.HexToAddress(fromAddr)
|
|
||||||
to = common.HexToAddress(toAddr)
|
|
||||||
sk = crypto.ToECDSAUnsafe(common.FromHex(signingKey))
|
|
||||||
value = big.NewInt(amount)
|
|
||||||
gasLimit = uint64(getEnvInt("TEST_E2E_GAS_LIMIT", 21000))
|
|
||||||
)
|
|
||||||
// Retrieve the chainid (needed for signer)
|
|
||||||
chainid, err := eth.ChainID(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// Retrieve the pending nonce
|
|
||||||
nonce, err := eth.PendingNonceAt(ctx, from)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// Get suggested gas price
|
|
||||||
tipCap, _ := eth.SuggestGasTipCap(ctx)
|
|
||||||
feeCap, _ := eth.SuggestGasPrice(ctx)
|
|
||||||
log.Info("Tip cap: ", tipCap)
|
|
||||||
log.Info("Fee cap: ", feeCap)
|
|
||||||
// Create a new transaction
|
|
||||||
tx := types.NewTx(
|
|
||||||
&types.DynamicFeeTx{
|
|
||||||
ChainID: chainid,
|
|
||||||
Nonce: nonce,
|
|
||||||
GasTipCap: tipCap,
|
|
||||||
GasFeeCap: feeCap,
|
|
||||||
Gas: gasLimit,
|
|
||||||
To: &to,
|
|
||||||
Value: value,
|
|
||||||
Data: nil,
|
|
||||||
})
|
|
||||||
// Sign the transaction using our keys
|
|
||||||
signedTx, _ := types.SignTx(tx, types.NewLondonSigner(chainid), sk)
|
|
||||||
// Send the transaction to our node
|
|
||||||
return signedTx, eth.SendTransaction(ctx, signedTx)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getEnvStr(envVar string, def string) string {
|
|
||||||
value, set := os.LookupEnv(envVar)
|
|
||||||
if set {
|
|
||||||
return value
|
|
||||||
} else {
|
|
||||||
return def
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func getEnvInt(envVar string, def int) int {
|
|
||||||
value, set := os.LookupEnv(envVar)
|
|
||||||
if set {
|
|
||||||
number, err := strconv.Atoi(value)
|
|
||||||
if err != nil {
|
|
||||||
return def
|
|
||||||
}
|
|
||||||
return number
|
|
||||||
} else {
|
|
||||||
return def
|
|
||||||
}
|
|
||||||
}
|
|
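The single-transaction shortcut in lookForTxInDb deserves a callout: because the test sends exactly one transaction, the payload's transactions root can be computed client-side and matched directly in SQL. A standalone sketch of that computation, using the same zrnt/ztyp packages the test imports (the placeholder transaction bytes are illustrative only):

package main

import (
    "encoding/hex"
    "fmt"

    zcommon "github.com/protolambda/zrnt/eth2/beacon/common"
    "github.com/protolambda/zrnt/eth2/configs"
    "github.com/protolambda/ztyp/tree"
)

func main() {
    rawTx := []byte{0x02, 0x01, 0x02, 0x03} // stand-in for a real signed-tx encoding

    // A payload holding only our transaction hashes to the same
    // transactions_root the indexer stores, so we can query on it directly.
    ptxs := zcommon.PayloadTransactions{rawTx}
    root := ptxs.HashTreeRoot(configs.Mainnet, tree.GetHashFn())
    fmt.Println("payload_transactions_root = 0x" + hex.EncodeToString(root[:]))
}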
@ -1 +0,0 @@
../../../external/eth2.0-spec-tests
@ -20,7 +20,7 @@ import (
     "net/http"

     log "github.com/sirupsen/logrus"
-    "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
+    "github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
 )

 // This function will ensure that we can connect to the beacon client.
@ -20,26 +20,18 @@ import (

     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
-    beaconclient "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
+    beaconclient "github.com/vulcanize/ipld-ethcl-indexer/pkg/beaconclient"
 )

 var _ = Describe("Healthcheck", func() {
     var (
-        Bc    *beaconclient.BeaconClient
-        errBc *beaconclient.BeaconClient
+        BC    = beaconclient.CreateBeaconClient(context.Background(), "http", "localhost", 5052)
+        errBc = beaconclient.CreateBeaconClient(context.Background(), "http", "blah-blah", 1010)
     )

-    BeforeEach(func() {
-        var err error
-        Bc, err = beaconclient.CreateBeaconClient(context.Background(), "http", "localhost", 5052, 10, bcUniqueIdentifier, false, true, true)
-        Expect(err).ToNot(HaveOccurred())
-        errBc, err = beaconclient.CreateBeaconClient(context.Background(), "http", "blah-blah", 1010, 10, bcUniqueIdentifier, false, true, true)
-        Expect(err).ToNot(HaveOccurred())
-    })
-
     Describe("Connecting to the lighthouse client", Label("integration"), func() {
         Context("When the client is running", func() {
             It("We should connect successfully", func() {
-                err := Bc.CheckBeaconClient()
+                err := BC.CheckBeaconClient()
                 Expect(err).To(BeNil())
             })
         })
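For orientation, the main-side constructor (nine arguments, returning an error) replaces v0.1.0's four-argument one. A hedged usage sketch outside of Ginkgo; the meanings of the trailing arguments are assumptions inferred from other call sites in this diff, not confirmed against the real signature:

// Hedged sketch; trailing-argument meanings (known-gaps increment, unique id,
// checkDb, process-states, process-blocks) are assumptions, not confirmed.
bc, err := beaconclient.CreateBeaconClient(context.Background(),
    "http", "localhost", 5052, 10, bcUniqueIdentifier, false, true, true)
if err != nil {
    log.Fatal(err)
}
if err := bc.CheckBeaconClient(); err != nil {
    log.Fatal("lighthouse unreachable: ", err)
}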
@ -19,11 +19,10 @@ package beaconclient

 import (
     "encoding/json"
-    "github.com/pkg/errors"
     "time"

     log "github.com/sirupsen/logrus"
-    "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
+    "github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
     "golang.org/x/sync/errgroup"
 )

@ -34,11 +33,11 @@ var (
 // This function will capture all the SSE events for a given SseEvents object.
 // When new messages come in, it will ensure that they are decoded into JSON.
 // If any errors occur, it logs the error information.
-func handleIncomingSseEvent[P ProcessedEvents](eventHandler *SseEvents[P], errMetricInc func(uint64), idleTimeout time.Duration) {
+func handleIncomingSseEvent[P ProcessedEvents](eventHandler *SseEvents[P], errMetricInc func(uint64)) {
     go func() {
         errG := new(errgroup.Group)
         errG.Go(func() error {
-            err := eventHandler.Connect()
+            err := eventHandler.SseClient.SubscribeChanRaw(eventHandler.MessagesCh)
             if err != nil {
                 return err
             }
@ -55,21 +54,9 @@ func handleIncomingSseEvent[P ProcessedEvents](eventHandler *SseEvents[P], errMe
         }

     }()

-    // TODO(telackey): Doesn't there need to be a check here that the handler hasn't been shutdown?
     for {
-        var idleTimer *time.Timer = nil
-        var idleTimerC <-chan time.Time = nil
-        if idleTimeout > 0 {
-            idleTimer = time.NewTimer(idleTimeout)
-            idleTimerC = idleTimer.C
-        }
-
         select {
         case message := <-eventHandler.MessagesCh:
-            if nil != idleTimer {
-                idleTimer.Stop()
-            }
             // Message can be nil if it's a keep-alive message
             if len(message.Data) != 0 {
                 log.WithFields(log.Fields{"msg": string(message.Data)}).Debug("We are going to send the following message to be processed.")
@ -77,9 +64,6 @@ func handleIncomingSseEvent[P ProcessedEvents](eventHandler *SseEvents[P], errMe
             }

         case headErr := <-eventHandler.ErrorCh:
-            if nil != idleTimer {
-                idleTimer.Stop()
-            }
             log.WithFields(log.Fields{
                 "endpoint": eventHandler.Endpoint,
                 "err":      headErr.err,
@ -87,21 +71,6 @@ func handleIncomingSseEvent[P ProcessedEvents](eventHandler *SseEvents[P], errMe
             },
             ).Error("Unable to handle event.")
             errMetricInc(1)

-        case <-idleTimerC:
-            err := errors.New("SSE idle timeout")
-            log.WithFields(log.Fields{
-                "endpoint": eventHandler.Endpoint,
-                "err":      err,
-                "msg":      err.Error(),
-            },
-            ).Error("TIMEOUT - Attempting to resubscribe")
-            errMetricInc(1)
-            eventHandler.Disconnect()
-            err = eventHandler.Connect()
-            if err != nil {
-                log.Error("Unable to re-subscribe.", err)
-            }
         }
     }
 }
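Taken together, the hunks above remove an SSE idle-watchdog: main arms a timer on each loop iteration, stops it whenever traffic or an error arrives, and disconnects/reconnects when it fires. A condensed, self-contained sketch of that pattern, with generic stand-ins for the repo's SseEvents fields:

package main

import (
    "errors"
    "log"
    "time"
)

// Hedged, generic sketch of the idle-timeout branch removed above; msgCh and
// reconnect stand in for the repo's SseEvents channel and Connect method.
func watch(msgCh <-chan []byte, reconnect func() error, idleTimeout time.Duration) {
    for {
        var idleTimer *time.Timer
        var idleC <-chan time.Time
        if idleTimeout > 0 {
            idleTimer = time.NewTimer(idleTimeout) // re-armed every iteration
            idleC = idleTimer.C
        }
        select {
        case msg := <-msgCh:
            if idleTimer != nil {
                idleTimer.Stop() // traffic arrived; disarm the watchdog
            }
            log.Printf("received %d bytes", len(msg))
        case <-idleC:
            log.Println(errors.New("SSE idle timeout"), "- attempting to resubscribe")
            if err := reconnect(); err != nil {
                log.Println("unable to re-subscribe:", err)
            }
        }
    }
}

func main() {
    ch := make(chan []byte)
    go watch(ch, func() error { return nil }, 500*time.Millisecond)
    ch <- []byte("head event")
    time.Sleep(time.Second) // let one idle timeout fire
}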
@ -124,6 +93,6 @@ func processMsg[P ProcessedEvents](msg []byte, processCh chan<- *P, errorCh chan
 // Capture all of the event topics.
 func (bc *BeaconClient) captureEventTopic() {
     log.Info("We are capturing all SSE events")
-    go handleIncomingSseEvent(bc.HeadTracking, bc.Metrics.IncrementHeadError, time.Second*30)
-    go handleIncomingSseEvent(bc.ReOrgTracking, bc.Metrics.IncrementReorgError, 0)
+    go handleIncomingSseEvent(bc.HeadTracking, bc.Metrics.IncrementHeadError)
+    go handleIncomingSseEvent(bc.ReOrgTracking, bc.Metrics.IncrementHeadReorgError)
 }
@ -17,112 +17,24 @@ package beaconclient

 import (
     "sync/atomic"

-    "github.com/prometheus/client_golang/prometheus"
-    log "github.com/sirupsen/logrus"
-    "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
 )

-// Create a metric struct and register each channel with prometheus
-func CreateBeaconClientMetrics() (*BeaconClientMetrics, error) {
-    metrics := &BeaconClientMetrics{
-        SlotInserts:             0,
-        ReorgInserts:            0,
-        KnownGapsInserts:        0,
-        KnownGapsProcessed:      0,
-        KnownGapsReprocessError: 0,
-        HeadError:               0,
-        HeadReorgError:          0,
-    }
-    err := prometheusRegisterHelper("slot_inserts", "Keeps track of the number of slots we have inserted.", &metrics.SlotInserts)
-    if err != nil {
-        return nil, err
-    }
-    err = prometheusRegisterHelper("reorg_inserts", "Keeps track of the number of reorgs we have inserted.", &metrics.ReorgInserts)
-    if err != nil {
-        return nil, err
-    }
-    err = prometheusRegisterHelper("known_gaps_inserts", "Keeps track of the number of known gaps we have inserted.", &metrics.KnownGapsInserts)
-    if err != nil {
-        return nil, err
-    }
-    err = prometheusRegisterHelper("known_gaps_reprocess_error", "Keeps track of the number of known gaps that had errors when reprocessing, but the error was updated successfully.", &metrics.KnownGapsReprocessError)
-    if err != nil {
-        return nil, err
-    }
-    err = prometheusRegisterHelper("known_gaps_processed", "Keeps track of the number of known gaps we successfully processed.", &metrics.KnownGapsProcessed)
-    if err != nil {
-        return nil, err
-    }
-    err = prometheusRegisterHelper("historic_slots_processed", "Keeps track of the number of historic slots we successfully processed.", &metrics.HistoricSlotProcessed)
-    if err != nil {
-        return nil, err
-    }
-    err = prometheusRegisterHelper("head_error", "Keeps track of the number of errors we had processing head messages.", &metrics.HeadError)
-    if err != nil {
-        return nil, err
-    }
-    err = prometheusRegisterHelper("head_reorg_error", "Keeps track of the number of errors we had processing reorg messages.", &metrics.HeadReorgError)
-    if err != nil {
-        return nil, err
-    }
-    return metrics, nil
-}
-
-func prometheusRegisterHelper(name string, help string, varPointer *uint64) error {
-    err := prometheus.Register(prometheus.NewCounterFunc(
-        prometheus.CounterOpts{
-            Namespace:   "beacon_client",
-            Subsystem:   "",
-            Name:        name,
-            Help:        help,
-            ConstLabels: map[string]string{},
-        },
-        func() float64 {
-            return float64(atomic.LoadUint64(varPointer))
-        }))
-    if err != nil && err.Error() != "duplicate metrics collector registration attempted" {
-        loghelper.LogError(err).WithField("name", name).Error("Unable to register counter.")
-        return err
-    }
-    return nil
-}
-
-// A structure utilized for keeping track of various metrics. Currently, mostly used in testing.
-type BeaconClientMetrics struct {
-    SlotInserts             uint64 // Number of head events we successfully wrote to the DB.
-    ReorgInserts            uint64 // Number of reorg events we successfully wrote to the DB.
-    KnownGapsInserts        uint64 // Number of known_gaps we successfully wrote to the DB.
-    KnownGapsProcessed      uint64 // Number of knownGaps processed.
-    KnownGapsReprocessError uint64 // Number of knownGaps that were updated with an error.
-    HistoricSlotProcessed   uint64 // Number of historic slots successfully processed.
-    HeadError               uint64 // Number of errors that occurred when decoding the head message.
-    HeadReorgError          uint64 // Number of errors that occurred when decoding the reorg message.
-}
-
 // Wrapper function to increment inserts. If we want to use mutexes later we can easily update all
 // occurrences here.
-func (m *BeaconClientMetrics) IncrementSlotInserts(inc uint64) {
-    log.Debug("Incrementing Slot Insert")
-    atomic.AddUint64(&m.SlotInserts, inc)
+func (m *BeaconClientMetrics) IncrementHeadTrackingInserts(inc uint64) {
+    atomic.AddUint64(&m.HeadTrackingInserts, inc)
 }

 // Wrapper function to increment reorgs. If we want to use mutexes later we can easily update all
 // occurrences here.
-func (m *BeaconClientMetrics) IncrementReorgsInsert(inc uint64) {
-    atomic.AddUint64(&m.ReorgInserts, inc)
+func (m *BeaconClientMetrics) IncrementHeadTrackingReorgs(inc uint64) {
+    atomic.AddUint64(&m.HeadTrackingReorgs, inc)
 }

 // Wrapper function to increment known gaps. If we want to use mutexes later we can easily update all
 // occurrences here.
-func (m *BeaconClientMetrics) IncrementKnownGapsInserts(inc uint64) {
-    atomic.AddUint64(&m.KnownGapsInserts, inc)
-}
-
-// Wrapper function to increment known gaps processed. If we want to use mutexes later we can easily update all
-// occurrences here.
-func (m *BeaconClientMetrics) IncrementKnownGapsProcessed(inc uint64) {
-    atomic.AddUint64(&m.KnownGapsProcessed, inc)
+func (m *BeaconClientMetrics) IncrementHeadTrackingKnownGaps(inc uint64) {
+    atomic.AddUint64(&m.HeadTrackingKnownGaps, inc)
 }

 // Wrapper function to increment head errors. If we want to use mutexes later we can easily update all
@ -133,18 +45,6 @@ func (m *BeaconClientMetrics) IncrementHeadError(inc uint64) {

 // Wrapper function to increment reorg errors. If we want to use mutexes later we can easily update all
 // occurrences here.
-func (m *BeaconClientMetrics) IncrementReorgError(inc uint64) {
+func (m *BeaconClientMetrics) IncrementHeadReorgError(inc uint64) {
     atomic.AddUint64(&m.HeadReorgError, inc)
 }
-
-// Wrapper function to increment the number of knownGaps that were updated with reprocessing errors.
-// If we want to use mutexes later we can easily update all occurrences here.
-func (m *BeaconClientMetrics) IncrementKnownGapsReprocessError(inc uint64) {
-    atomic.AddUint64(&m.KnownGapsReprocessError, inc)
-}
-
-// Wrapper function to increment the number of historicSlots that were processed successfully.
-// If we want to use mutexes later we can easily update all occurrences here.
-func (m *BeaconClientMetrics) IncrementHistoricSlotProcessed(inc uint64) {
-    atomic.AddUint64(&m.HistoricSlotProcessed, inc)
-}
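The deleted prometheusRegisterHelper wires a plain atomic uint64 into Prometheus without locks: a CounterFunc samples the value at scrape time. A self-contained sketch of the same pattern, assuming only the standard Prometheus Go client:

package main

import (
    "net/http"
    "sync/atomic"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

var slotInserts uint64 // incremented with atomic.AddUint64, as in the wrappers above

func main() {
    // Register a CounterFunc that reads the atomic value at scrape time,
    // mirroring prometheusRegisterHelper in the deleted file.
    prometheus.MustRegister(prometheus.NewCounterFunc(
        prometheus.CounterOpts{
            Namespace: "beacon_client",
            Name:      "slot_inserts",
            Help:      "Keeps track of the number of slots we have inserted.",
        },
        func() float64 { return float64(atomic.LoadUint64(&slotInserts)) },
    ))
    atomic.AddUint64(&slotInserts, 1)
    http.Handle("/metrics", promhttp.Handler())
    _ = http.ListenAndServe(":2112", nil)
}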
@ -51,47 +51,36 @@ type ChainReorg struct {
     ExecutionOptimistic bool   `json:"execution_optimistic"`
 }

-// A struct to capture what's being written to the eth-beacon.slots table.
+// A struct to capture what's being written to the ethcl.slots table.
 type DbSlots struct {
-    Epoch     uint64 // The epoch.
-    Slot      uint64 // The slot.
+    Epoch     string // The epoch.
+    Slot      string // The slot.
     BlockRoot string // The block root
     StateRoot string // The state root
     Status    string // The status, it can be proposed | forked | skipped.
 }

-// A struct to handle the details of an embedded Eth1-block (ie, the ExecutionPayload)
-type DbExecutionPayloadHeader struct {
-    BlockNumber      uint64
-    Timestamp        uint64
-    BlockHash        string
-    ParentHash       string
-    StateRoot        string
-    ReceiptsRoot     string
-    TransactionsRoot string
-}
-
-// A struct to capture what's being written to the eth-beacon.signed_block table.
+// A struct to capture what's being written to the ethcl.signed_beacon_block table.
 type DbSignedBeaconBlock struct {
-    Slot                   uint64 // The slot.
-    BlockRoot              string // The block root
-    ParentBlock            string // The parent block root.
-    Eth1DataBlockHash      string // The eth1 block_hash
-    MhKey                  string // The ipld multihash key.
-    ExecutionPayloadHeader *DbExecutionPayloadHeader // The ExecutionPayloadHeader (after Bellatrix only).
+    Slot          string // The slot.
+    BlockRoot     string // The block root
+    ParentBlock   string // The parent block root.
+    Eth1BlockHash string // The eth1 block_hash
+    MhKey         string // The ipld multihash key.
 }

-// A struct to capture what's being written to the eth-beacon.state table.
+// A struct to capture what's being written to the ethcl.beacon_state table.
 type DbBeaconState struct {
-    Slot      uint64 // The slot.
+    Slot      string // The slot.
     StateRoot string // The state root
     MhKey     string // The ipld multihash key.
 }

-// A structure to capture what's being written to the eth-beacon.known_gaps table.
+// A structure to capture what's being written to the ethcl.known_gaps table.
 type DbKnownGaps struct {
-    StartSlot         uint64 // The start slot for known_gaps, inclusive.
-    EndSlot           uint64 // The end slot for known_gaps, inclusive.
+    StartSlot         string // The start slot for known_gaps, inclusive.
+    EndSlot           string // The end slot for known_gaps, inclusive.
     CheckedOut        bool   // Indicates if any process is currently processing this entry.
     ReprocessingError string // The error that occurred when attempting to reprocess these entries.
     EntryError        string // The error that caused this entry to be added to the table. Could be null.
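To make the type change above concrete, a main-side known-gaps entry carries numeric slots; on v0.1.0 the same fields were strings. Illustrative values only:

// Field set follows the main-side DbKnownGaps struct above; values are made up.
gap := DbKnownGaps{
    StartSlot:         4200001,
    EndSlot:           4200064,
    CheckedOut:        false,
    ReprocessingError: "",
    EntryError:        "Bad Head Messages",
}
_ = gap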
@ -20,10 +20,10 @@ import (
     dshelp "github.com/ipfs/go-ipfs-ds-help"
     "github.com/multiformats/go-multihash"
     log "github.com/sirupsen/logrus"
-    "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
+    "github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
 )

-const SSZ_SHA2_256_PREFIX uint64 = 0xb502
+const SSZ_SHA2_256_PREFIX uint64 = 0xb501

 // MultihashKeyFromSSZRoot converts a SSZ-SHA2-256 root hash into a blockstore prefixed multihash key
 func MultihashKeyFromSSZRoot(root []byte) (string, error) {
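For context, a hedged sketch of the conversion MultihashKeyFromSSZRoot performs, built from the two imports above; the exact error handling and the "/blocks" prefix are assumptions based on the doc comment, not confirmed against the real body:

package main

import (
    "fmt"

    dshelp "github.com/ipfs/go-ipfs-ds-help"
    "github.com/multiformats/go-multihash"
)

// The main-side prefix; v0.1.0 used 0xb501.
const SSZ_SHA2_256_PREFIX uint64 = 0xb502

// Hedged sketch: encode a 32-byte SSZ root under the custom multihash code,
// then derive a blockstore-style datastore key ("/blocks" prefix is assumed).
func multihashKeyFromSSZRoot(root []byte) (string, error) {
    mh, err := multihash.Encode(root, SSZ_SHA2_256_PREFIX)
    if err != nil {
        return "", err
    }
    return "/blocks" + dshelp.MultihashToDsKey(multihash.Multihash(mh)).String(), nil
}

func main() {
    key, err := multihashKeyFromSSZRoot(make([]byte, 32))
    if err != nil {
        panic(err)
    }
    fmt.Println(key)
}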
@ -20,8 +20,12 @@ package beaconclient

 import (
     "fmt"
+    "strconv"

     log "github.com/sirupsen/logrus"
-    "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
+    "github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
+    "github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
+    "golang.org/x/sync/errgroup"
 )

 // This function will perform the necessary steps to handle a reorg.
@ -30,11 +34,7 @@ func (bc *BeaconClient) handleReorg() {
     for {
         reorg := <-bc.ReOrgTracking.ProcessCh
         log.WithFields(log.Fields{"reorg": reorg}).Debug("Received a new reorg message.")
-        slot, err := ParseSlot(reorg.Slot)
-        if nil != err {
-            loghelper.LogSlotError(slot.Number(), err)
-        }
-        writeReorgs(bc.Db, slot, reorg.NewHeadBlock, bc.Metrics)
+        writeReorgs(bc.Db, reorg.Slot, reorg.NewHeadBlock, bc.Metrics)
     }
 }

@ -45,7 +45,7 @@ func (bc *BeaconClient) handleHead() {
     for {
         head := <-bc.HeadTracking.ProcessCh
         // Process all the work here.
-        slot, err := ParseSlot(head.Slot)
+        slot, err := strconv.Atoi(head.Slot)
         if err != nil {
             bc.HeadTracking.ErrorCh <- &SseError{
                 err: fmt.Errorf("Unable to turn the slot from string to int: %s", head.Slot),
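ParseSlot on main replaces the raw strconv.Atoi of v0.1.0. Its definition is not part of this diff, but the call sites (it returns a Slot plus an error, and Slot exposes Number()) suggest something like this hypothetical minimal version; the real one lives elsewhere in the repo and may differ:

package beaconclient

import "strconv"

// Hypothetical minimal ParseSlot, consistent with its call sites in this diff.
type Slot uint64

func (s Slot) Number() uint64 { return uint64(s) }

func ParseSlot(s string) (Slot, error) {
    n, err := strconv.ParseUint(s, 10, 64)
    return Slot(n), err
}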
@ -56,20 +56,26 @@ func (bc *BeaconClient) handleHead() {
         if errorSlots != 0 && bc.PreviousSlot != 0 {
             log.WithFields(log.Fields{
                 "lastProcessedSlot": bc.PreviousSlot,
-                "errorSlots":        errorSlots,
+                "errorMessages":     errorSlots,
             }).Warn("We added slots to the knownGaps table because we got bad head messages.")
-            writeKnownGaps(bc.Db, bc.KnownGapTableIncrement, bc.PreviousSlot+1, slot, fmt.Errorf("Bad Head Messages"), "headProcessing", bc.Metrics)
-            errorSlots = 0
+            writeKnownGaps(bc.Db, bc.KnownGapTableIncrement, bc.PreviousSlot, bcSlotsPerEpoch+errorSlots, fmt.Errorf("Bad Head Messages"), "headProcessing", bc.Metrics)
         }

         log.WithFields(log.Fields{"head": head}).Debug("We are going to start processing the slot.")

-        // Not used anywhere yet but might be useful to have.
-        if bc.PreviousSlot == 0 && bc.PreviousBlockRoot == "" {
-            bc.StartingSlot = slot
-        }
-
-        go processHeadSlot(slot, head.Block, head.State, bc.SlotProcessingDetails())
+        go func(db sql.Database, serverAddress string, slot int, blockRoot string, stateRoot string, previousSlot int, previousBlockRoot string, metrics *BeaconClientMetrics, knownGapsTableIncrement int) {
+            errG := new(errgroup.Group)
+            errG.Go(func() error {
+                err = processHeadSlot(db, serverAddress, slot, blockRoot, stateRoot, previousSlot, previousBlockRoot, metrics, knownGapsTableIncrement)
+                if err != nil {
+                    return err
+                }
+                return nil
+            })
+            if err := errG.Wait(); err != nil {
+                loghelper.LogSlotError(strconv.Itoa(slot), err).Error("Unable to process a slot")
+            }
+        }(bc.Db, bc.ServerEndpoint, slot, head.Block, head.State, bc.PreviousSlot, bc.PreviousBlockRoot, bc.Metrics, bc.KnownGapTableIncrement)

         log.WithFields(log.Fields{"head": head.Slot}).Debug("We finished calling processHeadSlot.")

@ -77,4 +83,5 @@ func (bc *BeaconClient) handleHead() {
         bc.PreviousSlot = slot
         bc.PreviousBlockRoot = head.Block
     }

 }
@ -1,269 +0,0 @@
// VulcanizeDB
// Copyright © 2022 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

// This file contains all the code to process historic slots.

package beaconclient

import (
    "context"
    "fmt"
    "strconv"
    "time"

    "github.com/jackc/pgx/v4"
    log "github.com/sirupsen/logrus"
    "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
    "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
)

var (
    // Get a single highest-priority, non-checked-out row from eth_beacon.historic_process.
    getHpEntryStmt string = `SELECT start_slot, end_slot FROM eth_beacon.historic_process
    WHERE checked_out=false AND end_slot >= $1
    ORDER BY priority ASC
    LIMIT 1;`
    // Used to periodically check to see if there is a new entry in the eth_beacon.historic_process table.
    checkHpEntryStmt string = `SELECT * FROM eth_beacon.historic_process WHERE checked_out=false AND end_slot >= $1;`
    // Used to checkout a row from the eth_beacon.historic_process table
    lockHpEntryStmt string = `UPDATE eth_beacon.historic_process
    SET checked_out=true, checked_out_by=$3
    WHERE start_slot=$1 AND end_slot=$2;`
    // Used to delete an entry from the eth_beacon.historic_process table
    deleteHpEntryStmt string = `DELETE FROM eth_beacon.historic_process
    WHERE start_slot=$1 AND end_slot=$2;`
    // Used to update every single row that this node has checked out.
    releaseHpLockStmt string = `UPDATE eth_beacon.historic_process
    SET checked_out=false, checked_out_by=null
    WHERE checked_out_by=$1`
)

type HistoricProcessing struct {
    db                   sql.Database         // db connection
    metrics              *BeaconClientMetrics // metrics for beaconclient
    uniqueNodeIdentifier int                  // node unique identifier.
}

// Get a single row of historical slots from the table.
func (hp HistoricProcessing) getSlotRange(ctx context.Context, slotCh chan<- slotsToProcess, minimumSlot Slot) []error {
    return getBatchProcessRow(ctx, hp.db, getHpEntryStmt, checkHpEntryStmt, lockHpEntryStmt, slotCh, strconv.Itoa(hp.uniqueNodeIdentifier), minimumSlot)
}

// Remove the table entry.
func (hp HistoricProcessing) removeTableEntry(ctx context.Context, processCh <-chan slotsToProcess) error {
    return removeRowPostProcess(ctx, hp.db, processCh, QueryBySlotStmt, deleteHpEntryStmt)
}

// Handle any processing errors by writing them to the known_gaps table.
func (hp HistoricProcessing) handleProcessingErrors(ctx context.Context, errMessages <-chan batchHistoricError) {
    for {
        select {
        case <-ctx.Done():
            return
        case errMs := <-errMessages:
            loghelper.LogSlotError(errMs.slot.Number(), errMs.err)
            writeKnownGaps(hp.db, 1, errMs.slot, errMs.slot, errMs.err, errMs.errProcess, hp.metrics)
        }
    }
}

// "un"-checkout the rows held by this DB in the eth_beacon.historic_process table.
func (hp HistoricProcessing) releaseDbLocks() error {
    log.Debug("Updating all the entries to eth_beacon.historical processing")
    res, err := hp.db.Exec(context.Background(), releaseHpLockStmt, hp.uniqueNodeIdentifier)
    if err != nil {
        return fmt.Errorf("Unable to remove lock from eth_beacon.historical_processing table for node %d, error is %v", hp.uniqueNodeIdentifier, err)
    }
    rows, err := res.RowsAffected()
    if err != nil {
        return fmt.Errorf("Unable to calculate the number of rows affected by releasing locks from eth_beacon.historical_processing table for node %d, error is %v", hp.uniqueNodeIdentifier, err)
    }
    log.WithField("rowCount", rows).Info("Released historicalProcess locks for specified rows.")
    return nil
}

// Process the slot range.
func processSlotRangeWorker(ctx context.Context, workCh <-chan Slot, errCh chan<- batchHistoricError, spd SlotProcessingDetails, incrementTracker func(uint64)) {
    for {
        select {
        case <-ctx.Done():
            return
        case slot := <-workCh:
            log.Debug("Handling slot: ", slot)
            err, errProcess := handleHistoricSlot(ctx, slot, spd)
            if err != nil {
                errMs := batchHistoricError{
                    err:        err,
                    errProcess: errProcess,
                    slot:       slot,
                }
                errCh <- errMs
            } else {
                incrementTracker(1)
            }
        }
    }
}

// A wrapper function that inserts the start_slot and end_slot from a single row into a channel.
// It also locks the row by updating the checked_out column.
// The statement for getting the start_slot and end_slot must be provided.
// The statement for "locking" the row must also be provided.
func getBatchProcessRow(ctx context.Context, db sql.Database, getStartEndSlotStmt string, checkNewRowsStmt string, checkOutRowStmt string, slotCh chan<- slotsToProcess, uniqueNodeIdentifier string, minimumSlot Slot) []error {
    errCount := make([]error, 0)

    // 5 is an arbitrary number. It allows us to retry a few times before
    // ending the application.
    prevErrCount := 0
    for len(errCount) < 5 {
        select {
        case <-ctx.Done():
            return errCount
        default:
            if len(errCount) != prevErrCount {
                log.WithFields(log.Fields{
                    "errCount": errCount,
                }).Error("New error entry added")
            }
            processRow, err := db.Exec(context.Background(), checkNewRowsStmt, minimumSlot)
            if err != nil {
                errCount = append(errCount, err)
            }
            row, err := processRow.RowsAffected()
            if err != nil {
                errCount = append(errCount, err)
            }
            if row < 1 {
                time.Sleep(3 * time.Second)
                log.Debug("We are checking rows, be patient")
                break
            }
            log.Debug("We found a new row")
            dbCtx := context.Background()

            // Setup TX
            tx, err := db.Begin(dbCtx)
            if err != nil {
                loghelper.LogError(err).Error("We are unable to Begin a SQL transaction")
                errCount = append(errCount, err)
                break
            }
            defer func() {
                err := tx.Rollback(dbCtx)
                if err != nil && err != pgx.ErrTxClosed {
                    loghelper.LogError(err).Error("We were unable to Rollback a transaction")
                    errCount = append(errCount, err)
                }
            }()

            // Query the DB for slots.
            sp := slotsToProcess{}
            err = tx.QueryRow(dbCtx, getStartEndSlotStmt, minimumSlot).Scan(&sp.startSlot, &sp.endSlot)
            if err != nil {
                if err == pgx.ErrNoRows {
                    time.Sleep(1 * time.Second)
                    break
                }
                loghelper.LogSlotRangeStatementError(sp.startSlot.Number(), sp.endSlot.Number(), getStartEndSlotStmt, err).Error("Unable to get a row")
                errCount = append(errCount, err)
                break
            }

            // Checkout the Row
            res, err := tx.Exec(dbCtx, checkOutRowStmt, sp.startSlot, sp.endSlot, uniqueNodeIdentifier)
            if err != nil {
                loghelper.LogSlotRangeStatementError(sp.startSlot.Number(), sp.endSlot.Number(), checkOutRowStmt, err).Error("Unable to checkout the row")
                errCount = append(errCount, err)
                break
            }
            rows, err := res.RowsAffected()
            if err != nil {
                loghelper.LogSlotRangeStatementError(sp.startSlot.Number(), sp.endSlot.Number(), checkOutRowStmt, fmt.Errorf("Unable to determine the rows affected when trying to checkout a row."))
                errCount = append(errCount, err)
                break
            }
            if rows > 1 {
                loghelper.LogSlotRangeStatementError(sp.startSlot.Number(), sp.endSlot.Number(), checkOutRowStmt, err).WithFields(log.Fields{
                    "rowsReturn": rows,
                }).Error("We locked too many rows.....")
                errCount = append(errCount, err)
                break
            }
            if rows == 0 {
                loghelper.LogSlotRangeStatementError(sp.startSlot.Number(), sp.endSlot.Number(), checkOutRowStmt, err).WithFields(log.Fields{
                    "rowsReturn": rows,
                }).Error("We did not lock a single row.")
                errCount = append(errCount, err)
                break
            }
            err = tx.Commit(dbCtx)
            if err != nil {
                loghelper.LogSlotRangeError(sp.startSlot.Number(), sp.endSlot.Number(), err).Error("Unable to commit transactions.")
                errCount = append(errCount, err)
                break
            }
            log.WithField("slots", sp).Debug("Added new slots to be processed")
            slotCh <- sp
        }
    }
    log.WithFields(log.Fields{
        "ErrCount": errCount,
    }).Error("The ErrCounter")
    return errCount
}

// After a row has been processed it should be removed from its appropriate table.
func removeRowPostProcess(ctx context.Context, db sql.Database, processCh <-chan slotsToProcess, checkProcessedStmt, removeStmt string) error {
    errCh := make(chan error)
    for {
        select {
        case <-ctx.Done():
            return nil
        case slots := <-processCh:
            // Make sure the start and end slot exist in the slots table.
            go func() {
                log.WithFields(log.Fields{
                    "startSlot": slots.startSlot,
                    "endSlot":   slots.endSlot,
                }).Debug("Starting to check to see if the following slots have been processed")
                for {
                    isStartProcess, err := isSlotProcessed(db, checkProcessedStmt, slots.startSlot)
                    if err != nil {
                        errCh <- err
                    }
                    isEndProcess, err := isSlotProcessed(db, checkProcessedStmt, slots.endSlot)
                    if err != nil {
                        errCh <- err
                    }
                    if isStartProcess && isEndProcess {
                        break
                    }
                    time.Sleep(3 * time.Second)
                }

                _, err := db.Exec(context.Background(), removeStmt, slots.startSlot.Number(), slots.endSlot.Number())
                if err != nil {
                    errCh <- err
                }
            }()
            if len(errCh) != 0 {
                return <-errCh
            }
        }
    }
}
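The checkout dance in getBatchProcessRow (SELECT inside a transaction, UPDATE checked_out, verify exactly one row locked, COMMIT) is a standard single-consumer work-queue pattern. A condensed, hedged sketch written against pgx directly rather than the repo's sql.Database wrapper:

package worker

import (
    "context"
    "fmt"

    "github.com/jackc/pgx/v4"
)

// Hedged sketch of the row-checkout pattern above; error handling and
// statement text are simplified relative to the deleted file.
func checkoutOne(ctx context.Context, conn *pgx.Conn, node string) (start, end uint64, err error) {
    tx, err := conn.Begin(ctx)
    if err != nil {
        return 0, 0, err
    }
    defer tx.Rollback(ctx) // harmless after a successful Commit (pgx.ErrTxClosed)

    err = tx.QueryRow(ctx,
        `SELECT start_slot, end_slot FROM eth_beacon.historic_process
         WHERE checked_out=false ORDER BY priority ASC LIMIT 1;`).Scan(&start, &end)
    if err != nil {
        return 0, 0, err // pgx.ErrNoRows means the queue is empty
    }

    res, err := tx.Exec(ctx,
        `UPDATE eth_beacon.historic_process
         SET checked_out=true, checked_out_by=$3
         WHERE start_slot=$1 AND end_slot=$2;`, start, end, node)
    if err != nil {
        return 0, 0, err
    }
    if res.RowsAffected() != 1 {
        return 0, 0, fmt.Errorf("expected to lock exactly one row, locked %d", res.RowsAffected())
    }
    return start, end, tx.Commit(ctx)
}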
@ -1,137 +0,0 @@
// VulcanizeDB
// Copyright © 2022 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

// This file contains all the code to process historic slots.

package beaconclient

import (
    "context"
    "strconv"

    log "github.com/sirupsen/logrus"
    "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
    "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
)

var (
    // Get a single non-checked-out row from eth_beacon.known_gaps.
    getKgEntryStmt string = `SELECT start_slot, end_slot FROM eth_beacon.known_gaps
    WHERE checked_out=false AND end_slot >= $1
    ORDER BY priority ASC
    LIMIT 1;`
    // Used to periodically check to see if there is a new entry in the eth_beacon.known_gaps table.
    checkKgEntryStmt string = `SELECT * FROM eth_beacon.known_gaps WHERE checked_out=false AND end_slot >= $1;`
    // Used to checkout a row from the eth_beacon.known_gaps table
    lockKgEntryStmt string = `UPDATE eth_beacon.known_gaps
    SET checked_out=true, checked_out_by=$3
    WHERE start_slot=$1 AND end_slot=$2;`
    // Used to delete an entry from the knownGaps table
    deleteKgEntryStmt string = `DELETE FROM eth_beacon.known_gaps
    WHERE start_slot=$1 AND end_slot=$2;`
    // Used to check to see if a single slot exists in the known_gaps table.
    checkKgSingleSlotStmt string = `SELECT start_slot, end_slot FROM eth_beacon.known_gaps
    WHERE start_slot=$1 AND end_slot=$2;`
    // Used to update every single row that this node has checked out.
    releaseKgLockStmt string = `UPDATE eth_beacon.known_gaps
    SET checked_out=false, checked_out_by=null
    WHERE checked_out_by=$1`
)

type KnownGapsProcessing struct {
    db                   sql.Database         // db connection
    metrics              *BeaconClientMetrics // metrics for beaconclient
    uniqueNodeIdentifier int                  // node unique identifier.
}

// This function will perform all the heavy lifting for tracking the head of the chain.
func (bc *BeaconClient) ProcessKnownGaps(ctx context.Context, maxWorkers int, minimumSlot Slot) []error {
    log.Info("We are starting the known gaps processing service.")
    bc.KnownGapsProcess = KnownGapsProcessing{db: bc.Db, uniqueNodeIdentifier: bc.UniqueNodeIdentifier, metrics: bc.Metrics}
    errs := handleBatchProcess(ctx, maxWorkers, bc.KnownGapsProcess, bc.SlotProcessingDetails(), bc.Metrics.IncrementKnownGapsProcessed, minimumSlot)
    log.Debug("Exiting known gaps processing service")
    return errs
}

// This function will perform all the necessary clean up tasks for stopping historical processing.
func (bc *BeaconClient) StopKnownGapsProcessing(cancel context.CancelFunc) error {
    log.Info("We are stopping the known gaps processing service.")
    cancel()
    err := bc.KnownGapsProcess.releaseDbLocks()
    if err != nil {
        loghelper.LogError(err).WithField("uniqueIdentifier", bc.UniqueNodeIdentifier).Error("We were unable to remove the locks from the eth_beacon.known_gaps table. Manual Intervention is needed!")
    }
    return nil
}

// Get a single row of historical slots from the table.
func (kgp KnownGapsProcessing) getSlotRange(ctx context.Context, slotCh chan<- slotsToProcess, minimumSlot Slot) []error {
    return getBatchProcessRow(ctx, kgp.db, getKgEntryStmt, checkKgEntryStmt, lockKgEntryStmt, slotCh, strconv.Itoa(kgp.uniqueNodeIdentifier), minimumSlot)
}

// Remove the table entry.
func (kgp KnownGapsProcessing) removeTableEntry(ctx context.Context, processCh <-chan slotsToProcess) error {
    return removeRowPostProcess(ctx, kgp.db, processCh, QueryBySlotStmt, deleteKgEntryStmt)
}

// Handle processing errors: update existing known_gaps entries or create new ones.
func (kgp KnownGapsProcessing) handleProcessingErrors(ctx context.Context, errMessages <-chan batchHistoricError) {
    for {
        select {
        case <-ctx.Done():
            return
        case errMs := <-errMessages:
            // Check to see if this entry already exists.
            res, err := kgp.db.Exec(context.Background(), checkKgSingleSlotStmt, errMs.slot, errMs.slot)
            if err != nil {
                loghelper.LogSlotError(errMs.slot.Number(), err).Error("Unable to see if this slot is in the eth_beacon.known_gaps table")
            }

            rows, err := res.RowsAffected()
            if err != nil {
                loghelper.LogSlotError(errMs.slot.Number(), err).WithFields(log.Fields{
                    "queryStatement": checkKgSingleSlotStmt,
                }).Error("Unable to get the number of rows affected by this statement.")
            }

            if rows > 0 {
                loghelper.LogSlotError(errMs.slot.Number(), errMs.err).Error("We received an error when processing a knownGap")
                err = updateKnownGapErrors(kgp.db, errMs.slot, errMs.slot, errMs.err, kgp.metrics)
                if err != nil {
                    loghelper.LogSlotError(errMs.slot.Number(), err).Error("Error processing known gap")
                }
            } else {
                writeKnownGaps(kgp.db, 1, errMs.slot, errMs.slot, errMs.err, errMs.errProcess, kgp.metrics)
            }
        }
    }
}

// Update the checked_out column for the uniqueNodeIdentifier.
func (kgp KnownGapsProcessing) releaseDbLocks() error {
    log.Debug("Updating all the entries to eth_beacon.known_gaps")
    res, err := kgp.db.Exec(context.Background(), releaseKgLockStmt, kgp.uniqueNodeIdentifier)
    if err != nil {
        return err
    }
    rows, err := res.RowsAffected()
    if err != nil {
        return err
    }
    log.WithField("rowCount", rows).Info("Released knownGaps locks for specified rows.")
    return nil
}
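A hedged usage sketch for the service above: start it with a cancellable context and release the DB locks on shutdown. bc is assumed to be a fully constructed *beaconclient.BeaconClient, and 4/0 are arbitrary choices for maxWorkers and minimumSlot:

// Hedged usage sketch; method names follow this diff, argument values are arbitrary.
func runKnownGaps(bc *beaconclient.BeaconClient) (stop func()) {
    ctx, cancel := context.WithCancel(context.Background())
    go func() {
        if errs := bc.ProcessKnownGaps(ctx, 4, 0); len(errs) != 0 {
            log.Error("known gaps processing returned errors: ", errs)
        }
    }()
    return func() {
        if err := bc.StopKnownGapsProcessing(cancel); err != nil {
            log.Error(err)
        }
    }
}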
@ -23,476 +23,290 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/jackc/pgx/v4"
|
si "github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||||
|
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||||
|
dt "github.com/prysmaticlabs/prysm/encoding/ssz/detect"
|
||||||
|
|
||||||
|
// The below is temporary, once https://github.com/prysmaticlabs/prysm/issues/10006 has been resolved we wont need it.
|
||||||
|
// pb "github.com/prysmaticlabs/prysm/proto/prysm/v2"
|
||||||
|
|
||||||
|
state "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
|
"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
|
||||||
"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
|
"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
|
|
||||||
type SlotProcessingDetails struct {
|
var (
|
||||||
Context context.Context // A context generic context with multiple uses.
|
SlotUnmarshalError = func(obj string) string {
|
||||||
ServerEndpoint string // What is the endpoint of the beacon server.
|
return fmt.Sprintf("Unable to properly unmarshal the Slot field in the %s.", obj)
|
||||||
Db sql.Database // Database object used for reads and writes.
|
|
||||||
Metrics *BeaconClientMetrics // An object used to keep track of certain BeaconClient Metrics.
|
|
||||||
KnownGapTableIncrement int // The max number of slots within a single known_gaps table entry.
|
|
||||||
CheckDb bool // Should we check the DB to see if the slot exists before processing it?
|
|
||||||
PerformBeaconStateProcessing bool // Should we process BeaconStates?
|
|
||||||
PerformBeaconBlockProcessing bool // Should we process BeaconBlocks?
|
|
||||||
|
|
||||||
StartingSlot Slot // If we're performing head tracking. What is the first slot we processed.
|
|
||||||
PreviousSlot Slot // Whats the previous slot we processed
|
|
||||||
PreviousBlockRoot string // Whats the previous block root, used to check the next blocks parent.
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bc *BeaconClient) SlotProcessingDetails() SlotProcessingDetails {
|
|
||||||
return SlotProcessingDetails{
|
|
||||||
Context: bc.Context,
|
|
||||||
ServerEndpoint: bc.ServerEndpoint,
|
|
||||||
Db: bc.Db,
|
|
||||||
Metrics: bc.Metrics,
|
|
||||||
|
|
||||||
CheckDb: bc.CheckDb,
|
|
||||||
PerformBeaconBlockProcessing: bc.PerformBeaconBlockProcessing,
|
|
||||||
PerformBeaconStateProcessing: bc.PerformBeaconStateProcessing,
|
|
||||||
|
|
||||||
KnownGapTableIncrement: bc.KnownGapTableIncrement,
|
|
||||||
StartingSlot: bc.StartingSlot,
|
|
||||||
PreviousSlot: bc.PreviousSlot,
|
|
||||||
PreviousBlockRoot: bc.PreviousBlockRoot,
|
|
||||||
}
|
}
|
||||||
}
|
ParentRootUnmarshalError = "Unable to properly unmarshal the ParentRoot field in the SignedBeaconBlock."
|
||||||
|
MissingIdentifiedError = "Can't query state without a set slot or block_root"
|
||||||
|
MissingEth1Data = "Can't get the Eth1 block_hash"
|
||||||
|
VersionedUnmarshalerError = "Unable to create a versioned unmarshaler"
|
||||||
|
)
|
||||||
|
|
||||||
type ProcessSlot struct {
|
type ProcessSlot struct {
|
||||||
// Generic
|
// Generic
|
||||||
|
|
||||||
Slot Slot // The slot number.
|
Slot int // The slot number.
|
||||||
Epoch Epoch // The epoch number.
|
Epoch int // The epoch number.
|
||||||
BlockRoot string // The hex encoded string of the BlockRoot.
|
BlockRoot string // The hex encoded string of the BlockRoot.
|
||||||
StateRoot string // The hex encoded string of the StateRoot.
|
StateRoot string // The hex encoded string of the StateRoot.
|
||||||
ParentBlockRoot string // The hex encoded string of the parent block.
|
ParentBlockRoot string // The hex encoded string of the parent block.
|
||||||
Status string // The status of the block
|
Status string // The status of the block
|
||||||
HeadOrHistoric string // Is this the head or a historic slot. This is critical when trying to analyze errors and skipped slots.
|
HeadOrHistoric string // Is this the head or a historic slot. This is critical when trying to analyze errors and skipped slots.
|
||||||
Db sql.Database // The DB object used to write to the DB.
|
Db sql.Database // The DB object used to write to the DB.
|
||||||
Metrics *BeaconClientMetrics // An object to keep track of the beaconclient metrics
|
Metrics *BeaconClientMetrics // An object to keep track of the beaconclient metrics
|
||||||
PerformanceMetrics PerformanceMetrics // An object to keep track of performance metrics.
|
|
||||||
// BeaconBlock
|
// BeaconBlock
|
||||||
|
|
||||||
SszSignedBeaconBlock []byte // The entire SSZ encoded SignedBeaconBlock
|
SszSignedBeaconBlock []byte // The entire SSZ encoded SignedBeaconBlock
|
||||||
FullSignedBeaconBlock *SignedBeaconBlock // The unmarshaled BeaconState object, the unmarshalling could have errors.
|
FullSignedBeaconBlock si.SignedBeaconBlock // The unmarshaled BeaconState object, the unmarshalling could have errors.
|
||||||
|
|
||||||
// BeaconState
|
// BeaconState
|
||||||
FullBeaconState *BeaconState // The unmarshaled BeaconState object, the unmarshalling could have errors.
|
FullBeaconState state.BeaconState // The unmarshaled BeaconState object, the unmarshalling could have errors.
|
||||||
SszBeaconState []byte // The entire SSZ encoded BeaconState
|
SszBeaconState []byte // The entire SSZ encoded BeaconState
|
||||||
|
|
||||||
// DB Write objects
|
// DB Write objects
|
||||||
DbSlotsModel *DbSlots // The model being written to the slots table.
|
DbSlotsModel *DbSlots // The model being written to the slots table.
|
||||||
DbSignedBeaconBlockModel *DbSignedBeaconBlock // The model being written to the signed_block table.
|
DbSignedBeaconBlockModel *DbSignedBeaconBlock // The model being written to the signed_beacon_block table.
|
||||||
DbBeaconState *DbBeaconState // The model being written to the state table.
|
DbBeaconState *DbBeaconState // The model being written to the beacon_state table.
|
||||||
}
|
|
||||||
|
|
||||||
type PerformanceMetrics struct {
|
|
||||||
BeaconNodeBlockRetrievalTime time.Duration // How long it took to get the BeaconBlock from the Beacon Node.
|
|
||||||
BeaconNodeStateRetrievalTime time.Duration // How long it took to get the BeaconState from the Beacon Node.
|
|
||||||
ParseBeaconObjectForHash time.Duration // How long it took to get some information from the beacon objects.
|
|
||||||
CheckDbPreProcessing time.Duration // How long it takes to check the DB before processing a block.
|
|
||||||
CreateDbWriteObject time.Duration // How long it takes to create a DB write object.
|
|
||||||
TransactSlotOnly time.Duration // How long it takes to transact the slot information only.
|
|
||||||
CheckReorg time.Duration // How long it takes to check for Reorgs
|
|
||||||
CommitTransaction time.Duration // How long it takes to commit the final transaction.
|
|
||||||
TotalDbTransaction time.Duration // How long it takes from start to committing the entire DB transaction.
|
|
||||||
TotalProcessing time.Duration // How long it took to process the entire slot.
|
|
||||||
}
|
}
|
||||||
|
|
||||||
 // This function will do all the work to process the slot and write it to the DB.
-// It will return the error and error process. The error process is used for providing rich detail to the
-// known_gaps table.
-func processFullSlot(
-	ctx context.Context,
-	slot Slot,
-	blockRoot string,
-	stateRoot string,
-	previousSlot Slot,
-	previousBlockRoot string,
-	knownGapsTableIncrement int,
-	headOrHistoric string,
-	spd *SlotProcessingDetails) (error, string) {
-	select {
-	case <-ctx.Done():
-		return nil, ""
-	default:
-		totalStart := time.Now()
-		ps := &ProcessSlot{
-			Slot:           slot,
-			BlockRoot:      blockRoot,
-			StateRoot:      stateRoot,
-			HeadOrHistoric: headOrHistoric,
-			Db:             spd.Db,
-			Metrics:        spd.Metrics,
-			PerformanceMetrics: PerformanceMetrics{
-				BeaconNodeBlockRetrievalTime: 0,
-				BeaconNodeStateRetrievalTime: 0,
-				ParseBeaconObjectForHash:     0,
-				CheckDbPreProcessing:         0,
-				CreateDbWriteObject:          0,
-				TransactSlotOnly:             0,
-				CheckReorg:                   0,
-				CommitTransaction:            0,
-				TotalDbTransaction:           0,
-				TotalProcessing:              0,
-			},
-		}
-
-		g, _ := errgroup.WithContext(context.Background())
-
-		if spd.PerformBeaconStateProcessing {
-			// Get the BeaconState.
-			g.Go(func() error {
-				select {
-				case <-ctx.Done():
-					return nil
-				default:
-					start := time.Now()
-					err := ps.getBeaconState(spd.ServerEndpoint)
-					if err != nil {
-						return err
-					}
-					ps.PerformanceMetrics.BeaconNodeStateRetrievalTime = time.Since(start)
-					return nil
-				}
-			})
-		}
-
-		if spd.PerformBeaconBlockProcessing {
-			// Get the SignedBeaconBlock.
-			g.Go(func() error {
-				select {
-				case <-ctx.Done():
-					return nil
-				default:
-					start := time.Now()
-					err := ps.getSignedBeaconBlock(spd.ServerEndpoint)
-					if err != nil {
-						return err
-					}
-					ps.PerformanceMetrics.BeaconNodeBlockRetrievalTime = time.Since(start)
-					return nil
-				}
-			})
-		}
-
-		if err := g.Wait(); err != nil {
-			return err, "processSlot"
-		}
-
-		parseBeaconTime := time.Now()
-		finalBlockRoot, finalStateRoot, _, err := ps.provideFinalHash()
-		if err != nil {
-			return err, "CalculateBlockRoot"
-		}
-		ps.PerformanceMetrics.ParseBeaconObjectForHash = time.Since(parseBeaconTime)
-
-		if spd.CheckDb {
-			checkDbTime := time.Now()
-			var blockRequired bool
-			if spd.PerformBeaconBlockProcessing {
-				blockExists, err := checkSlotAndRoot(ps.Db, CheckSignedBeaconBlockStmt, ps.Slot, finalBlockRoot)
-				if err != nil {
-					return err, "checkDb"
-				}
-				blockRequired = !blockExists
-			}
-
-			var stateRequired bool
-			if spd.PerformBeaconStateProcessing {
-				stateExists, err := checkSlotAndRoot(ps.Db, CheckBeaconStateStmt, ps.Slot, finalStateRoot)
-				if err != nil {
-					return err, "checkDb"
-				}
-				stateRequired = !stateExists
-			}
-
-			if !blockRequired && !stateRequired {
-				log.WithField("slot", slot).Info("Slot already in the DB.")
-				return nil, ""
-			}
-			ps.PerformanceMetrics.CheckDbPreProcessing = time.Since(checkDbTime)
-		}
-
-		// Get this object ready to write
-		createDbWriteTime := time.Now()
-		dw, err := ps.createWriteObjects()
-		if err != nil {
-			return err, "blockRoot"
-		}
-		ps.PerformanceMetrics.CreateDbWriteObject = time.Since(createDbWriteTime)
-
-		// Write the object to the DB.
-		dbFullTransactionTime := time.Now()
-		defer func() {
-			err := dw.Tx.Rollback(dw.Ctx)
-			if err != nil && err != pgx.ErrTxClosed {
-				loghelper.LogError(err).Error("We were unable to Rollback a transaction")
-			}
-		}()
-
-		transactionTime := time.Now()
-		err = dw.transactFullSlot()
-		if err != nil {
-			return err, "processSlot"
-		}
-		ps.PerformanceMetrics.TransactSlotOnly = time.Since(transactionTime)
-
-		// Handle any reorgs or skipped slots.
-		reorgTime := time.Now()
-		headOrHistoric = strings.ToLower(headOrHistoric)
-		if headOrHistoric != "head" && headOrHistoric != "historic" {
-			return fmt.Errorf("headOrHistoric must be either historic or head"), ""
-		}
-		if ps.HeadOrHistoric == "head" && previousSlot != 0 && previousBlockRoot != "" && ps.Status != "skipped" {
-			ps.checkPreviousSlot(dw.Tx, dw.Ctx, previousSlot, previousBlockRoot, knownGapsTableIncrement)
-		}
-		ps.PerformanceMetrics.CheckReorg = time.Since(reorgTime)
-
-		// Commit the transaction
-		commitTime := time.Now()
-		if err = dw.Tx.Commit(dw.Ctx); err != nil {
-			return err, "transactionCommit"
-		}
-		ps.PerformanceMetrics.CommitTransaction = time.Since(commitTime)
-
-		// Total metric capture time.
-		ps.PerformanceMetrics.TotalDbTransaction = time.Since(dbFullTransactionTime)
-		ps.PerformanceMetrics.TotalProcessing = time.Since(totalStart)
-
-		log.WithFields(log.Fields{
-			"slot":               slot,
-			"performanceMetrics": fmt.Sprintf("%+v\n", ps.PerformanceMetrics),
-		}).Debug("Performance Metric output!")
-
-		return nil, ""
-	}
-}
+func processFullSlot(db sql.Database, serverAddress string, slot int, blockRoot string, stateRoot string, previousSlot int, previousBlockRoot string, headOrHistoric string, metrics *BeaconClientMetrics, knownGapsTableIncrement int) error {
+	ps := &ProcessSlot{
+		Slot:           slot,
+		BlockRoot:      blockRoot,
+		StateRoot:      stateRoot,
+		HeadOrHistoric: headOrHistoric,
+		Db:             db,
+		Metrics:        metrics,
+	}
+
+	g, _ := errgroup.WithContext(context.Background())
+	vUnmarshalerCh := make(chan *dt.VersionedUnmarshaler, 1)
+
+	// Get the BeaconState.
+	g.Go(func() error {
+		err := ps.getBeaconState(serverAddress, vUnmarshalerCh)
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+
+	// Get the SignedBeaconBlock.
+	g.Go(func() error {
+		err := ps.getSignedBeaconBlock(serverAddress, vUnmarshalerCh)
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+
+	if err := g.Wait(); err != nil {
+		writeKnownGaps(ps.Db, 1, ps.Slot, ps.Slot, err, "processSlot", ps.Metrics)
+		return err
+	}
+
+	if ps.HeadOrHistoric == "head" && previousSlot == 0 && previousBlockRoot == "" {
+		writeStartUpGaps(db, knownGapsTableIncrement, ps.Slot, ps.Metrics)
+	}
+
+	// Get this object ready to write
+	blockRootEndpoint := serverAddress + BcBlockRootEndpoint(strconv.Itoa(ps.Slot))
+	dw, err := ps.createWriteObjects(blockRootEndpoint)
+	if err != nil {
+		writeKnownGaps(ps.Db, 1, ps.Slot, ps.Slot, err, "blockRoot", ps.Metrics)
+		return err
+	}
+	// Write the object to the DB.
+	err = dw.writeFullSlot()
+	if err != nil {
+		writeKnownGaps(ps.Db, 1, ps.Slot, ps.Slot, err, "processSlot", ps.Metrics)
+		return err
+	}
+
+	// Handle any reorgs or skipped slots.
+	headOrHistoric = strings.ToLower(headOrHistoric)
+	if headOrHistoric != "head" && headOrHistoric != "historic" {
+		return fmt.Errorf("headOrHistoric must be either historic or head!")
+	}
+	if ps.HeadOrHistoric == "head" && previousSlot != 0 && previousBlockRoot != "" && ps.Status != "skipped" {
+		ps.checkPreviousSlot(previousSlot, previousBlockRoot, knownGapsTableIncrement)
+	}
+	return nil
+}
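The head of the main-branch function shows its cancellation pattern: each fetch runs in an errgroup goroutine, and a select on ctx.Done() turns the work into a no-op once the context is cancelled. A self-contained sketch of that pattern, with a placeholder fetch standing in for ps.getSignedBeaconBlock / ps.getBeaconState:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// fetch stands in for the indexer's block/state retrieval methods.
func fetch(ctx context.Context, name string) error {
	select {
	case <-ctx.Done():
		// Mirrors the indexer: a cancelled context makes the fetch a silent no-op.
		return nil
	default:
		fmt.Println("fetched", name)
		return nil
	}
}

func main() {
	ctx := context.Background()
	g, _ := errgroup.WithContext(context.Background())
	// Block and state are retrieved concurrently; g.Wait() surfaces the first error.
	g.Go(func() error { return fetch(ctx, "SignedBeaconBlock") })
	g.Go(func() error { return fetch(ctx, "BeaconState") })
	if err := g.Wait(); err != nil {
		fmt.Println("fetch failed:", err)
	}
}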
 // Handle a slot that is at head. A wrapper function for calling `handleFullSlot`.
-func processHeadSlot(slot Slot, blockRoot string, stateRoot string, spd SlotProcessingDetails) {
-	// Get the knownGaps at startUp
-	if spd.PreviousSlot == 0 && spd.PreviousBlockRoot == "" {
-		writeStartUpGaps(spd.Db, spd.KnownGapTableIncrement, slot, spd.Metrics)
-	}
-	// TODO(telackey): Why context.Background()?
-	err, errReason := processFullSlot(context.Background(), slot, blockRoot, stateRoot,
-		spd.PreviousSlot, spd.PreviousBlockRoot, spd.KnownGapTableIncrement, "head", &spd)
-	if err != nil {
-		writeKnownGaps(spd.Db, spd.KnownGapTableIncrement, slot, slot, err, errReason, spd.Metrics)
-	}
-}
+func processHeadSlot(db sql.Database, serverAddress string, slot int, blockRoot string, stateRoot string, previousSlot int, previousBlockRoot string, metrics *BeaconClientMetrics, knownGapsTableIncrement int) error {
+	return processFullSlot(db, serverAddress, slot, blockRoot, stateRoot, previousSlot, previousBlockRoot, "head", metrics, knownGapsTableIncrement)
+}

 // Handle a historic slot. A wrapper function for calling `handleFullSlot`.
-func handleHistoricSlot(ctx context.Context, slot Slot, spd SlotProcessingDetails) (error, string) {
-	return processFullSlot(ctx, slot, "", "", 0, "",
-		1, "historic", &spd)
-}
+// Commented because of the linter...... LOL
+//func handleHistoricSlot(db sql.Database, serverAddress string, slot int) error {
+//	return handleFullSlot(db, serverAddress, slot, "", "", 0, "", "historic")
+//}
 // Update the SszSignedBeaconBlock and FullSignedBeaconBlock object with their respective values.
-func (ps *ProcessSlot) getSignedBeaconBlock(serverAddress string) error {
+func (ps *ProcessSlot) getSignedBeaconBlock(serverAddress string, vmCh <-chan *dt.VersionedUnmarshaler) error {
 	var blockIdentifier string // Used to query the block
 	if ps.BlockRoot != "" {
 		blockIdentifier = ps.BlockRoot
+	} else if ps.Slot != 0 {
+		blockIdentifier = strconv.Itoa(ps.Slot)
 	} else {
-		blockIdentifier = ps.Slot.Format()
+		log.Error(MissingIdentifiedError)
+		return fmt.Errorf(MissingIdentifiedError)
 	}

 	blockEndpoint := serverAddress + BcBlockQueryEndpoint + blockIdentifier
-	sszSignedBeaconBlock, rc, err := querySsz(blockEndpoint, ps.Slot)
-
-	if err != nil || rc != 200 {
-		loghelper.LogSlotError(ps.Slot.Number(), err).Error("Unable to properly query the slot.")
-		ps.FullSignedBeaconBlock = nil
-		ps.SszSignedBeaconBlock = []byte{}
-		ps.ParentBlockRoot = ""
-		ps.Status = "skipped"
-
-		// A 404 is normal in the case of a "skipped" slot.
-		if rc == 404 {
-			return nil
-		}
-		return err
-	}
-
-	var signedBeaconBlock SignedBeaconBlock
-	err = signedBeaconBlock.UnmarshalSSZ(sszSignedBeaconBlock)
-	if err != nil {
-		loghelper.LogSlotError(ps.Slot.Number(), err).Error("Unable to unmarshal SignedBeaconBlock for slot.")
-		ps.FullSignedBeaconBlock = nil
-		ps.SszSignedBeaconBlock = []byte{}
-		ps.ParentBlockRoot = ""
-		ps.Status = "skipped"
-		return err
-	}
-
-	ps.FullSignedBeaconBlock = &signedBeaconBlock
-	ps.SszSignedBeaconBlock = sszSignedBeaconBlock
-
-	ps.ParentBlockRoot = toHex(ps.FullSignedBeaconBlock.Block().ParentRoot())
+	var err error
+	var rc int
+	ps.SszSignedBeaconBlock, rc, err = querySsz(blockEndpoint, strconv.Itoa(ps.Slot))
+	if err != nil {
+		loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error("Unable to properly query the slot.")
+		return err
+	}
+
+	vm := <-vmCh
+	if rc != 200 {
+		ps.FullSignedBeaconBlock = &wrapper.Phase0SignedBeaconBlock{}
+		ps.SszSignedBeaconBlock = []byte{}
+		ps.ParentBlockRoot = ""
+		ps.Status = "skipped"
+		return nil
+	}
+
+	if vm == nil {
+		return fmt.Errorf(VersionedUnmarshalerError)
+	}
+
+	ps.FullSignedBeaconBlock, err = vm.UnmarshalBeaconBlock(ps.SszSignedBeaconBlock)
+	if err != nil {
+		loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error("We are getting an error message when unmarshalling the SignedBeaconBlock.")
+		if ps.FullSignedBeaconBlock.Block().Slot() == 0 {
+			loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error(SlotUnmarshalError("SignedBeaconBlock"))
+			return fmt.Errorf(SlotUnmarshalError("SignedBeaconBlock"))
+		} else if ps.FullSignedBeaconBlock.Block().ParentRoot() == nil {
+			loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error(ParentRootUnmarshalError)
+			return fmt.Errorf(ParentRootUnmarshalError)
+		}
+		log.Warn("We received a processing error: ", err)
+	}
+	ps.ParentBlockRoot = "0x" + hex.EncodeToString(ps.FullSignedBeaconBlock.Block().ParentRoot())
 	return nil
 }
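Note the 404 handling in the main-branch version above: a 404 from the block endpoint marks the slot "skipped" and is not treated as an error. A standalone sketch of that behavior; the /eth/v2/beacon/blocks/{id} path follows the standard Beacon API, and the localhost URL is illustrative:

package main

import (
	"fmt"
	"io"
	"net/http"
)

// fetchBlockSsz returns (ssz, skipped, err): a 404 means the slot was skipped,
// which is a normal condition rather than an error.
func fetchBlockSsz(serverAddress string, slot string) ([]byte, bool, error) {
	req, err := http.NewRequest("GET", serverAddress+"/eth/v2/beacon/blocks/"+slot, nil)
	if err != nil {
		return nil, false, err
	}
	req.Header.Set("Accept", "application/octet-stream") // ask for raw SSZ, not JSON
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, false, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == 404 {
		return nil, true, nil // skipped slot
	}
	if resp.StatusCode != 200 {
		return nil, false, fmt.Errorf("HTTP Error: %d", resp.StatusCode)
	}
	body, err := io.ReadAll(resp.Body)
	return body, false, err
}

func main() {
	ssz, skipped, err := fetchBlockSsz("http://localhost:5052", "4700013")
	fmt.Println(len(ssz), skipped, err)
}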
 // Update the SszBeaconState and FullBeaconState object with their respective values.
-func (ps *ProcessSlot) getBeaconState(serverEndpoint string) error {
+func (ps *ProcessSlot) getBeaconState(serverEndpoint string, vmCh chan<- *dt.VersionedUnmarshaler) error {
 	var stateIdentifier string // Used to query the state
 	if ps.StateRoot != "" {
 		stateIdentifier = ps.StateRoot
+	} else if ps.Slot != 0 {
+		stateIdentifier = strconv.Itoa(ps.Slot)
 	} else {
-		stateIdentifier = ps.Slot.Format()
+		log.Error(MissingIdentifiedError)
+		return fmt.Errorf(MissingIdentifiedError)
 	}

 	stateEndpoint := serverEndpoint + BcStateQueryEndpoint + stateIdentifier
-	sszBeaconState, _, err := querySsz(stateEndpoint, ps.Slot)
-	if err != nil {
-		loghelper.LogSlotError(ps.Slot.Number(), err).Error("Unable to properly query the BeaconState.")
-		return err
-	}
-
-	var beaconState BeaconState
-	err = beaconState.UnmarshalSSZ(sszBeaconState)
-	if err != nil {
-		loghelper.LogSlotError(ps.Slot.Number(), err).Error("Unable to unmarshal the BeaconState.")
-		return err
-	}
-
-	ps.FullBeaconState = &beaconState
-	ps.SszBeaconState = sszBeaconState
+	ps.SszBeaconState, _, _ = querySsz(stateEndpoint, strconv.Itoa(ps.Slot))
+
+	versionedUnmarshaler, err := dt.FromState(ps.SszBeaconState)
+	if err != nil {
+		loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error(VersionedUnmarshalerError)
+		vmCh <- nil
+		return fmt.Errorf(VersionedUnmarshalerError)
+	}
+	vmCh <- versionedUnmarshaler
+	ps.FullBeaconState, err = versionedUnmarshaler.UnmarshalBeaconState(ps.SszBeaconState)
+	if err != nil {
+		loghelper.LogError(err).Error("We are getting an error message when unmarshalling the BeaconState")
+		if ps.FullBeaconState.Slot() == 0 {
+			loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error(SlotUnmarshalError("BeaconState"))
+			return fmt.Errorf(SlotUnmarshalError("BeaconState"))
+		} else if hex.EncodeToString(ps.FullBeaconState.Eth1Data().BlockHash) == "" {
+			loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error(MissingEth1Data)
+			return fmt.Errorf(MissingEth1Data)
+		}
+	}
 	return nil
 }
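In the v0.1.0 flow, getBeaconState derives a VersionedUnmarshaler from the state SSZ and hands it to getSignedBeaconBlock through the one-element vmCh channel, so the block goroutine waits until the fork version is known. A self-contained sketch of that handshake, with a plain string standing in for *dt.VersionedUnmarshaler:

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	// Buffered with capacity 1 so the state goroutine never blocks on send,
	// matching make(chan *dt.VersionedUnmarshaler, 1) in the v0.1.0 code.
	vmCh := make(chan string, 1)
	var g errgroup.Group

	g.Go(func() error { // stands in for getBeaconState
		vmCh <- "bellatrix-unmarshaler" // derived from the state's fork version
		return nil
	})
	g.Go(func() error { // stands in for getSignedBeaconBlock
		vm := <-vmCh // blocks until the state side has detected the fork
		fmt.Println("unmarshalling the block with:", vm)
		return nil
	})
	if err := g.Wait(); err != nil {
		fmt.Println(err)
	}
}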
 // Check to make sure that the previous block we processed is the parent of the current block.
-func (ps *ProcessSlot) checkPreviousSlot(tx sql.Tx, ctx context.Context, previousSlot Slot, previousBlockRoot string, knownGapsTableIncrement int) {
-	if nil == ps.FullSignedBeaconBlock {
-		log.Debug("Can't check block root, no current block.")
-		return
-	}
-	parentRoot := toHex(ps.FullSignedBeaconBlock.Block().ParentRoot())
-	slot := ps.Slot
-	if previousSlot == slot {
+func (ps *ProcessSlot) checkPreviousSlot(previousSlot int, previousBlockRoot string, knownGapsTableIncrement int) {
+	parentRoot := "0x" + hex.EncodeToString(ps.FullSignedBeaconBlock.Block().ParentRoot())
+	if previousSlot == int(ps.FullBeaconState.Slot()) {
 		log.WithFields(log.Fields{
-			"slot": slot,
+			"slot": ps.FullBeaconState.Slot,
 			"fork": true,
 		}).Warn("A fork occurred! The previous slot and current slot match.")
-		transactReorgs(tx, ctx, ps.Slot, ps.BlockRoot, ps.Metrics)
+		writeReorgs(ps.Db, strconv.Itoa(ps.Slot), ps.BlockRoot, ps.Metrics)
-	} else if previousSlot > slot {
-		log.WithFields(log.Fields{
-			"previousSlot": previousSlot,
-			"curSlot":      slot,
-		}).Warn("We noticed the previous slot is greater than the current slot.")
-	} else if previousSlot+1 != slot {
+	} else if previousSlot+1 != int(ps.FullBeaconState.Slot()) {
 		log.WithFields(log.Fields{
 			"previousSlot": previousSlot,
-			"currentSlot":  slot,
+			"currentSlot":  ps.FullBeaconState.Slot(),
 		}).Error("We skipped a few slots.")
-		transactKnownGaps(tx, ctx, knownGapsTableIncrement, previousSlot+1, slot-1, fmt.Errorf("gaps during head processing"), "headGaps", ps.Metrics)
+		writeKnownGaps(ps.Db, knownGapsTableIncrement, previousSlot+1, int(ps.FullBeaconState.Slot())-1, fmt.Errorf("Gaps during head processing"), "headGaps", ps.Metrics)
 	} else if previousBlockRoot != parentRoot {
 		log.WithFields(log.Fields{
 			"previousBlockRoot":  previousBlockRoot,
 			"currentBlockParent": parentRoot,
 		}).Error("The previousBlockRoot does not match the current blocks parent, an unprocessed fork might have occurred.")
-		transactReorgs(tx, ctx, previousSlot, parentRoot, ps.Metrics)
+		writeReorgs(ps.Db, strconv.Itoa(previousSlot), parentRoot, ps.Metrics)
 	} else {
 		log.Debug("Previous Slot and Current Slot are one distance from each other.")
 	}
 }
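The branch order above encodes five outcomes on the main branch: fork, out-of-order slot, gap, parent-root mismatch, and the normal one-slot advance (the v0.1.0 version lacks the out-of-order branch). A condensed, standalone restatement of that decision table, slot arithmetic only, with no DB writes:

package main

import "fmt"

// classify mirrors checkPreviousSlot's branch order on the main branch.
func classify(prevSlot, curSlot uint64, prevRoot, curParentRoot string) string {
	switch {
	case prevSlot == curSlot:
		return "fork: previous and current slot match, write a reorg"
	case prevSlot > curSlot:
		return "out of order: previous slot is greater than the current slot"
	case prevSlot+1 != curSlot:
		return "gap: slots were skipped, write known_gaps"
	case prevRoot != curParentRoot:
		return "reorg: parent root does not chain from the previous block root"
	default:
		return "ok: slots are one apart and the roots chain correctly"
	}
}

func main() {
	fmt.Println(classify(100, 101, "0xabc", "0xabc")) // ok
	fmt.Println(classify(100, 105, "0xabc", "0xabc")) // gap
	fmt.Println(classify(100, 101, "0xabc", "0xdef")) // reorg
}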
 // Transforms all the raw data into DB models that can be written to the DB.
-func (ps *ProcessSlot) createWriteObjects() (*DatabaseWriter, error) {
-	var status string
+func (ps *ProcessSlot) createWriteObjects(blockRootEndpoint string) (*DatabaseWriter, error) {
+	var (
+		stateRoot     string
+		blockRoot     string
+		status        string
+		eth1BlockHash string
+	)
+
+	if ps.Status == "skipped" {
+		stateRoot = ""
+		blockRoot = ""
+		eth1BlockHash = ""
+	} else {
+		if ps.StateRoot != "" {
+			stateRoot = ps.StateRoot
+		} else {
+			stateRoot = "0x" + hex.EncodeToString(ps.FullSignedBeaconBlock.Block().StateRoot())
+			log.Debug("StateRoot: ", stateRoot)
+		}
+
+		if ps.BlockRoot != "" {
+			blockRoot = ps.BlockRoot
+		} else {
+			var err error
+			blockRoot, err = queryBlockRoot(blockRootEndpoint, strconv.Itoa(ps.Slot))
+			if err != nil {
+				return nil, err
+			}
+		}
+		eth1BlockHash = "0x" + hex.EncodeToString(ps.FullSignedBeaconBlock.Block().Body().Eth1Data().BlockHash)
+	}
+
 	if ps.Status != "" {
 		status = ps.Status
 	} else {
 		status = "proposed"
 	}

-	parseBeaconTime := time.Now()
-	// These will normally be pre-calculated by this point.
-	blockRoot, stateRoot, eth1DataBlockHash, err := ps.provideFinalHash()
-	if err != nil {
-		return nil, err
-	}
-	ps.PerformanceMetrics.ParseBeaconObjectForHash = time.Since(parseBeaconTime)
-
-	payloadHeader := ps.provideExecutionPayloadDetails()
-
-	dw, err := CreateDatabaseWrite(ps.Db, ps.Slot, stateRoot, blockRoot, ps.ParentBlockRoot, eth1DataBlockHash,
-		payloadHeader, status, &ps.SszSignedBeaconBlock, &ps.SszBeaconState, ps.Metrics)
+	dw, err := CreateDatabaseWrite(ps.Db, ps.Slot, stateRoot, blockRoot, ps.ParentBlockRoot, eth1BlockHash, status, ps.SszSignedBeaconBlock, ps.SszBeaconState, ps.Metrics)
 	if err != nil {
 		return dw, err
 	}

 	return dw, nil
 }
-// This function will return the final blockRoot, stateRoot, and eth1DataBlockHash that will be
-// used to write to a DB.
-func (ps *ProcessSlot) provideFinalHash() (string, string, string, error) {
-	var (
-		stateRoot         string
-		blockRoot         string
-		eth1DataBlockHash string
-	)
-	if ps.Status == "skipped" {
-		stateRoot = ""
-		blockRoot = ""
-		eth1DataBlockHash = ""
-	} else {
-		if ps.StateRoot != "" {
-			stateRoot = ps.StateRoot
-		} else {
-			if nil != ps.FullSignedBeaconBlock {
-				stateRoot = toHex(ps.FullSignedBeaconBlock.Block().StateRoot())
-				log.Debug("BeaconBlock StateRoot: ", stateRoot)
-			} else {
-				log.Debug("BeaconBlock StateRoot: <nil beacon block>")
-			}
-		}
-
-		if ps.BlockRoot != "" {
-			blockRoot = ps.BlockRoot
-		} else {
-			if nil != ps.FullSignedBeaconBlock {
-				rawBlockRoot := ps.FullSignedBeaconBlock.Block().HashTreeRoot()
-				blockRoot = toHex(rawBlockRoot)
-				log.WithFields(log.Fields{"blockRoot": blockRoot}).Debug("Block Root from ssz")
-			} else {
-				log.Debug("BeaconBlock HashTreeRoot: <nil beacon block>")
-			}
-		}
-		if nil != ps.FullSignedBeaconBlock {
-			eth1DataBlockHash = toHex(ps.FullSignedBeaconBlock.Block().Body().Eth1Data().BlockHash)
-		}
-	}
-	return blockRoot, stateRoot, eth1DataBlockHash, nil
-}
-
-func (ps *ProcessSlot) provideExecutionPayloadDetails() *ExecutionPayloadHeader {
-	if nil == ps.FullSignedBeaconBlock || !ps.FullSignedBeaconBlock.IsBellatrix() {
-		return nil
-	}
-
-	payload := ps.FullSignedBeaconBlock.Block().Body().ExecutionPayloadHeader()
-	blockNumber := uint64(payload.BlockNumber)
-
-	// The earliest blocks on the Bellatrix fork, pre-Merge, have zeroed ExecutionPayloads.
-	// There is nothing useful to store in that case, even though the structure exists.
-	if blockNumber == 0 {
-		return nil
-	}
-
-	return payload
-}
-
-func toHex(r [32]byte) string {
-	return "0x" + hex.EncodeToString(r[:])
-}
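toHex is the small glue between the [32]byte roots returned by the beacon objects and the 0x-prefixed strings written to the DB. For illustration:

package main

import (
	"encoding/hex"
	"fmt"
)

// toHex, as above: render a 32-byte root as a 0x-prefixed hex string.
func toHex(r [32]byte) string {
	return "0x" + hex.EncodeToString(r[:])
}

func main() {
	var root [32]byte
	root[0], root[1] = 0xde, 0xad
	fmt.Println(toHex(root)) // 0xdead followed by 60 zeros
}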
@ -18,16 +18,40 @@
 package beaconclient

 import (
-	"bufio"
-	"bytes"
+	"encoding/json"
 	"fmt"
-	"io"
+	"io/ioutil"
 	"net/http"

 	log "github.com/sirupsen/logrus"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
 )

+// A helper function to query endpoints that utilize slots.
+func querySsz(endpoint string, slot string) ([]byte, int, error) {
+	log.WithFields(log.Fields{"endpoint": endpoint}).Debug("Querying endpoint")
+	client := &http.Client{}
+	req, err := http.NewRequest("GET", endpoint, nil)
+	if err != nil {
+		loghelper.LogSlotError(slot, err).Error("Unable to create a request!")
+		return nil, 0, fmt.Errorf("Unable to create a request!: %s", err.Error())
+	}
+	req.Header.Set("Accept", "application/octet-stream")
+	response, err := client.Do(req)
+	if err != nil {
+		loghelper.LogSlotError(slot, err).Error("Unable to query Beacon Node!")
+		return nil, 0, fmt.Errorf("Unable to query Beacon Node: %s", err.Error())
+	}
+	defer response.Body.Close()
+	rc := response.StatusCode
+	body, err := ioutil.ReadAll(response.Body)
+	if err != nil {
+		loghelper.LogSlotError(slot, err).Error("Unable to turn response into a []bytes array!")
+		return nil, rc, fmt.Errorf("Unable to turn response into a []bytes array!: %s", err.Error())
+	}
+	return body, rc, nil
+}
+
 // Object to unmarshal the BlockRootResponse
 type BlockRootResponse struct {
 	Data BlockRootMessage `json:"data"`
@ -38,36 +62,35 @@ type BlockRootMessage struct {
 	Root string `json:"root"`
 }

-// A helper function to query endpoints that utilize slots.
-func querySsz(endpoint string, slot Slot) ([]byte, int, error) {
+// A function to query the blockroot for a given slot.
+func queryBlockRoot(endpoint string, slot string) (string, error) {
 	log.WithFields(log.Fields{"endpoint": endpoint}).Debug("Querying endpoint")
 	client := &http.Client{}
 	req, err := http.NewRequest("GET", endpoint, nil)
 	if err != nil {
-		loghelper.LogSlotError(slot.Number(), err).Error("Unable to create a request!")
-		return nil, 0, fmt.Errorf("Unable to create a request!: %s", err.Error())
+		loghelper.LogSlotError(slot, err).Error("Unable to create a request!")
+		return "", fmt.Errorf("Unable to create a request!: %s", err.Error())
 	}
-	req.Header.Set("Accept", "application/octet-stream")
+	req.Header.Set("Accept", "application/json")
 	response, err := client.Do(req)
 	if err != nil {
-		loghelper.LogSlotError(slot.Number(), err).Error("Unable to query Beacon Node!")
-		return nil, 0, fmt.Errorf("Unable to query Beacon Node: %s", err.Error())
+		loghelper.LogSlotError(slot, err).Error("Unable to query Beacon Node!")
+		return "", fmt.Errorf("Unable to query Beacon Node: %s", err.Error())
 	}
 	defer response.Body.Close()
-
-	rc := response.StatusCode
-	// Any 2xx code is OK.
-	if rc < 200 || rc >= 300 {
-		return nil, rc, fmt.Errorf("HTTP Error: %d", rc)
-	}
-
-	var body bytes.Buffer
-	buf := bufio.NewWriter(&body)
-	_, err = io.Copy(buf, response.Body)
+	body, err := ioutil.ReadAll(response.Body)
 	if err != nil {
-		loghelper.LogSlotError(slot.Number(), err).Error("Unable to turn response into a []bytes array!")
-		return nil, rc, fmt.Errorf("Unable to turn response into a []bytes array!: %s", err.Error())
+		loghelper.LogSlotError(slot, err).Error("Unable to turn response into a []bytes array!")
+		return "", fmt.Errorf("Unable to turn response into a []bytes array!: %s", err.Error())
 	}

-	return body.Bytes(), rc, nil
+	resp := BlockRootResponse{}
+	if err := json.Unmarshal(body, &resp); err != nil {
+		loghelper.LogEndpoint(endpoint).WithFields(log.Fields{
+			"rawMessage": string(body),
+			"err":        err,
+		}).Error("Unable to unmarshal the block root")
+		return "", err
+	}
+	return resp.Data.Root, nil
 }
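queryBlockRoot decodes the standard {"data":{"root":...}} envelope using the BlockRootResponse/BlockRootMessage types above. A minimal sketch of that decode; the JSON literal is illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

// The same two types the indexer declares for the block-root endpoint.
type BlockRootMessage struct {
	Root string `json:"root"`
}

type BlockRootResponse struct {
	Data BlockRootMessage `json:"data"`
}

func main() {
	// Illustrative payload in the shape the beacon node returns.
	raw := []byte(`{"data":{"root":"0xdeadbeef"}}`)
	var resp BlockRootResponse
	if err := json.Unmarshal(raw, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Data.Root) // 0xdeadbeef
}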
@ -1,69 +0,0 @@
-package beaconclient_test
-
-import (
-	log "github.com/sirupsen/logrus"
-	"os"
-	"time"
-
-	. "github.com/onsi/ginkgo/v2"
-
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
-)
-
-var (
-	prodConfig = Config{
-		protocol:                os.Getenv("bc_protocol"),
-		address:                 os.Getenv("bc_address"),
-		port:                    getEnvInt(os.Getenv("bc_port"), 5052),
-		dbHost:                  os.Getenv("db_host"),
-		dbPort:                  getEnvInt(os.Getenv("db_port"), 8076),
-		dbName:                  os.Getenv("db_name"),
-		dbUser:                  os.Getenv("db_user"),
-		dbPassword:              os.Getenv("db_password"),
-		dbDriver:                os.Getenv("db_driver"),
-		knownGapsTableIncrement: 100000000,
-		bcUniqueIdentifier:      100,
-		checkDb:                 false,
-		performBeaconBlockProcessing: true,
-		// As of 2022-09, generating and downloading the full BeaconState is so slow it will cause the tests to fail.
-		performBeaconStateProcessing: false,
-	}
-)
-
-// Note: These tests expect to communicate with a fully-synced Beacon node.
-
-var _ = Describe("Systemvalidation", Label("system"), func() {
-	level, _ := log.ParseLevel("debug")
-	log.SetLevel(level)
-
-	Describe("Run the application against a running lighthouse node", func() {
-		Context("When we receive head messages", func() {
-			It("We should process the messages successfully", func() {
-				bc := setUpTest(prodConfig, "10000000000")
-				processProdHeadBlocks(bc, 3, 0, 0, 0)
-			})
-		})
-		Context("When we have historical and knownGaps slots to process", Label("system-batch"), func() {
-			It("Should process them successfully", func() {
-				bc := setUpTest(prodConfig, "10000000000")
-				// Known Gaps
-				BeaconNodeTester.writeEventToKnownGaps(bc, 100, 101)
-				BeaconNodeTester.runKnownGapsProcess(bc, 2, 2, 0, 0, 0)
-
-				// Historical
-				BeaconNodeTester.writeEventToHistoricProcess(bc, 2375703, 2375703, 10)
-				BeaconNodeTester.runHistoricalProcess(bc, 2, 3, 0, 0, 0)
-
-				time.Sleep(2 * time.Second)
-				validatePopularBatchBlocks(bc)
-			})
-		})
-	})
-})
-
-// Start head tracking and wait for the expected results.
-func processProdHeadBlocks(bc *beaconclient.BeaconClient, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) {
-	go bc.CaptureHead()
-	time.Sleep(1 * time.Second)
-	validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError)
-}
@ -20,8 +20,8 @@ import (
 	"fmt"

 	log "github.com/sirupsen/logrus"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
 )

 var _ sql.Database = &DB{}
@ -49,7 +49,7 @@ func SetupPostgresDb(dbHostname string, dbPort int, dbName string, dbUsername st
 		"driver_name_provided": driverName,
 	}).Error("Can't resolve driver type")
 }
-log.Info("Using Driver: ", DbDriver)
+log.Info("Using Driver:", DbDriver)

 postgresConfig := Config{
 	Hostname: dbHostname,
@ -23,7 +23,7 @@ import (
 	"github.com/jackc/pgconn"
 	"github.com/jackc/pgx/v4"
 	"github.com/jackc/pgx/v4/pgxpool"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
 )

 // pgxDriver driver, implements sql.Driver
@ -23,9 +23,9 @@ import (

 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql/postgres"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/database/sql/postgres"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/testhelpers"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/testhelpers"
 )

 var _ = Describe("Pgx", func() {
@ -25,7 +25,7 @@ import (
 	"time"

 	log "github.com/sirupsen/logrus"
-	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
+	"github.com/vulcanize/ipld-ethcl-indexer/pkg/loghelper"
 )

 // operation is a clean up function on shutting down
@ -27,26 +27,9 @@ func LogError(err error) *log.Entry {
 	})
 }

-// A simple helper function to log slot and error.
-func LogSlotError(slot uint64, err error) *log.Entry {
+func LogSlotError(slot string, err error) *log.Entry {
 	return log.WithFields(log.Fields{
 		"err":  err,
 		"slot": slot,
 	})
 }
-
-func LogSlotRangeError(startSlot uint64, endSlot uint64, err error) *log.Entry {
-	return log.WithFields(log.Fields{
-		"err":       err,
-		"startSlot": startSlot,
-		"endSlot":   endSlot,
-	})
-}
-func LogSlotRangeStatementError(startSlot uint64, endSlot uint64, statement string, err error) *log.Entry {
-	return log.WithFields(log.Fields{
-		"err":          err,
-		"startSlot":    startSlot,
-		"endSlot":      endSlot,
-		"SqlStatement": statement,
-	})
-}
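Both versions are thin wrappers over logrus structured fields; only the slot type differs between branches. A runnable sketch of the pattern, using the newer uint64 signature with a made-up slot number:

package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
)

// A standalone copy of the uint64 variant: wrap err and slot into structured fields.
func logSlotError(slot uint64, err error) *log.Entry {
	return log.WithFields(log.Fields{
		"err":  err,
		"slot": slot,
	})
}

func main() {
	logSlotError(4700013, fmt.Errorf("example failure")).Error("Unable to process the slot.")
}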
@ -20,7 +20,7 @@ import (
 )

 // A simple helper function that will help wrap the reorg error messages.
-func LogReorgError(slot uint64, latestBlockRoot string, err error) *log.Entry {
+func LogReorgError(slot string, latestBlockRoot string, err error) *log.Entry {
 	return log.WithFields(log.Fields{
 		"err":  err,
 		"slot": slot,
@ -29,7 +29,7 @@ func LogReorgError(slot uint64, latestBlockRoot string, err error) *log.Entry {
 }

 // A simple helper function that will help wrap regular reorg messages.
-func LogReorg(slot uint64, latestBlockRoot string) *log.Entry {
+func LogReorg(slot string, latestBlockRoot string) *log.Entry {
 	return log.WithFields(log.Fields{
 		"slot":            slot,
 		"latestBlockRoot": latestBlockRoot,