76: Add indexing of ExecutionPayloads (and other Merge-related updates). #73

.github/workflows/generic-testing.yml (vendored, 8 changes)

@@ -17,7 +17,7 @@ on:
 env:
 stack-orchestrator-ref: ${{ inputs.stack-orchestrator-ref || '7fb664270a0ba09e2caa3095e8c91f3fdb5b38af' }}
-ipld-eth-beacon-db-ref: ${{ inputs.ipld-eth-beacon-db-ref || '3dfe416302d553f8240f6051c08a7899b0e39e12' }}
+ipld-eth-beacon-db-ref: ${{ inputs.ipld-eth-beacon-db-ref || '6b38fe9b18f7b19a803c626b742cafdccc1a2365' }}
 ssz-data-ref: ${{ inputs.ssz-data-ref || 'main' }}
 GOPATH: /tmp/go
 jobs:
@@ -64,7 +64,11 @@ jobs:
 - name: Check to make sure HEALTH file is present
 shell: bash
 run: |
-until $(docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-indexer.yml" --env-file ./config.sh cp ipld-eth-beacon-indexer:/root/HEALTH ./HEALTH) ; do sleep 10; done
+until $(docker compose \
+-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-db.yml" \
+-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-indexer.yml" \
+-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-lighthouse.yml" \
+--env-file ./config.sh cp ipld-eth-beacon-indexer:/root/HEALTH ./HEALTH) ; do sleep 10; done
 cat ./HEALTH
 if [[ "$(cat ./HEALTH)" -eq "0" ]]; then echo "Application boot successful" && (exit 0); else docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-indexer.yml" cp ipld-eth-beacon-indexer:/root/ipld-eth-beacon-indexer.log . && cat ipld-eth-beacon-indexer.log && (exit 1); fi

.github/workflows/system-tests.yml (vendored, 2 changes)

@@ -15,7 +15,7 @@ on:
 required: true
 env:
 stack-orchestrator-ref: ${{ inputs.stack-orchestrator-ref || '7fb664270a0ba09e2caa3095e8c91f3fdb5b38af' }}
-ipld-eth-beacon-db-ref: ${{ inputs.ipld-eth-beacon-db-ref || '3dfe416302d553f8240f6051c08a7899b0e39e12' }}
+ipld-eth-beacon-db-ref: ${{ inputs.ipld-eth-beacon-db-ref || '6b38fe9b18f7b19a803c626b742cafdccc1a2365' }}
 GOPATH: /tmp/go
 bc_protocol: "http"
 bc_address: ${{secrets.BC_ADDRESS}}

@@ -13,8 +13,9 @@ RUN GCO_ENABLED=0 GOOS=linux go build -race -ldflags="-s -w" -o ipld-eth-beacon-
 RUN chmod +x ipld-eth-beacon-indexer

 FROM frolvlad/alpine-bash:latest
-RUN apk --no-cache add ca-certificates libstdc++
+RUN apk --no-cache add ca-certificates libstdc++ busybox-extras
 WORKDIR /root/
 COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-beacon-indexer/ipld-eth-beacon-indexer /root/ipld-eth-beacon-indexer
 ADD entrypoint.sh .
+ADD ipld-eth-beacon-config.json .
 ENTRYPOINT ["./entrypoint.sh"]

Makefile (12 changes)

@@ -65,7 +65,17 @@ integration-test-local-no-race:
 unit-test-local:
 go vet ./...
 go fmt ./...
-$(GINKGO) -r --label-filter unit \
+$(GINKGO) -r --label-filter 'unit && !flaky' \
+--randomize-all --randomize-suites \
+--flake-attempts=3 \
+--fail-on-pending --keep-going \
+--trace
+
+.PHONY: unit-test-local-bellatrix
+unit-test-local-bellatrix:
+go vet ./...
+go fmt ./...
+$(GINKGO) -r --label-filter 'unit && !flaky && bellatrix' \
 --randomize-all --randomize-suites \
 --flake-attempts=3 \
 --fail-on-pending --keep-going \
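
The tightened --label-filter expressions only select specs that carry matching Ginkgo v2 labels. A minimal sketch of how such labels attach to specs, using hypothetical suite and spec names rather than anything from this repository:

package beaconclient_test

import (
	. "github.com/onsi/ginkgo/v2"
)

// ginkgo -r --label-filter 'unit && !flaky && bellatrix' runs only specs whose
// labels satisfy the expression; anything tagged "flaky" is dropped by the
// '!flaky' clause used in both Makefile targets above.
var _ = Describe("Example suite", Label("unit"), func() {
	It("processes a Bellatrix payload", Label("bellatrix"), func() {
		// unit-level assertions would go here
	})

	It("depends on a live beacon node", Label("flaky"), func() {
		// excluded by '!flaky'
	})
})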

@@ -47,7 +47,8 @@ func bootApp() {

 Bc, Db, err := boot.BootApplicationWithRetry(ctx, viper.GetString("db.address"), viper.GetInt("db.port"), viper.GetString("db.name"), viper.GetString("db.username"), viper.GetString("db.password"), viper.GetString("db.driver"),
 viper.GetString("bc.address"), viper.GetInt("bc.port"), viper.GetString("bc.connectionProtocol"), viper.GetString("bc.type"), viper.GetInt("bc.bootRetryInterval"), viper.GetInt("bc.bootMaxRetry"),
-viper.GetInt("kg.increment"), "boot", viper.GetBool("t.skipSync"), viper.GetInt("bc.uniqueNodeIdentifier"), viper.GetBool("bc.checkDb"))
+viper.GetInt("kg.increment"), "boot", viper.GetBool("t.skipSync"), viper.GetInt("bc.uniqueNodeIdentifier"), viper.GetBool("bc.checkDb"),
+viper.GetBool("bc.performBeaconBlockProcessing"), viper.GetBool("bc.performBeaconStateProcessing"))
 if err != nil {
 StopApplicationPreBoot(err, Db)
 }

cmd/full.go (10 changes)

@@ -19,6 +19,7 @@ package cmd
 import (
 "context"
 "fmt"
+"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
 "strconv"

 log "github.com/sirupsen/logrus"
@@ -34,7 +35,7 @@ import (
 var fullCmd = &cobra.Command{
 Use: "full",
 Short: "Capture all components of the application (head and historical)",
-Long: `Capture all components of the application (head and historical`,
+Long: `Capture all components of the application (head and historical)`,
 Run: func(cmd *cobra.Command, args []string) {
 startFullProcessing()
 },
@@ -62,7 +63,8 @@ func startFullProcessing() {

 Bc, Db, err := boot.BootApplicationWithRetry(ctx, viper.GetString("db.address"), viper.GetInt("db.port"), viper.GetString("db.name"), viper.GetString("db.username"), viper.GetString("db.password"), viper.GetString("db.driver"),
 viper.GetString("bc.address"), viper.GetInt("bc.port"), viper.GetString("bc.connectionProtocol"), viper.GetString("bc.type"), viper.GetInt("bc.bootRetryInterval"), viper.GetInt("bc.bootMaxRetry"),
-viper.GetInt("kg.increment"), "head", viper.GetBool("t.skipSync"), viper.GetInt("bc.uniqueNodeIdentifier"), viper.GetBool("bc.checkDb"))
+viper.GetInt("kg.increment"), "head", viper.GetBool("t.skipSync"), viper.GetInt("bc.uniqueNodeIdentifier"), viper.GetBool("bc.checkDb"),
+viper.GetBool("bc.performBeaconBlockProcessing"), viper.GetBool("bc.performBeaconStateProcessing"))
 if err != nil {
 StopApplicationPreBoot(err, Db)
 }
@@ -80,7 +82,7 @@ func startFullProcessing() {

 errG, _ := errgroup.WithContext(context.Background())
 errG.Go(func() error {
-errs := Bc.CaptureHistoric(hpContext, viper.GetInt("bc.maxHistoricProcessWorker"))
+errs := Bc.CaptureHistoric(hpContext, viper.GetInt("bc.maxHistoricProcessWorker"), beaconclient.Slot(viper.GetUint64("bc.minimumSlot")))
 if len(errs) != 0 {
 log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing historic events")
@@ -94,7 +96,7 @@ func startFullProcessing() {
 go func() {
 errG := new(errgroup.Group)
 errG.Go(func() error {
-errs := Bc.ProcessKnownGaps(kgCtx, viper.GetInt("kg.maxKnownGapsWorker"))
+errs := Bc.ProcessKnownGaps(kgCtx, viper.GetInt("kg.maxKnownGapsWorker"), beaconclient.Slot(viper.GetUint64("kg.minimumSlot")))
 if len(errs) != 0 {
 log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps")
 return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps")
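
Across cmd/boot.go, cmd/full.go, cmd/head.go, and cmd/historic.go the pattern is the same: the two new processing toggles are threaded into boot, and a configurable lower slot bound is handed to the historic and known-gaps processors. A condensed sketch of that shared shape, with a hypothetical helper name and assuming, as the len(errs) checks above suggest, that both processors return a slice of errors (in the real commands these calls live inside errgroup goroutines):

package cmd

import (
	"context"

	"github.com/spf13/viper"
	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
)

// runBackfill is a hypothetical helper condensing the calls above: both
// processors now receive a minimum slot read from the new configuration keys.
func runBackfill(ctx context.Context, bc *beaconclient.BeaconClient) []error {
	minHistoric := beaconclient.Slot(viper.GetUint64("bc.minimumSlot"))
	minKnownGaps := beaconclient.Slot(viper.GetUint64("kg.minimumSlot"))

	errs := bc.CaptureHistoric(ctx, viper.GetInt("bc.maxHistoricProcessWorker"), minHistoric)
	errs = append(errs, bc.ProcessKnownGaps(ctx, viper.GetInt("kg.maxKnownGapsWorker"), minKnownGaps)...)
	return errs
}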

@@ -19,6 +19,7 @@ package cmd
 import (
 "context"
 "fmt"
+"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
 "net/http"
 "strconv"

@@ -50,7 +51,8 @@ func startHeadTracking() {

 Bc, Db, err := boot.BootApplicationWithRetry(ctx, viper.GetString("db.address"), viper.GetInt("db.port"), viper.GetString("db.name"), viper.GetString("db.username"), viper.GetString("db.password"), viper.GetString("db.driver"),
 viper.GetString("bc.address"), viper.GetInt("bc.port"), viper.GetString("bc.connectionProtocol"), viper.GetString("bc.type"), viper.GetInt("bc.bootRetryInterval"), viper.GetInt("bc.bootMaxRetry"),
-viper.GetInt("kg.increment"), "head", viper.GetBool("t.skipSync"), viper.GetInt("bc.uniqueNodeIdentifier"), viper.GetBool("bc.checkDb"))
+viper.GetInt("kg.increment"), "head", viper.GetBool("t.skipSync"), viper.GetInt("bc.uniqueNodeIdentifier"), viper.GetBool("bc.checkDb"),
+viper.GetBool("bc.performBeaconBlockProcessing"), viper.GetBool("bc.performBeaconStateProcessing"))
 if err != nil {
 StopApplicationPreBoot(err, Db)
 }
@@ -68,7 +70,7 @@ func startHeadTracking() {
 go func() {
 errG := new(errgroup.Group)
 errG.Go(func() error {
-errs := Bc.ProcessKnownGaps(kgCtx, viper.GetInt("kg.maxKnownGapsWorker"))
+errs := Bc.ProcessKnownGaps(kgCtx, viper.GetInt("kg.maxKnownGapsWorker"), beaconclient.Slot(viper.GetUint64("kg.minimumSlot")))
 if len(errs) != 0 {
 log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps")
 return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps")

@@ -19,6 +19,7 @@ package cmd
 import (
 "context"
 "fmt"
+"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
 "os"
 "strconv"

@@ -50,7 +51,8 @@ func startHistoricProcessing() {

 Bc, Db, err := boot.BootApplicationWithRetry(ctx, viper.GetString("db.address"), viper.GetInt("db.port"), viper.GetString("db.name"), viper.GetString("db.username"), viper.GetString("db.password"), viper.GetString("db.driver"),
 viper.GetString("bc.address"), viper.GetInt("bc.port"), viper.GetString("bc.connectionProtocol"), viper.GetString("bc.type"), viper.GetInt("bc.bootRetryInterval"), viper.GetInt("bc.bootMaxRetry"),
-viper.GetInt("kg.increment"), "historic", viper.GetBool("t.skipSync"), viper.GetInt("bc.uniqueNodeIdentifier"), viper.GetBool("bc.checkDb"))
+viper.GetInt("kg.increment"), "historic", viper.GetBool("t.skipSync"), viper.GetInt("bc.uniqueNodeIdentifier"), viper.GetBool("bc.checkDb"),
+viper.GetBool("bc.performBeaconBlockProcessing"), viper.GetBool("bc.performBeaconStateProcessing"))
 if err != nil {
 StopApplicationPreBoot(err, Db)
 }
@@ -64,7 +66,7 @@ func startHistoricProcessing() {

 errG, _ := errgroup.WithContext(context.Background())
 errG.Go(func() error {
-errs := Bc.CaptureHistoric(hpContext, viper.GetInt("bc.maxHistoricProcessWorker"))
+errs := Bc.CaptureHistoric(hpContext, viper.GetInt("bc.maxHistoricProcessWorker"), beaconclient.Slot(viper.GetUint64("bc.minimumSlot")))
 if len(errs) != 0 {
 log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing historic events")
@@ -79,7 +81,7 @@ func startHistoricProcessing() {
 go func() {
 errG := new(errgroup.Group)
 errG.Go(func() error {
-errs := Bc.ProcessKnownGaps(kgContext, viper.GetInt("kg.maxKnownGapsWorker"))
+errs := Bc.ProcessKnownGaps(kgContext, viper.GetInt("kg.maxKnownGapsWorker"), beaconclient.Slot(viper.GetUint64("kg.minimumSlot")))
 if len(errs) != 0 {
 log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps")
 return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps")

@@ -1,40 +0,0 @@
-{
-"db": {
-"address": "localhost",
-"password": "password",
-"port": 8076,
-"username": "vdbm",
-"name": "vulcanize_testing",
-"driver": "PGX"
-},
-"bc": {
-"address": "localhost",
-"port": 5052,
-"type": "lighthouse",
-"bootRetryInterval": 30,
-"bootMaxRetry": 5,
-"maxHistoricProcessWorker": 2,
-"connectionProtocol": "http",
-"uniqueNodeIdentifier": 100,
-"checkDb": true
-},
-"t": {
-"skipSync": true
-},
-"log": {
-"level": "debug",
-"output": true,
-"file": "./ipld-eth-beacon-indexer.log",
-"format": "json"
-},
-"kg": {
-"increment": 10000,
-"processKnownGaps": true,
-"maxKnownGapsWorker": 2
-},
-"pm": {
-"address": "localhost",
-"port": 9000,
-"metrics": true
-}
-}

config/example.ipld-eth-beacon-indexer-config.json (symbolic link, 1 change)

@@ -0,0 +1 @@
+../ipld-eth-beacon-config.json

config/tel.ipld-eth-beacon-indexer-config.json (new file, 40 changes)

@@ -0,0 +1,40 @@
+{
+"db": {
+"address": "localhost",
+"password": "secret12",
+"port": 45432,
+"username": "postgres",
+"name": "postgres",
+"driver": "PGX"
+},
+"bc": {
+"address": "localhost",
+"port": 8001,
+"type": "lighthouse",
+"bootRetryInterval": 30,
+"bootMaxRetry": 5,
+"maxHistoricProcessWorker": 2,
+"connectionProtocol": "http",
+"uniqueNodeIdentifier": 100,
+"checkDb": true
+},
+"t": {
+"skipSync": true
+},
+"log": {
+"level": "debug",
+"output": true,
+"file": "./ipld-eth-beacon-indexer.log",
+"format": "json"
+},
+"kg": {
+"increment": 10000,
+"processKnownGaps": true,
+"maxKnownGapsWorker": 2
+},
+"pm": {
+"address": "localhost",
+"port": 9000,
+"metrics": true
+}
+}

@@ -20,5 +20,6 @@ if [ ${CAPTURE_MODE} == "boot" ]; then

 tail -f /dev/null
 else
-exec /root/ipld-eth-beacon-indexer capture ${CAPTURE_MODE} --config /root/ipld-eth-beacon-config.json > /root/ipld-eth-beacon-indexer.output
+exec /root/ipld-eth-beacon-indexer capture ${CAPTURE_MODE} --config /root/ipld-eth-beacon-config.json > /dev/null &
+tail -F ipld-eth-beacon-indexer.log
 fi

go.mod (100 changes)

@@ -5,106 +5,94 @@ go 1.18
 require (
 github.com/ipfs/go-ipfs-blockstore v1.2.0
 github.com/ipfs/go-ipfs-ds-help v1.1.0
-github.com/jackc/pgconn v1.12.0
+github.com/jackc/pgconn v1.13.0
-github.com/multiformats/go-multihash v0.1.0
+github.com/multiformats/go-multihash v0.2.1
 github.com/onsi/ginkgo/v2 v2.1.4
 github.com/onsi/gomega v1.19.0
-github.com/prometheus/client_golang v1.12.1
-github.com/r3labs/sse v0.0.0-20210224172625-26fe804710bc
-github.com/sirupsen/logrus v1.8.1
+github.com/pkg/errors v0.9.1
+github.com/prometheus/client_golang v1.13.0
+github.com/protolambda/zrnt v0.28.0
+github.com/protolambda/ztyp v0.2.2
+github.com/r3labs/sse/v2 v2.8.1
+github.com/sirupsen/logrus v1.9.0
 )

 require (
+github.com/benbjohnson/clock v1.3.0 // indirect
 github.com/beorn7/perks v1.0.1 // indirect
 github.com/cespare/xxhash/v2 v2.1.2 // indirect
-github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
-github.com/ethereum/go-ethereum v1.10.17 // indirect
 github.com/gogo/protobuf v1.3.2 // indirect
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 github.com/golang/protobuf v1.5.2 // indirect
 github.com/google/uuid v1.3.0 // indirect
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.0 // indirect
 github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
-github.com/herumi/bls-eth-go-binary v0.0.0-20210917013441-d37c07cfda4e // indirect
+github.com/holiman/uint256 v1.2.0 // indirect
 github.com/ipfs/bbloom v0.0.4 // indirect
 github.com/ipfs/go-block-format v0.0.3 // indirect
-github.com/ipfs/go-cid v0.1.0 // indirect
+github.com/ipfs/go-cid v0.3.2 // indirect
-github.com/ipfs/go-datastore v0.5.0 // indirect
+github.com/ipfs/go-datastore v0.6.0 // indirect
 github.com/ipfs/go-ipfs-util v0.0.2 // indirect
-github.com/ipfs/go-ipld-format v0.3.0 // indirect
+github.com/ipfs/go-ipld-format v0.4.0 // indirect
 github.com/ipfs/go-log v1.0.5 // indirect
-github.com/ipfs/go-log/v2 v2.5.0 // indirect
+github.com/ipfs/go-log/v2 v2.5.1 // indirect
 github.com/ipfs/go-metrics-interface v0.0.1 // indirect
 github.com/jackc/chunkreader/v2 v2.0.1 // indirect
 github.com/jackc/pgio v1.0.0 // indirect
 github.com/jackc/pgpassfile v1.0.0 // indirect
-github.com/jackc/pgproto3/v2 v2.3.0 // indirect
+github.com/jackc/pgproto3/v2 v2.3.1 // indirect
 github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
-github.com/jackc/pgtype v1.11.0 // indirect
+github.com/jackc/pgtype v1.12.0 // indirect
-github.com/jackc/puddle v1.2.1 // indirect
+github.com/jackc/puddle v1.3.0 // indirect
 github.com/jbenet/goprocess v0.1.4 // indirect
-github.com/klauspost/cpuid/v2 v2.0.12 // indirect
+github.com/kilic/bls12-381 v0.1.0 // indirect
+github.com/klauspost/cpuid/v2 v2.1.1 // indirect
 github.com/lib/pq v1.10.5 // indirect
-github.com/mattn/go-isatty v0.0.14 // indirect
+github.com/mattn/go-isatty v0.0.16 // indirect
 github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
-github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect
-github.com/minio/highwayhash v1.0.1 // indirect
 github.com/minio/sha256-simd v1.0.0 // indirect
-github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
 github.com/mr-tron/base58 v1.2.0 // indirect
-github.com/multiformats/go-base32 v0.0.4 // indirect
+github.com/multiformats/go-base32 v0.1.0 // indirect
 github.com/multiformats/go-base36 v0.1.0 // indirect
-github.com/multiformats/go-multibase v0.0.3 // indirect
+github.com/multiformats/go-multibase v0.1.1 // indirect
 github.com/multiformats/go-varint v0.0.6 // indirect
 github.com/opentracing/opentracing-go v1.2.0 // indirect
-github.com/pkg/errors v0.9.1 // indirect
 github.com/prometheus/client_model v0.2.0 // indirect
-github.com/prometheus/common v0.32.1 // indirect
+github.com/prometheus/common v0.37.0 // indirect
-github.com/prometheus/procfs v0.7.3 // indirect
+github.com/prometheus/procfs v0.8.0 // indirect
-github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7 // indirect
+github.com/protolambda/bls12-381-util v0.0.0-20210720105258-a772f2aac13e // indirect
-github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220303211031-f753e083138c // indirect
+github.com/rogpeppe/go-internal v1.8.0 // indirect
-github.com/russross/blackfriday/v2 v2.1.0 // indirect
 github.com/spaolacci/murmur3 v1.1.0 // indirect
-github.com/supranational/blst v0.3.5 // indirect
+go.uber.org/atomic v1.10.0 // indirect
-github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e // indirect
-github.com/urfave/cli/v2 v2.3.0 // indirect
-go.opencensus.io v0.23.0 // indirect
-go.uber.org/atomic v1.9.0 // indirect
 go.uber.org/multierr v1.8.0 // indirect
-go.uber.org/zap v1.21.0 // indirect
+go.uber.org/zap v1.23.0 // indirect
-golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f // indirect
+golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect
-golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 // indirect
+golang.org/x/net v0.0.0-20220907135653-1e95f45603a7 // indirect
-google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3 // indirect
+google.golang.org/protobuf v1.28.1 // indirect
-google.golang.org/grpc v1.46.0 // indirect
-google.golang.org/protobuf v1.28.0 // indirect
 gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect
 lukechampine.com/blake3 v1.1.7 // indirect
 )

 require (
-github.com/ferranbt/fastssz v0.0.0-20220303160658-88bb965b6747 // indirect
 github.com/fsnotify/fsnotify v1.5.4 // indirect
-github.com/georgysavva/scany v0.3.0
+github.com/georgysavva/scany v1.2.0
 github.com/hashicorp/hcl v1.0.0 // indirect
-github.com/inconshreveable/mousetrap v1.0.0 // indirect
+github.com/inconshreveable/mousetrap v1.0.1 // indirect
-github.com/jackc/pgx/v4 v4.16.0
+github.com/jackc/pgx/v4 v4.17.2
 github.com/jarcoal/httpmock v1.2.0
 github.com/magiconair/properties v1.8.6 // indirect
 github.com/mitchellh/mapstructure v1.5.0 // indirect
 github.com/pelletier/go-toml v1.9.5 // indirect
-github.com/pelletier/go-toml/v2 v2.0.0 // indirect
+github.com/pelletier/go-toml/v2 v2.0.5 // indirect
-github.com/prysmaticlabs/prysm v1.4.2-0.20220504145118-df695346a53c
+github.com/spf13/afero v1.9.2 // indirect
-github.com/spf13/afero v1.8.2 // indirect
+github.com/spf13/cast v1.5.0 // indirect
-github.com/spf13/cast v1.4.1 // indirect
+github.com/spf13/cobra v1.5.0
-github.com/spf13/cobra v1.4.0
 github.com/spf13/jwalterweatherman v1.1.0 // indirect
 github.com/spf13/pflag v1.0.5 // indirect
-github.com/spf13/viper v1.11.0
+github.com/spf13/viper v1.13.0
-github.com/subosito/gotenv v1.2.0 // indirect
+github.com/subosito/gotenv v1.4.1 // indirect
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
+golang.org/x/sync v0.0.0-20220907140024-f12130a52804
-golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 // indirect
+golang.org/x/sys v0.0.0-20220907062415-87db552b00fd // indirect
 golang.org/x/text v0.3.7 // indirect
-gopkg.in/ini.v1 v1.66.4 // indirect
+gopkg.in/ini.v1 v1.67.0 // indirect
 gopkg.in/yaml.v2 v2.4.0 // indirect
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+gopkg.in/yaml.v3 v3.0.1 // indirect
 )

@@ -42,11 +42,11 @@ var (
 //
 // 3. Make sure the node is synced, unless disregardSync is true.
 func BootApplication(ctx context.Context, dbHostname string, dbPort int, dbName string, dbUsername string, dbPassword string, driverName string,
-bcAddress string, bcPort int, bcConnectionProtocol string, bcKgTableIncrement int, disregardSync bool, uniqueNodeIdentifier int, checkDb bool) (*beaconclient.BeaconClient, sql.Database, error) {
+bcAddress string, bcPort int, bcConnectionProtocol string, bcKgTableIncrement int, disregardSync bool, uniqueNodeIdentifier int, checkDb bool, performBeaconBlockProcessing bool, performBeaconStateProcessing bool) (*beaconclient.BeaconClient, sql.Database, error) {
 log.Info("Booting the Application")

 log.Debug("Creating the Beacon Client")
-Bc, err := beaconclient.CreateBeaconClient(ctx, bcConnectionProtocol, bcAddress, bcPort, bcKgTableIncrement, uniqueNodeIdentifier, checkDb)
+Bc, err := beaconclient.CreateBeaconClient(ctx, bcConnectionProtocol, bcAddress, bcPort, bcKgTableIncrement, uniqueNodeIdentifier, checkDb, performBeaconBlockProcessing, performBeaconStateProcessing)
 if err != nil {
 return Bc, nil, err
 }
@@ -86,14 +86,15 @@ func BootApplication(ctx context.Context, dbHostname string, dbPort int, dbName
 // Add retry logic to ensure that we are give the Beacon Client and the DB time to start.
 func BootApplicationWithRetry(ctx context.Context, dbHostname string, dbPort int, dbName string, dbUsername string, dbPassword string, driverName string,
 bcAddress string, bcPort int, bcConnectionProtocol string, bcType string, bcRetryInterval int, bcMaxRetry int, bcKgTableIncrement int,
-startUpMode string, disregardSync bool, uniqueNodeIdentifier int, checkDb bool) (*beaconclient.BeaconClient, sql.Database, error) {
+startUpMode string, disregardSync bool, uniqueNodeIdentifier int, checkDb bool, performBeaconBlockProcessing bool, performBeaconStateProcessing bool) (*beaconclient.BeaconClient, sql.Database, error) {
 var err error

 if bcMaxRetry < 0 {
 i := 0
 for {
 BC, DB, err = BootApplication(ctx, dbHostname, dbPort, dbName, dbUsername, dbPassword, driverName,
-bcAddress, bcPort, bcConnectionProtocol, bcKgTableIncrement, disregardSync, uniqueNodeIdentifier, checkDb)
+bcAddress, bcPort, bcConnectionProtocol, bcKgTableIncrement, disregardSync, uniqueNodeIdentifier, checkDb,
+performBeaconBlockProcessing, performBeaconStateProcessing)
 if err != nil {
 log.WithFields(log.Fields{
 "retryNumber": i,
@@ -108,7 +109,8 @@ func BootApplicationWithRetry(ctx context.Context, dbHostname string, dbPort int
 } else {
 for i := 0; i < bcMaxRetry; i++ {
 BC, DB, err = BootApplication(ctx, dbHostname, dbPort, dbName, dbUsername, dbPassword, driverName,
-bcAddress, bcPort, bcConnectionProtocol, bcKgTableIncrement, disregardSync, uniqueNodeIdentifier, checkDb)
+bcAddress, bcPort, bcConnectionProtocol, bcKgTableIncrement, disregardSync, uniqueNodeIdentifier, checkDb,
+performBeaconBlockProcessing, performBeaconStateProcessing)
 if err != nil {
 log.WithFields(log.Fields{
 "retryNumber": i,
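
For reference, a minimal sketch of calling the widened boot signature directly; the connection values mirror the sample configs in this PR and are only placeholders:

package main

import (
	"context"

	"github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
)

func main() {
	// Argument order follows BootApplicationWithRetry above; the last two booleans
	// are the new block/state processing toggles.
	bc, db, err := boot.BootApplicationWithRetry(context.Background(),
		"localhost", 5432, "vulcanize_db", "vdbm", "password", "PGX",
		"localhost", 5052, "http", "lighthouse", 30, 5, 10000,
		"head", true, 100, true,
		true,  // performBeaconBlockProcessing
		false, // performBeaconStateProcessing
	)
	if err != nil {
		panic(err)
	}
	defer db.Close()
	_ = bc
}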

@@ -40,51 +40,53 @@ var _ = Describe("Boot", func() {
 bcKgTableIncrement int = 10
 bcUniqueIdentifier int = 100
 bcCheckDb bool = false
+bcProcessBeaconBlocks bool = true
+bcProcessBeaconState bool = true
 )
 Describe("Booting the application", Label("integration"), func() {
 Context("When the DB and BC are both up and running, we skip checking for a synced head, and we are processing head", func() {
 It("Should connect successfully", func() {
-_, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "head", true, bcUniqueIdentifier, bcCheckDb)
+_, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "head", true, bcUniqueIdentifier, bcCheckDb, bcProcessBeaconBlocks, bcProcessBeaconState)
 defer db.Close()
 Expect(err).ToNot(HaveOccurred())
 })
 })
 Context("When the DB and BC are both up and running, we skip checking for a synced head, and we are processing historic ", func() {
 It("Should connect successfully", func() {
-_, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "historic", true, bcUniqueIdentifier, bcCheckDb)
+_, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "historic", true, bcUniqueIdentifier, bcCheckDb, bcProcessBeaconBlocks, bcProcessBeaconState)
 defer db.Close()
 Expect(err).ToNot(HaveOccurred())
 })
 })
 Context("When the DB and BC are both up and running, and we check for a synced head", func() {
 It("Should not connect successfully", func() {
-_, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "head", false, bcUniqueIdentifier, bcCheckDb)
+_, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "head", false, bcUniqueIdentifier, bcCheckDb, bcProcessBeaconBlocks, bcProcessBeaconState)
 defer db.Close()
 Expect(err).To(HaveOccurred())
 })
 })
 Context("When the DB and BC are both up and running, we skip checking for a synced head, but the unique identifier is 0", func() {
 It("Should not connect successfully", func() {
-_, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "head", false, 0, bcCheckDb)
+_, db, err := boot.BootApplicationWithRetry(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "head", false, 0, bcCheckDb, bcProcessBeaconBlocks, bcProcessBeaconState)
 defer db.Close()
 Expect(err).To(HaveOccurred())
 })
 })
 Context("When the DB is running but not the BC", func() {
 It("Should not connect successfully", func() {
-_, _, err := boot.BootApplication(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, "hi", 100, bcConnectionProtocol, bcKgTableIncrement, true, bcUniqueIdentifier, bcCheckDb)
+_, _, err := boot.BootApplication(context.Background(), dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, "hi", 100, bcConnectionProtocol, bcKgTableIncrement, true, bcUniqueIdentifier, bcCheckDb, bcProcessBeaconBlocks, bcProcessBeaconState)
 Expect(err).To(HaveOccurred())
 })
 })
 Context("When the BC is running but not the DB", func() {
 It("Should not connect successfully", func() {
-_, _, err := boot.BootApplication(context.Background(), "hi", 10, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcKgTableIncrement, true, bcUniqueIdentifier, bcCheckDb)
+_, _, err := boot.BootApplication(context.Background(), "hi", 10, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcKgTableIncrement, true, bcUniqueIdentifier, bcCheckDb, bcProcessBeaconBlocks, bcProcessBeaconState)
 Expect(err).To(HaveOccurred())
 })
 })
 Context("When neither the BC or DB are running", func() {
 It("Should not connect successfully", func() {
-_, _, err := boot.BootApplication(context.Background(), "hi", 10, dbName, dbUsername, dbPassword, dbDriver, "hi", 100, bcConnectionProtocol, bcKgTableIncrement, true, bcUniqueIdentifier, bcCheckDb)
+_, _, err := boot.BootApplication(context.Background(), "hi", 10, dbName, dbUsername, dbPassword, dbDriver, "hi", 100, bcConnectionProtocol, bcKgTableIncrement, true, bcUniqueIdentifier, bcCheckDb, bcProcessBeaconBlocks, bcProcessBeaconState)
 Expect(err).To(HaveOccurred())
 })
 })

@@ -26,7 +26,7 @@ import (

 . "github.com/onsi/ginkgo/v2"
 . "github.com/onsi/gomega"
-"github.com/r3labs/sse"
+"github.com/r3labs/sse/v2"
 log "github.com/sirupsen/logrus"
 "github.com/vulcanize/ipld-eth-beacon-indexer/internal/boot"
 "github.com/vulcanize/ipld-eth-beacon-indexer/internal/shutdown"
@@ -51,6 +51,8 @@ var (
 bcKgTableIncrement int = 10
 bcUniqueIdentifier int = 100
 bcCheckDb bool = false
+bcProcessBeaconBlocks bool = true
+bcProcessBeaconState bool = true
 maxWaitSecondsShutdown time.Duration = time.Duration(1) * time.Second
 DB sql.Database
 BC *beaconclient.BeaconClient
@@ -63,7 +65,7 @@ var _ = Describe("Shutdown", func() {
 BeforeEach(func() {
 ctx = context.Background()
 BC, DB, err = boot.BootApplicationWithRetry(ctx, dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress,
-bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "head", true, bcUniqueIdentifier, bcCheckDb)
+bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "head", true, bcUniqueIdentifier, bcCheckDb, bcProcessBeaconBlocks, bcProcessBeaconState)
 notifierCh = make(chan os.Signal, 1)
 Expect(err).To(BeNil())
 })

ipld-eth-beacon-config.json (new file, 44 changes)

@@ -0,0 +1,44 @@
+{
+"db": {
+"address": "localhost",
+"password": "password",
+"port": 5432,
+"username": "vdbm",
+"name": "vulcanize_db",
+"driver": "PGX"
+},
+"bc": {
+"address": "localhost",
+"port": 5052,
+"type": "lighthouse",
+"bootRetryInterval": 30,
+"bootMaxRetry": 5,
+"maxHistoricProcessWorker": 2,
+"connectionProtocol": "http",
+"uniqueNodeIdentifier": 100,
+"checkDb": true,
+"performBeaconStateProcessing": false,
+"performBeaconBlockProcessing": true,
+"minimumSlot": 4700013
+},
+"t": {
+"skipSync": true
+},
+"log": {
+"level": "debug",
+"output": true,
+"file": "./ipld-eth-beacon-indexer.log",
+"format": "json"
+},
+"kg": {
+"increment": 10000,
+"processKnownGaps": true,
+"maxKnownGapsWorker": 2,
+"minimumSlot": 4700013
+},
+"pm": {
+"address": "localhost",
+"port": 9000,
+"metrics": true
+}
+}
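
The image now carries this file (see the Dockerfile ADD line and the entrypoint --config flag), and the new keys are read through the same spf13/viper flow the commands already use. A small sketch, assuming the file sits in the working directory:

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	// Point viper at the shipped config; the path here is only illustrative.
	viper.SetConfigFile("./ipld-eth-beacon-config.json")
	if err := viper.ReadInConfig(); err != nil {
		panic(err)
	}

	// The toggles and lower slot bounds introduced by this PR.
	fmt.Println(viper.GetBool("bc.performBeaconBlockProcessing")) // true
	fmt.Println(viper.GetBool("bc.performBeaconStateProcessing")) // false
	fmt.Println(viper.GetUint64("bc.minimumSlot"))                // 4700013
	fmt.Println(viper.GetUint64("kg.minimumSlot"))                // 4700013
}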

@@ -18,11 +18,11 @@ package beaconclient
 import (
 "context"
 "fmt"
-"math/rand"
+"github.com/r3labs/sse/v2"

-"github.com/r3labs/sse"
 log "github.com/sirupsen/logrus"
 "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
+"math/rand"
+"time"
 )

 // TODO: Use prysms config values instead of hardcoding them here.
@@ -37,7 +37,7 @@ var (
 BcBlockRootEndpoint = func(slot string) string {
 return "/eth/v1/beacon/blocks/" + slot + "/root"
 }
-bcSlotsPerEpoch = 32 // Number of slots in a single Epoch
+bcSlotsPerEpoch uint64 = 32 // Number of slots in a single Epoch
 //bcSlotPerHistoricalVector = 8192 // The number of slots in a historic vector.
 //bcFinalizedTopicEndpoint = "/eth/v1/events?topics=finalized_checkpoint" // Endpoint used to subscribe to the head of the chain
 )
@@ -52,12 +52,14 @@ type BeaconClient struct {
 UniqueNodeIdentifier int // The unique identifier within the cluster of this individual node.
 KnownGapsProcess KnownGapsProcessing // object keeping track of knowngaps processing
 CheckDb bool // Should we check the DB to see if the slot exists before processing it?
+PerformBeaconStateProcessing bool // Should we process BeaconStates?
+PerformBeaconBlockProcessing bool // Should we process BeaconBlocks?

 // Used for Head Tracking

 PerformHeadTracking bool // Should we track head?
-StartingSlot int // If we're performing head tracking. What is the first slot we processed.
+StartingSlot Slot // If we're performing head tracking. What is the first slot we processed.
-PreviousSlot int // Whats the previous slot we processed
+PreviousSlot Slot // Whats the previous slot we processed
 PreviousBlockRoot string // Whats the previous block root, used to check the next blocks parent.
 HeadTracking *SseEvents[Head] // Track the head block
 ReOrgTracking *SseEvents[ChainReorg] // Track all Reorgs
@@ -78,7 +80,7 @@ type SseEvents[P ProcessedEvents] struct {
 MessagesCh chan *sse.Event // Contains all the messages from the SSE Channel
 ErrorCh chan *SseError // Contains any errors while SSE streaming occurred
 ProcessCh chan *P // Used to capture processed data in its proper struct.
-SseClient *sse.Client // sse.Client object that is used to interact with the SSE stream
+sseClient *sse.Client // sse.Client object that is used to interact with the SSE stream
 }

 // An object to capture any errors when turning an SSE message to JSON.
@@ -88,7 +90,8 @@ type SseError struct {
 }

 // A Function to create the BeaconClient.
-func CreateBeaconClient(ctx context.Context, connectionProtocol string, bcAddress string, bcPort int, bcKgTableIncrement int, uniqueNodeIdentifier int, checkDb bool) (*BeaconClient, error) {
+func CreateBeaconClient(ctx context.Context, connectionProtocol string, bcAddress string, bcPort int,
+bcKgTableIncrement int, uniqueNodeIdentifier int, checkDb bool, performBeaconBlockProcessing bool, performBeaconStateProcessing bool) (*BeaconClient, error) {
 if uniqueNodeIdentifier == 0 {
 uniqueNodeIdentifier := rand.Int()
 log.WithField("randomUniqueNodeIdentifier", uniqueNodeIdentifier).Warn("No uniqueNodeIdentifier provided, we are going to use a randomly generated one.")
@@ -110,6 +113,8 @@ func CreateBeaconClient(ctx context.Context, connectionProtocol string, bcAddres
 Metrics: metrics,
 UniqueNodeIdentifier: uniqueNodeIdentifier,
 CheckDb: checkDb,
+PerformBeaconBlockProcessing: performBeaconBlockProcessing,
+PerformBeaconStateProcessing: performBeaconStateProcessing,
 //FinalizationTracking: createSseEvent[FinalizedCheckpoint](endpoint, bcFinalizedTopicEndpoint),
 }, nil
 }
@@ -122,10 +127,40 @@ func createSseEvent[P ProcessedEvents](baseEndpoint string, path string) *SseEve
 MessagesCh: make(chan *sse.Event, 1),
 ErrorCh: make(chan *SseError),
 ProcessCh: make(chan *P),
-SseClient: func(endpoint string) *sse.Client {
-log.WithFields(log.Fields{"endpoint": endpoint}).Info("Creating SSE client")
-return sse.NewClient(endpoint)
-}(endpoint),
 }
 return sseEvents
 }

+func (se *SseEvents[P]) Connect() error {
+if nil == se.sseClient {
+se.initClient()
+}
+return se.sseClient.SubscribeChanRaw(se.MessagesCh)
+}
+
+func (se *SseEvents[P]) Disconnect() {
+if nil == se.sseClient {
+return
+}
+
+log.WithFields(log.Fields{"endpoint": se.Endpoint}).Info("Disconnecting and destroying SSE client")
+se.sseClient.Unsubscribe(se.MessagesCh)
+se.sseClient.Connection.CloseIdleConnections()
+se.sseClient = nil
+}
+
+func (se *SseEvents[P]) initClient() {
+if nil != se.sseClient {
+se.Disconnect()
+}
+
+log.WithFields(log.Fields{"endpoint": se.Endpoint}).Info("Creating SSE client")
+client := sse.NewClient(se.Endpoint)
+client.ReconnectNotify = func(err error, duration time.Duration) {
+log.WithFields(log.Fields{"endpoint": se.Endpoint}).Debug("Reconnecting SSE client")
+}
+client.OnDisconnect(func(c *sse.Client) {
+log.WithFields(log.Fields{"endpoint": se.Endpoint}).Debug("SSE client disconnected")
+})
+se.sseClient = client
+}
|
@ -50,7 +50,7 @@ func (bc *BeaconClient) StopHeadTracking() error {
|
|||||||
// This function closes the SSE subscription, but waits until the MessagesCh is empty
|
// This function closes the SSE subscription, but waits until the MessagesCh is empty
|
||||||
func (se *SseEvents[ProcessedEvents]) finishProcessingChannel(finish chan<- bool) {
|
func (se *SseEvents[ProcessedEvents]) finishProcessingChannel(finish chan<- bool) {
|
||||||
loghelper.LogEndpoint(se.Endpoint).Info("Received a close event.")
|
loghelper.LogEndpoint(se.Endpoint).Info("Received a close event.")
|
||||||
se.SseClient.Unsubscribe(se.MessagesCh)
|
se.Disconnect()
|
||||||
for len(se.MessagesCh) != 0 || len(se.ProcessCh) != 0 {
|
for len(se.MessagesCh) != 0 || len(se.ProcessCh) != 0 {
|
||||||
time.Sleep(time.Duration(shutdownWaitInterval) * time.Millisecond)
|
time.Sleep(time.Duration(shutdownWaitInterval) * time.Millisecond)
|
||||||
}
|
}
|
||||||
|
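
With the sse.Client field made private, callers go through the new Connect and Disconnect methods instead of touching SseClient directly. A brief sketch of that lifecycle; the helper function is hypothetical, while Connect, Disconnect, HeadTracking, and ReOrgTracking come from this diff:

package beaconclient

import log "github.com/sirupsen/logrus"

// subscribeHeadEvents is a hypothetical helper illustrating the new lifecycle:
// Connect lazily builds the underlying sse/v2 client and subscribes MessagesCh,
// while Disconnect unsubscribes, closes idle connections, and drops the client
// so a later Connect starts fresh.
func subscribeHeadEvents(bc *BeaconClient) error {
	if err := bc.HeadTracking.Connect(); err != nil {
		return err
	}
	if err := bc.ReOrgTracking.Connect(); err != nil {
		bc.HeadTracking.Disconnect()
		return err
	}
	log.Info("Subscribed to head and reorg SSE streams")
	return nil
}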
@ -20,6 +20,7 @@ import (
|
|||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/protolambda/zrnt/eth2/beacon/common"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
@ -29,12 +30,7 @@ import (
|
|||||||
|
|
||||||
"github.com/jarcoal/httpmock"
|
"github.com/jarcoal/httpmock"
|
||||||
. "github.com/onsi/ginkgo/v2"
|
. "github.com/onsi/ginkgo/v2"
|
||||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
"github.com/r3labs/sse/v2"
|
||||||
si "github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
|
||||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
|
||||||
dt "github.com/prysmaticlabs/prysm/encoding/ssz/detect"
|
|
||||||
st "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
|
||||||
"github.com/r3labs/sse"
|
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
|
|
||||||
. "github.com/onsi/gomega"
|
. "github.com/onsi/gomega"
|
||||||
@ -75,7 +71,7 @@ var (
             CorrectSignedBeaconBlockMhKey: "/blocks/QLVAEQRQPA2GINRRGFSDKYRZGNTGIYLCGY4TAMJTME3WMMDBGJTDSNRRMNQWGYJQMM4DKM3GHA3WGZTFHE2TSNLGMU2TAMBTHAYTMMZQG44TGNRQ",
             CorrectBeaconStateMhKey: "/blocks/QLVAEQRQPA3WKNZWHA4DAZLCGY3WEYTEMM4DMMRVGBQWCNJXHA4TKODFHFSDANRXGVSTMNDFG4YTIMZTG44DKNJSGA2GMYRVMFRGCYLGHAZGGMTC",
             CorrectParentRoot: "0x0000000000000000000000000000000000000000000000000000000000000000",
-            CorrectEth1BlockHash: "0x0000000000000000000000000000000000000000000000000000000000000000",
+            CorrectEth1DataBlockHash: "0x0000000000000000000000000000000000000000000000000000000000000000",
         },
         "100-dummy": {
             HeadMessage: beaconclient.Head{
@ -141,7 +137,7 @@ var (
             CorrectSignedBeaconBlockMhKey: "/blocks/QLVAEQRQPA2TQMRRHA3WKOJXMY3TKMRQMJRDMOLFMVQTAMJUMMZTQMZUMM4TMNDDGQ2TENJZGM3TEYJQMVQWCZLBGNTDAMZSGAYTGNZZG44TSNTC",
             CorrectBeaconStateMhKey: "/blocks/QLVAEQRQPBTDEOBWMEYDGNZZMMYDGOBWMEZWGN3CMUZDQZBQGVSDQMRZMY4GKYRXMIZDQMDDMM4WKZDFGE2TINBZMFTDEMDFMJRWIMBWME3WCNJW",
             CorrectParentRoot: "0x629ae1587895043076500f4f5dcb202a47c2fc95d5b5c548cb83bc97bd2dbfe1",
-            CorrectEth1BlockHash: "0x8d3f027beef5cbd4f8b29fc831aba67a5d74768edca529f5596f07fd207865e1",
+            CorrectEth1DataBlockHash: "0x8d3f027beef5cbd4f8b29fc831aba67a5d74768edca529f5596f07fd207865e1",
         },
         "101": {
             HeadMessage: beaconclient.Head{
@ -156,7 +152,7 @@ var (
             TestNotes: "An easy to process Phase 0 block",
             SignedBeaconBlock: filepath.Join("ssz-data", "101", "signed-beacon-block.ssz"),
             BeaconState: filepath.Join("ssz-data", "101", "beacon-state.ssz"),
-            CorrectEth1BlockHash: "0x8d3f027beef5cbd4f8b29fc831aba67a5d74768edca529f5596f07fd207865e1",
+            CorrectEth1DataBlockHash: "0x8d3f027beef5cbd4f8b29fc831aba67a5d74768edca529f5596f07fd207865e1",
             CorrectSignedBeaconBlockMhKey: "/blocks/QLVAEQRQPBQWEZJRME4TOMTFGUYTEMJYGJSDANDGGBSDIYJVMM4WGMRVMY4WKZJVG5RTEZJZMQYGMZRTMY2GGNDDHAZGMZBUGJSDCM3EGMYTAOBT",
             CorrectBeaconStateMhKey: "/blocks/QLVAEQRQPBRWEMBUMFQTEZLEMJTDCM3DG5RGEN3FG5RGIOLCGYZDCY3FMQ3DQMZSMUYDANZVMU4DSMJUG4ZTKMTFMFRTGMBRHFQTQMRUMNSTQNBX",
         },
@ -199,11 +195,15 @@ var (
                 Slot: "2375703",
                 Block: "0x4392372c5f6e39499e31bf924388b5815639103149f0f54f8a453773b1802301",
                 State: "0xb6215b560273af63ec7e011572b60ec1ca0b0232f8ff44fcd4ed55c7526e964e",
-                CurrentDutyDependentRoot: "", PreviousDutyDependentRoot: "", EpochTransition: false, ExecutionOptimistic: false},
+                CurrentDutyDependentRoot: "",
+                PreviousDutyDependentRoot: "",
+                EpochTransition: false,
+                ExecutionOptimistic: false,
+            },
             TestNotes: "An easy to process Altair Block",
             SignedBeaconBlock: filepath.Join("ssz-data", "2375703", "signed-beacon-block.ssz"),
             BeaconState: filepath.Join("ssz-data", "2375703", "beacon-state.ssz"),
-            CorrectEth1BlockHash: "0xd74b1c60423651624de6bb301ac25808951c167ba6ecdd9b2e79b4315aee8202",
+            CorrectEth1DataBlockHash: "0xd74b1c60423651624de6bb301ac25808951c167ba6ecdd9b2e79b4315aee8202",
             CorrectParentRoot: "0x08736ddc20b77f65d1aa6301f7e6e856a820ff3ce6430ed2c3694ae35580e740",
             CorrectSignedBeaconBlockMhKey: "/blocks/QLVAEQRQPA2DGOJSGM3TEYZVMY3GKMZZGQ4TSZJTGFRGMOJSGQZTQODCGU4DCNJWGM4TCMBTGE2DSZRQMY2TIZRYME2DKMZXG4ZWEMJYGAZDGMBR",
             CorrectBeaconStateMhKey: "/blocks/QLVAEQRQPBRDMMRRGVRDKNRQGI3TGYLGGYZWKYZXMUYDCMJVG4ZGENRQMVRTCY3BGBRDAMRTGJTDQZTGGQ2GMY3EGRSWINJVMM3TKMRWMU4TMNDF",
@ -213,12 +213,61 @@ var (
                 Slot: "3797056",
                 Block: "",
                 State: "",
-                CurrentDutyDependentRoot: "", PreviousDutyDependentRoot: "", EpochTransition: false, ExecutionOptimistic: false},
+                CurrentDutyDependentRoot: "",
+                PreviousDutyDependentRoot: "",
+                EpochTransition: false,
+                ExecutionOptimistic: false,
+            },
             TestNotes: "An easy to process Altair Block",
             // The file below should not exist, this will trigger an error message and 404 response from the mock.
             SignedBeaconBlock: filepath.Join("ssz-data", "3797056", "should-not-exist.txt"),
             BeaconState: filepath.Join("ssz-data", "3797056", "beacon-state.ssz"),
         },
+        "4636672": {
+            HeadMessage: beaconclient.Head{
+                Slot: "4636672",
+                Block: "0x9429ce339da8944dd2e1565be8cac5bf634cae2120b6937c081e39148a7f4b1a",
+                State: "0x0067a5d28b38e6e2f59a73046fabbf16a782b978c2c89621a679e7f682b05bd4",
+                CurrentDutyDependentRoot: "",
+                PreviousDutyDependentRoot: "",
+                EpochTransition: true,
+                ExecutionOptimistic: false,
+            },
+            TestNotes: "The first Bellatrix block (empty ExecutionPayload)",
+            SignedBeaconBlock: filepath.Join("ssz-data", "4636672", "signed-beacon-block.ssz"),
+            BeaconState: filepath.Join("ssz-data", "4636672", "beacon-state.ssz"),
+            CorrectEth1DataBlockHash: "0x3b7d392e46db19704d677cadb3310c3776d8c0b8cb2af1c324bb4a394b7f8164",
+            CorrectParentRoot: "0xe7d4f3b7924c30ae047fceabb853b8afdae32b85e0a87ab6c4c37421b353a1da",
+            CorrectSignedBeaconBlockMhKey: "/blocks/QLVAEQRQPA4TIMRZMNSTGMZZMRQTQOJUGRSGIMTFGE2TMNLCMU4GGYLDGVRGMNRTGRRWCZJSGEZDAYRWHEZTOYZQHAYWKMZZGE2DQYJXMY2GEMLB",
+            CorrectBeaconStateMhKey: "",
+        },
+        "4700013": {
+            HeadMessage: beaconclient.Head{
+                Slot: "4700013",
+                Block: "0x810a00400a80cdffc11ffdcf17ac404ac4dba215b95221955a9dfddf163d0b0d",
+                State: "0x171ef131e0638eddfe1ef73e7b483e344b1cf128b092f2c39e946eb7775b3a2f",
+                CurrentDutyDependentRoot: "",
+                PreviousDutyDependentRoot: "",
+                EpochTransition: true,
+                ExecutionOptimistic: false,
+            },
+            TestNotes: "The first Bellatrix block post-Merge (with ExecutionPayload)",
+            SignedBeaconBlock: filepath.Join("ssz-data", "4700013", "signed-beacon-block.ssz"),
+            BeaconState: filepath.Join("ssz-data", "4700013", "beacon-state.ssz"),
+            CorrectEth1DataBlockHash: "0xb8736ada384707e156f2e0e69d8311ceda11f96806921644a378fd55899894ca",
+            CorrectParentRoot: "0x60e751f7d2cf0ae24b195bda37e9add56a7d8c4b75469c018c0f912518c3bae8",
+            CorrectSignedBeaconBlockMhKey: "/blocks/QLVAEQRQPA4DCMDBGAYDIMBQME4DAY3EMZTGGMJRMZTGIY3GGE3WCYZUGA2GCYZUMRRGCMRRGVRDSNJSGIYTSNJVME4WIZTEMRTDCNRTMQYGEMDE",
+            CorrectBeaconStateMhKey: "",
+            CorrectExecutionPayloadHeader: &beaconclient.DbExecutionPayloadHeader{
+                BlockNumber: 15537394,
+                Timestamp: 1663224179,
+                BlockHash: "0x56a9bb0302da44b8c0b3df540781424684c3af04d0b7a38d72842b762076a664",
+                ParentHash: "0x55b11b918355b1ef9c5db810302ebad0bf2544255b530cdce90674d5887bb286",
+                StateRoot: "0x40c07091e16263270f3579385090fea02dd5f061ba6750228fcc082ff762fda7",
+                ReceiptsRoot: "0x928073fb98ce316265ea35d95ab7e2e1206cecd85242eb841dbbcc4f568fca4b",
+                TransactionsRoot: "0xf9ef008aaf996dccd1c871c7e937f25d66e057e52773fbe2497090c114231acf",
+            },
+        },
     }
     TestConfig = Config{
         protocol: protocol,
@ -234,6 +283,8 @@ var (
         knownGapsTableIncrement: knownGapsTableIncrement,
         bcUniqueIdentifier: bcUniqueIdentifier,
         checkDb: true,
+        performBeaconStateProcessing: true,
+        performBeaconBlockProcessing: true,
     }

     BeaconNodeTester = TestBeaconNode{
@ -251,7 +302,8 @@ type Message struct {
     CorrectSignedBeaconBlockMhKey string // The correct MhKey for the signedBeaconBlock
     CorrectBeaconStateMhKey string // The correct MhKey beaconState
     CorrectParentRoot string // The correct parent root
-    CorrectEth1BlockHash string // The correct eth1blockHash
+    CorrectEth1DataBlockHash string // The correct eth1blockHash
+    CorrectExecutionPayloadHeader *beaconclient.DbExecutionPayloadHeader // The correct ExecutionPayload details.
 }

 // A structure that can be utilized to mimic and existing SSZ object but change it ever so slightly.
@ -270,8 +322,12 @@ var _ = Describe("Capturehead", Label("head"), func() {
             BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
             defer httpmock.DeactivateAndReset()
             BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, maxRetry, 1, 0, 0)
-            validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectParentRoot, BeaconNodeTester.TestEvents["100"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["100"].CorrectSignedBeaconBlockMhKey)
+            if bc.PerformBeaconBlockProcessing {
+                validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectParentRoot, BeaconNodeTester.TestEvents["100"].CorrectEth1DataBlockHash, BeaconNodeTester.TestEvents["100"].CorrectSignedBeaconBlockMhKey, BeaconNodeTester.TestEvents["100"].CorrectExecutionPayloadHeader)
+            }
+            if bc.PerformBeaconStateProcessing {
             validateBeaconState(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectBeaconStateMhKey)
+            }

         })
     })
@ -281,8 +337,12 @@ var _ = Describe("Capturehead", Label("head"), func() {
             BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
             defer httpmock.DeactivateAndReset()
             BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0)
-            validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectParentRoot, BeaconNodeTester.TestEvents["2375703"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["2375703"].CorrectSignedBeaconBlockMhKey)
+            if bc.PerformBeaconBlockProcessing {
+                validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectParentRoot, BeaconNodeTester.TestEvents["2375703"].CorrectEth1DataBlockHash, BeaconNodeTester.TestEvents["2375703"].CorrectSignedBeaconBlockMhKey, BeaconNodeTester.TestEvents["2375703"].CorrectExecutionPayloadHeader)
+            }
+            if bc.PerformBeaconStateProcessing {
             validateBeaconState(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectBeaconStateMhKey)
+            }
         })
     })
     Context("Correctly formatted Altair Test Blocks", func() {
@ -299,6 +359,26 @@ var _ = Describe("Capturehead", Label("head"), func() {

         })
     })
+    Context("Correctly formatted Bellatrix Test Blocks", Label("unit", "bellatrix"), func() {
+        It("Should turn it into a struct successfully (pre-Merge).", func() {
+            bc := setUpTest(BeaconNodeTester.TestConfig, "4636672")
+            BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
+            defer httpmock.DeactivateAndReset()
+            BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["4636672"].HeadMessage, 144896, maxRetry, 1, 0, 0)
+            if bc.PerformBeaconBlockProcessing {
+                validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["4636672"].HeadMessage, BeaconNodeTester.TestEvents["4636672"].CorrectParentRoot, BeaconNodeTester.TestEvents["4636672"].CorrectEth1DataBlockHash, BeaconNodeTester.TestEvents["4636672"].CorrectSignedBeaconBlockMhKey, BeaconNodeTester.TestEvents["4636672"].CorrectExecutionPayloadHeader)
+            }
+        })
+        It("Should turn it into a struct successfully (post-Merge).", func() {
+            bc := setUpTest(BeaconNodeTester.TestConfig, "4700013")
+            BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
+            defer httpmock.DeactivateAndReset()
+            BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["4700013"].HeadMessage, 146875, maxRetry, 1, 0, 0)
+            if bc.PerformBeaconBlockProcessing {
+                validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["4700013"].HeadMessage, BeaconNodeTester.TestEvents["4700013"].CorrectParentRoot, BeaconNodeTester.TestEvents["4700013"].CorrectEth1DataBlockHash, BeaconNodeTester.TestEvents["4700013"].CorrectSignedBeaconBlockMhKey, BeaconNodeTester.TestEvents["4700013"].CorrectExecutionPayloadHeader)
+            }
+        })
+    })
     Context("Correctly formatted Phase0 Test Blocks", func() {
         It("Should turn it into a struct successfully.", func() {
             bc := setUpTest(BeaconNodeTester.TestConfig, "99")
@ -311,7 +391,6 @@ var _ = Describe("Capturehead", Label("head"), func() {
             defer httpmock.DeactivateAndReset()
             BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100-dummy-2"].HeadMessage, 3, maxRetry, 1, 0, 0)
         })
-
     })
     Context("Two consecutive correct blocks", func() {
         It("Should handle both blocks correctly, without any reorgs or known_gaps", func() {
@ -332,12 +411,12 @@ var _ = Describe("Capturehead", Label("head"), func() {
         })
     })
     Context("Phase 0: We have a correctly formated SSZ SignedBeaconBlock and BeaconState", func() {
-        It("Should be able to get each objects root hash.", func() {
+        It("Should be able to get each objects root hash (100).", func() {
             testSszRoot(BeaconNodeTester.TestEvents["100"])
         })
     })
     Context("Altair: We have a correctly formated SSZ SignedBeaconBlock and BeaconState", func() {
-        It("Should be able to get each objects root hash.", func() {
+        It("Should be able to get each objects root hash (2375703).", func() {
             testSszRoot(BeaconNodeTester.TestEvents["2375703"])
         })
     })
@ -459,6 +538,8 @@ type Config struct {
     knownGapsTableIncrement int
     bcUniqueIdentifier int
     checkDb bool
+    performBeaconBlockProcessing bool
+    performBeaconStateProcessing bool
 }

 //////////////////////////////////////////////////////
@ -468,7 +549,7 @@ type Config struct {
 // Must run before each test. We can't use the beforeEach because of the way
 // Gingko treats race conditions.
 func setUpTest(config Config, maxSlot string) *beaconclient.BeaconClient {
-    bc, err := beaconclient.CreateBeaconClient(context.Background(), config.protocol, config.address, config.port, config.knownGapsTableIncrement, config.bcUniqueIdentifier, config.checkDb)
+    bc, err := beaconclient.CreateBeaconClient(context.Background(), config.protocol, config.address, config.port, config.knownGapsTableIncrement, config.bcUniqueIdentifier, config.checkDb, config.performBeaconBlockProcessing, config.performBeaconStateProcessing)
     Expect(err).ToNot(HaveOccurred())
     db, err := postgres.SetupPostgresDb(config.dbHost, config.dbPort, config.dbName, config.dbUser, config.dbPassword, config.dbDriver)
     Expect(err).ToNot(HaveOccurred())
@ -484,10 +565,10 @@ func setUpTest(config Config, maxSlot string) *beaconclient.BeaconClient {
 }

 // A helper function to validate the expected output from the eth_beacon.slots table.
-func validateSlot(bc *beaconclient.BeaconClient, headMessage beaconclient.Head, correctEpoch int, correctStatus string) {
+func validateSlot(bc *beaconclient.BeaconClient, headMessage beaconclient.Head, correctEpoch beaconclient.Epoch, correctStatus string) {
     epoch, dbSlot, blockRoot, stateRoot, status := queryDbSlotAndBlock(bc.Db, headMessage.Slot, headMessage.Block)
     log.Info("validateSlot: ", headMessage)
-    baseSlot, err := strconv.Atoi(headMessage.Slot)
+    baseSlot, err := beaconclient.ParseSlot(headMessage.Slot)
     Expect(err).ToNot(HaveOccurred())
     Expect(dbSlot).To(Equal(baseSlot))
     Expect(epoch).To(Equal(correctEpoch))
@ -497,29 +578,30 @@ func validateSlot(bc *beaconclient.BeaconClient, headMessage beaconclient.Head,
 }

 // A helper function to validate the expected output from the eth_beacon.signed_block table.
-func validateSignedBeaconBlock(bc *beaconclient.BeaconClient, headMessage beaconclient.Head, correctParentRoot string, correctEth1BlockHash string, correctMhKey string) {
-    dbSlot, blockRoot, parentRoot, eth1BlockHash, mhKey := queryDbSignedBeaconBlock(bc.Db, headMessage.Slot, headMessage.Block)
+func validateSignedBeaconBlock(bc *beaconclient.BeaconClient, headMessage beaconclient.Head,
+    correctParentRoot string, correctEth1DataBlockHash string, correctMhKey string,
+    correctExecutionPayloadHeader *beaconclient.DbExecutionPayloadHeader) {
+    dbSignedBlock := queryDbSignedBeaconBlock(bc.Db, headMessage.Slot, headMessage.Block)
     log.Info("validateSignedBeaconBlock: ", headMessage)
-    baseSlot, err := strconv.Atoi(headMessage.Slot)
+    baseSlot, err := beaconclient.ParseSlot(headMessage.Slot)
     Expect(err).ToNot(HaveOccurred())
-    Expect(dbSlot).To(Equal(baseSlot))
-    Expect(blockRoot).To(Equal(headMessage.Block))
-    Expect(parentRoot).To(Equal(correctParentRoot))
-    Expect(eth1BlockHash).To(Equal(correctEth1BlockHash))
-    Expect(mhKey).To(Equal(correctMhKey))
+    Expect(dbSignedBlock.Slot).To(Equal(baseSlot.Number()))
+    Expect(dbSignedBlock.BlockRoot).To(Equal(headMessage.Block))
+    Expect(dbSignedBlock.ParentBlock).To(Equal(correctParentRoot))
+    Expect(dbSignedBlock.Eth1DataBlockHash).To(Equal(correctEth1DataBlockHash))
+    Expect(dbSignedBlock.MhKey).To(Equal(correctMhKey))
+    Expect(dbSignedBlock.ExecutionPayloadHeader).To(Equal(correctExecutionPayloadHeader))
 }

 // A helper function to validate the expected output from the eth_beacon.state table.
 func validateBeaconState(bc *beaconclient.BeaconClient, headMessage beaconclient.Head, correctMhKey string) {
     dbSlot, stateRoot, mhKey := queryDbBeaconState(bc.Db, headMessage.Slot, headMessage.State)
     log.Info("validateBeaconState: ", headMessage)
-    baseSlot, err := strconv.Atoi(headMessage.Slot)
+    baseSlot, err := beaconclient.ParseSlot(headMessage.Slot)
     Expect(err).ToNot(HaveOccurred())
     Expect(dbSlot).To(Equal(baseSlot))
     Expect(stateRoot).To(Equal(headMessage.State))
     Expect(mhKey).To(Equal(correctMhKey))

 }

 // Wrapper function to send a head message to the beaconclient
@ -551,9 +633,10 @@ func sendHeadMessage(bc *beaconclient.BeaconClient, head beaconclient.Head, maxR
 }

 // A helper function to query the eth_beacon.slots table based on the slot and block_root
-func queryDbSlotAndBlock(db sql.Database, querySlot string, queryBlockRoot string) (int, int, string, string, string) {
+func queryDbSlotAndBlock(db sql.Database, querySlot string, queryBlockRoot string) (beaconclient.Epoch, beaconclient.Slot, string, string, string) {
     sqlStatement := `SELECT epoch, slot, block_root, state_root, status FROM eth_beacon.slots WHERE slot=$1 AND block_root=$2;`
-    var epoch, slot int
+    var epoch beaconclient.Epoch
+    var slot beaconclient.Slot
     var blockRoot, stateRoot, status string
     log.Debug("Starting to query the eth_beacon.slots table, ", querySlot, " ", queryBlockRoot)
     err := db.QueryRow(context.Background(), sqlStatement, querySlot, queryBlockRoot).Scan(&epoch, &slot, &blockRoot, &stateRoot, &status)
@ -563,25 +646,56 @@ func queryDbSlotAndBlock(db sql.Database, querySlot string, queryBlockRoot strin
 }

 // A helper function to query the eth_beacon.signed_block table based on the slot and block_root.
-func queryDbSignedBeaconBlock(db sql.Database, querySlot string, queryBlockRoot string) (int, string, string, string, string) {
-    sqlStatement := `SELECT slot, block_root, parent_block_root, eth1_block_hash, mh_key FROM eth_beacon.signed_block WHERE slot=$1 AND block_root=$2;`
-    var slot int
-    var blockRoot, parent_block_root, eth1_block_hash, mh_key string
+func queryDbSignedBeaconBlock(db sql.Database, querySlot string, queryBlockRoot string) beaconclient.DbSignedBeaconBlock {
+    sqlStatement := `SELECT slot, block_root, parent_block_root, eth1_data_block_hash, mh_key,
+    payload_block_number, payload_timestamp, payload_block_hash,
+    payload_parent_hash, payload_state_root, payload_receipts_root,
+    payload_transactions_root FROM eth_beacon.signed_block WHERE slot=$1 AND block_root=$2;`
+
+    var slot beaconclient.Slot
+    var payloadBlockNumber, payloadTimestamp *uint64
+    var blockRoot, parentBlockRoot, eth1DataBlockHash, mhKey string
+    var payloadBlockHash, payloadParentHash, payloadStateRoot, payloadReceiptsRoot, payloadTransactionsRoot *string
+
     row := db.QueryRow(context.Background(), sqlStatement, querySlot, queryBlockRoot)
-    err := row.Scan(&slot, &blockRoot, &parent_block_root, &eth1_block_hash, &mh_key)
+    err := row.Scan(&slot, &blockRoot, &parentBlockRoot, &eth1DataBlockHash, &mhKey,
+        &payloadBlockNumber, &payloadTimestamp, &payloadBlockHash,
+        &payloadParentHash, &payloadStateRoot, &payloadReceiptsRoot, &payloadTransactionsRoot)
     Expect(err).ToNot(HaveOccurred())
-    return slot, blockRoot, parent_block_root, eth1_block_hash, mh_key
+
+    signedBlock := beaconclient.DbSignedBeaconBlock{
+        Slot: slot.Number(),
+        BlockRoot: blockRoot,
+        ParentBlock: parentBlockRoot,
+        Eth1DataBlockHash: eth1DataBlockHash,
+        MhKey: mhKey,
+        ExecutionPayloadHeader: nil,
+    }
+
+    if nil != payloadBlockNumber {
+        signedBlock.ExecutionPayloadHeader = &beaconclient.DbExecutionPayloadHeader{
+            BlockNumber: *payloadBlockNumber,
+            Timestamp: *payloadTimestamp,
+            BlockHash: *payloadBlockHash,
+            ParentHash: *payloadParentHash,
+            StateRoot: *payloadStateRoot,
+            ReceiptsRoot: *payloadReceiptsRoot,
+            TransactionsRoot: *payloadTransactionsRoot,
+        }
+    }
+
+    return signedBlock
 }

 // A helper function to query the eth_beacon.signed_block table based on the slot and block_root.
-func queryDbBeaconState(db sql.Database, querySlot string, queryStateRoot string) (int, string, string) {
+func queryDbBeaconState(db sql.Database, querySlot string, queryStateRoot string) (beaconclient.Slot, string, string) {
     sqlStatement := `SELECT slot, state_root, mh_key FROM eth_beacon.state WHERE slot=$1 AND state_root=$2;`
-    var slot int
-    var stateRoot, mh_key string
+    var slot beaconclient.Slot
+    var stateRoot, mhKey string
     row := db.QueryRow(context.Background(), sqlStatement, querySlot, queryStateRoot)
-    err := row.Scan(&slot, &stateRoot, &mh_key)
+    err := row.Scan(&slot, &stateRoot, &mhKey)
     Expect(err).ToNot(HaveOccurred())
-    return slot, stateRoot, mh_key
+    return slot, stateRoot, mhKey
 }

 // Count the entries in the knownGaps table.
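Note on the rewritten queryDbSignedBeaconBlock above: the payload_* columns are NULL for pre-Merge rows, so they are scanned into pointer types and promoted to a DbExecutionPayloadHeader only when payload_block_number is set. A minimal standalone sketch of that nullable-scan pattern follows; it uses the standard library database/sql rather than the project's sql.Database wrapper, and the struct here simply mirrors the field names shown in the diff.

package sketch

import "database/sql"

// DbExecutionPayloadHeader mirrors the field names used in the diff above.
type DbExecutionPayloadHeader struct {
	BlockNumber      uint64
	Timestamp        uint64
	BlockHash        string
	ParentHash       string
	StateRoot        string
	ReceiptsRoot     string
	TransactionsRoot string
}

// scanPayloadHeader scans the seven nullable payload_* columns into pointers and
// returns nil for a row with no recorded ExecutionPayload (payload_block_number IS NULL).
func scanPayloadHeader(row *sql.Row) (*DbExecutionPayloadHeader, error) {
	var blockNumber, timestamp *uint64
	var blockHash, parentHash, stateRoot, receiptsRoot, transactionsRoot *string
	if err := row.Scan(&blockNumber, &timestamp, &blockHash,
		&parentHash, &stateRoot, &receiptsRoot, &transactionsRoot); err != nil {
		return nil, err
	}
	if blockNumber == nil {
		return nil, nil // no ExecutionPayload recorded for this block
	}
	return &DbExecutionPayloadHeader{
		BlockNumber:      *blockNumber,
		Timestamp:        *timestamp,
		BlockHash:        *blockHash,
		ParentHash:       *parentHash,
		StateRoot:        *stateRoot,
		ReceiptsRoot:     *receiptsRoot,
		TransactionsRoot: *transactionsRoot,
	}, nil
}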
@ -620,55 +734,27 @@ func writeSlot(db sql.Database, slot string) {

 // Read a file with the SignedBeaconBlock in SSZ and return the SSZ object. This is used for testing only.
 // We can't use the readSignedBeaconBlockInterface to update struct fields so this is the workaround.
-func readSignedBeaconBlock(slotFile string) (*st.SignedBeaconBlock, error) {
+func readSignedBeaconBlock(slotFile string) (*beaconclient.SignedBeaconBlock, error) {
     dat, err := os.ReadFile(slotFile)
     if err != nil {
         return nil, fmt.Errorf("Can't find the slot file, %s", slotFile)
     }
-    block := &st.SignedBeaconBlock{}
+    var block beaconclient.SignedBeaconBlock
     err = block.UnmarshalSSZ(dat)
     Expect(err).ToNot(HaveOccurred())
-    return block, nil
-}
-
-// Read a file with the SignedBeaconBlock in SSZ and return the SSZ object. This is used for testing only.
-// We can't use the readSignedBeaconBlockInterface to update struct fields so this is the workaround.
-func readSignedBeaconBlockAltair(slotFile string) (*st.SignedBeaconBlockAltair, error) {
-    dat, err := os.ReadFile(slotFile)
-    if err != nil {
-        return nil, fmt.Errorf("Can't find the slot file, %s", slotFile)
-    }
-    block := &st.SignedBeaconBlockAltair{}
-    err = block.UnmarshalSSZ(dat)
-    Expect(err).ToNot(HaveOccurred())
-    return block, nil
-}
-
-// Read a file with the SignedBeaconBlock in SSZ and return the SSZ objects interface. This is production like.
-// It will provide the correct struct for the given fork.
-func readSignedBeaconBlockInterface(slotFile string, vm *dt.VersionedUnmarshaler) (si.SignedBeaconBlock, error) {
-    dat, err := os.ReadFile(slotFile)
-    if err != nil {
-        return nil, fmt.Errorf("Can't find the slot file, %s", slotFile)
-    }
-
-    block, err := vm.UnmarshalBeaconBlock(dat)
-    Expect(err).ToNot(HaveOccurred())
-    return block, nil
-
+    return &block, nil
 }

 // Read a file with the BeaconState in SSZ and return the SSZ object
-func readBeaconState(slotFile string) (state.BeaconState, *dt.VersionedUnmarshaler, error) {
+func readBeaconState(slotFile string) (*beaconclient.BeaconState, error) {
     dat, err := os.ReadFile(slotFile)
     if err != nil {
-        return nil, nil, fmt.Errorf("Can't find the slot file, %s", slotFile)
+        return nil, fmt.Errorf("Can't find the slot file, %s", slotFile)
     }
-    versionedUnmarshaler, err := dt.FromState(dat)
+    var beaconState beaconclient.BeaconState
+    err = beaconState.UnmarshalSSZ(dat)
     Expect(err).ToNot(HaveOccurred())
-    state, err := versionedUnmarshaler.UnmarshalBeaconState(dat)
-    Expect(err).ToNot(HaveOccurred())
-    return state, versionedUnmarshaler, nil
+    return &beaconState, nil
 }

 // An object that is used to aggregate test functions. Test functions are needed because we need to
@ -768,52 +854,62 @@ func (tbc TestBeaconNode) provideSsz(slotIdentifier string, sszIdentifier string
             if err != nil {
                 return nil, err
             }
+            Expect(block.IsPhase0()).To(BeTrue())
+            var phase0 = block.GetPhase0()
+
             slot, err := strconv.ParseUint(Message.HeadMessage.Slot, 10, 64)
             Expect(err).ToNot(HaveOccurred())
-            block.Block.Slot = types.Slot(slot)
+            phase0.Message.Slot = common.Slot(slot)

-            block.Block.StateRoot, err = hex.DecodeString(Message.HeadMessage.State)
+            phase0.Message.StateRoot, err = decodeRoot(Message.HeadMessage.State)
             Expect(err).ToNot(HaveOccurred())

             if Message.MimicConfig.ParentRoot == "" {
-                block.Block.ParentRoot, err = hex.DecodeString(dummyParentRoot)
+                phase0.Message.ParentRoot, err = decodeRoot(dummyParentRoot)
                 Expect(err).ToNot(HaveOccurred())
             } else {
-                block.Block.ParentRoot, err = hex.DecodeString(Message.MimicConfig.ParentRoot)
+                phase0.Message.ParentRoot, err = decodeRoot(Message.MimicConfig.ParentRoot)
                 Expect(err).ToNot(HaveOccurred())
             }
             return block.MarshalSSZ()
         case "altair":
-            block, err := readSignedBeaconBlockAltair(slotFile)
+            block, err := readSignedBeaconBlock(slotFile)
             if err != nil {
                 return nil, err
             }
+            Expect(block.IsAltair()).To(BeTrue())
+            var altair = block.GetAltair()
             slot, err := strconv.ParseUint(Message.HeadMessage.Slot, 10, 64)
             Expect(err).ToNot(HaveOccurred())
-            block.Block.Slot = types.Slot(slot)
+            altair.Message.Slot = common.Slot(slot)

-            block.Block.StateRoot, err = hex.DecodeString(Message.HeadMessage.State)
+            altair.Message.StateRoot, err = decodeRoot(Message.HeadMessage.State)
             Expect(err).ToNot(HaveOccurred())

             if Message.MimicConfig.ParentRoot == "" {
-                block.Block.ParentRoot, err = hex.DecodeString(dummyParentRoot)
+                altair.Message.ParentRoot, err = decodeRoot(dummyParentRoot)
                 Expect(err).ToNot(HaveOccurred())
             } else {
-                block.Block.ParentRoot, err = hex.DecodeString(Message.MimicConfig.ParentRoot)
+                altair.Message.ParentRoot, err = decodeRoot(Message.MimicConfig.ParentRoot)
                 Expect(err).ToNot(HaveOccurred())
             }
             return block.MarshalSSZ()
         }
     }
     if sszIdentifier == "state" {
-        state, _, err := readBeaconState(slotFile)
+        state, err := readBeaconState(slotFile)
         if err != nil {
             return nil, err
         }
         slot, err := strconv.ParseUint(Message.HeadMessage.Slot, 10, 64)
         Expect(err).ToNot(HaveOccurred())
-        err = state.SetSlot(types.Slot(slot))
-        Expect(err).ToNot(HaveOccurred())
+        if state.IsBellatrix() {
+            state.GetBellatrix().Slot = common.Slot(slot)
+        } else if state.IsAltair() {
+            state.GetAltair().Slot = common.Slot(slot)
+        } else {
+            state.GetPhase0().Slot = common.Slot(slot)
+        }
         return state.MarshalSSZ()
     }
 }
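The state branch of provideSsz now writes the slot into whichever fork-specific variant the beaconclient.BeaconState wrapper holds. A small helper could centralize that branching; the sketch below is only an illustration and assumes the accessors shown in the diff (IsBellatrix, IsAltair, GetBellatrix, GetAltair, GetPhase0) and zrnt's common.Slot, both of which the test file already imports.

// setStateSlot is a hypothetical refactoring of the branching added above: it sets
// the slot on the populated fork-specific BeaconState variant and leaves the rest alone.
func setStateSlot(state *beaconclient.BeaconState, slot uint64) {
	switch {
	case state.IsBellatrix():
		state.GetBellatrix().Slot = common.Slot(slot)
	case state.IsAltair():
		state.GetAltair().Slot = common.Slot(slot)
	default:
		state.GetPhase0().Slot = common.Slot(slot)
	}
}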
@ -831,7 +927,7 @@ func (tbc TestBeaconNode) provideSsz(slotIdentifier string, sszIdentifier string

 // Helper function to test three reorg messages. There are going to be many functions like this,
 // Because we need to test the same logic for multiple phases.
-func (tbc TestBeaconNode) testMultipleReorgs(bc *beaconclient.BeaconClient, firstHead beaconclient.Head, secondHead beaconclient.Head, thirdHead beaconclient.Head, epoch int, maxRetry int) {
+func (tbc TestBeaconNode) testMultipleReorgs(bc *beaconclient.BeaconClient, firstHead beaconclient.Head, secondHead beaconclient.Head, thirdHead beaconclient.Head, epoch beaconclient.Epoch, maxRetry int) {
     go bc.CaptureHead()
     time.Sleep(1 * time.Second)

@ -863,7 +959,7 @@ func (tbc TestBeaconNode) testMultipleReorgs(bc *beaconclient.BeaconClient, firs
         NewHeadBlock: secondHead.Block,
         OldHeadState: thirdHead.State,
         NewHeadState: secondHead.State,
-        Epoch: strconv.Itoa(epoch),
+        Epoch: epoch.Format(),
         ExecutionOptimistic: false,
     })
     Expect(err).ToNot(HaveOccurred())
@ -893,7 +989,7 @@ func (tbc TestBeaconNode) testMultipleReorgs(bc *beaconclient.BeaconClient, firs
 }

 // A test to validate a single block was processed correctly
-func (tbc TestBeaconNode) testProcessBlock(bc *beaconclient.BeaconClient, head beaconclient.Head, epoch int, maxRetry int, expectedSuccessInsert uint64, expectedKnownGaps uint64, expectedReorgs uint64) {
+func (tbc TestBeaconNode) testProcessBlock(bc *beaconclient.BeaconClient, head beaconclient.Head, epoch beaconclient.Epoch, maxRetry int, expectedSuccessInsert uint64, expectedKnownGaps uint64, expectedReorgs uint64) {
     go bc.CaptureHead()
     time.Sleep(1 * time.Second)
     sendHeadMessage(bc, head, maxRetry, expectedSuccessInsert)
@ -923,7 +1019,7 @@ func (tbc TestBeaconNode) testProcessBlock(bc *beaconclient.BeaconClient, head b

 // A test that ensures that if two HeadMessages occur for a single slot they are marked
 // as proposed and forked correctly.
-func (tbc TestBeaconNode) testMultipleHead(bc *beaconclient.BeaconClient, firstHead beaconclient.Head, secondHead beaconclient.Head, epoch int, maxRetry int) {
+func (tbc TestBeaconNode) testMultipleHead(bc *beaconclient.BeaconClient, firstHead beaconclient.Head, secondHead beaconclient.Head, epoch beaconclient.Epoch, maxRetry int) {
     go bc.CaptureHead()
     time.Sleep(1 * time.Second)

@ -979,15 +1075,25 @@ func (tbc TestBeaconNode) testKnownGapsMessages(bc *beaconclient.BeaconClient, t

 // This function will make sure we are properly able to get the SszRoot of the SignedBeaconBlock and the BeaconState.
 func testSszRoot(msg Message) {
-    state, vm, err := readBeaconState(msg.BeaconState)
+    state, err := readBeaconState(msg.BeaconState)
     Expect(err).ToNot(HaveOccurred())
-    stateRoot, err := state.HashTreeRoot(context.Background())
+    stateRoot := state.HashTreeRoot()
     Expect(err).ToNot(HaveOccurred())
     Expect(msg.HeadMessage.State).To(Equal("0x" + hex.EncodeToString(stateRoot[:])))

-    block, err := readSignedBeaconBlockInterface(msg.SignedBeaconBlock, vm)
+    block, err := readSignedBeaconBlock(msg.SignedBeaconBlock)
     Expect(err).ToNot(HaveOccurred())
-    blockRoot, err := block.Block().HashTreeRoot()
+    blockRoot := block.Block().HashTreeRoot()
     Expect(err).ToNot(HaveOccurred())
     Expect(msg.HeadMessage.Block).To(Equal("0x" + hex.EncodeToString(blockRoot[:])))
 }
+
+func decodeRoot(raw string) (common.Root, error) {
+    value, err := hex.DecodeString(raw)
+    if err != nil {
+        return common.Root{}, err
+    }
+    var root common.Root
+    copy(root[:], value[:32])
+    return root, nil
+}
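decodeRoot above hex-decodes the input and copies the first 32 bytes into a fixed-size common.Root; hex.DecodeString expects an unprefixed hex string and the copy assumes at least 32 decoded bytes. A self-contained illustration of the same fixed-size copy, using a plain [32]byte and one of the parent roots from the fixtures above (without its 0x prefix):

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	// 64 hex characters decode to exactly 32 bytes.
	raw := "08736ddc20b77f65d1aa6301f7e6e856a820ff3ce6430ed2c3694ae35580e740"
	value, err := hex.DecodeString(raw)
	if err != nil {
		panic(err)
	}
	var root [32]byte
	copy(root[:], value[:32]) // same pattern decodeRoot uses for common.Root
	fmt.Printf("0x%x\n", root)
}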
@ -22,16 +22,15 @@ import (
     "fmt"

     log "github.com/sirupsen/logrus"
-    "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
     "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
     "golang.org/x/sync/errgroup"
 )

 // This function will perform all the heavy lifting for tracking the head of the chain.
-func (bc *BeaconClient) CaptureHistoric(ctx context.Context, maxWorkers int) []error {
+func (bc *BeaconClient) CaptureHistoric(ctx context.Context, maxWorkers int, minimumSlot Slot) []error {
     log.Info("We are starting the historical processing service.")
     bc.HistoricalProcess = HistoricProcessing{db: bc.Db, metrics: bc.Metrics, uniqueNodeIdentifier: bc.UniqueNodeIdentifier}
-    errs := handleBatchProcess(ctx, maxWorkers, bc.HistoricalProcess, bc.HistoricalProcess.db, bc.ServerEndpoint, bc.Metrics, bc.CheckDb, bc.Metrics.IncrementHistoricSlotProcessed)
+    errs := handleBatchProcess(ctx, maxWorkers, bc.HistoricalProcess, bc.SlotProcessingDetails(), bc.Metrics.IncrementHistoricSlotProcessed, minimumSlot)
     log.Debug("Exiting Historical")
     return errs
 }
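CaptureHistoric now threads a minimumSlot through handleBatchProcess into getSlotRange, so historical processing can ignore everything below a floor. A hedged usage sketch, assuming an already-constructed *beaconclient.BeaconClient named bc and an arbitrary floor value:

// Start historical processing, skipping slots below an illustrative floor.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

minimumSlot := beaconclient.Slot(4636672) // illustrative value, not a required setting
go func() {
	if errs := bc.CaptureHistoric(ctx, 2, minimumSlot); len(errs) != 0 {
		log.WithField("errs", errs).Error("Historical processing returned errors")
	}
}()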
@ -53,7 +52,7 @@ func (bc *BeaconClient) StopHistoric(cancel context.CancelFunc) error {
 //
 // 2. Known Gaps Processing
 type BatchProcessing interface {
-    getSlotRange(context.Context, chan<- slotsToProcess) []error // Write the slots to process in a channel, return an error if you cant get the next slots to write.
+    getSlotRange(context.Context, chan<- slotsToProcess, Slot) []error // Write the slots to process in a channel, return an error if you cant get the next slots to write.
     handleProcessingErrors(context.Context, <-chan batchHistoricError) // Custom logic to handle errors.
     removeTableEntry(context.Context, <-chan slotsToProcess) error // With the provided start and end slot, remove the entry from the database.
     releaseDbLocks() error // Update the checked_out column to false for whatever table is being updated.
@ -67,14 +66,14 @@ type BatchProcessing interface {

 // A struct to pass around indicating a table entry for slots to process.
 type slotsToProcess struct {
-    startSlot int // The start slot
-    endSlot int // The end slot
+    startSlot Slot // The start slot
+    endSlot Slot // The end slot
 }

 type batchHistoricError struct {
     err error // The error that occurred when attempting to a slot
     errProcess string // The process that caused the error.
-    slot int // The slot which the error is for.
+    slot Slot // The slot which the error is for.
 }

 // Wrapper function for the BatchProcessing interface.
@ -91,9 +90,9 @@ type batchHistoricError struct {
 // 4. Remove the slot entry from the DB.
 //
 // 5. Handle any errors.
-func handleBatchProcess(ctx context.Context, maxWorkers int, bp BatchProcessing, db sql.Database, serverEndpoint string, metrics *BeaconClientMetrics, checkDb bool, incrementTracker func(uint64)) []error {
+func handleBatchProcess(ctx context.Context, maxWorkers int, bp BatchProcessing, spd SlotProcessingDetails, incrementTracker func(uint64), minimumSlot Slot) []error {
     slotsCh := make(chan slotsToProcess)
-    workCh := make(chan int)
+    workCh := make(chan Slot)
     processedCh := make(chan slotsToProcess)
     errCh := make(chan batchHistoricError)
     finalErrCh := make(chan []error, 1)
@ -108,7 +107,7 @@ func handleBatchProcess(ctx context.Context, maxWorkers int, bp BatchProcessing,
     for w := 1; w <= maxWorkers; w++ {
         log.WithFields(log.Fields{"maxWorkers": maxWorkers}).Debug("Starting batch processing workers")

-        go processSlotRangeWorker(ctx, workCh, errCh, db, serverEndpoint, metrics, checkDb, incrementTracker)
+        go processSlotRangeWorker(ctx, workCh, errCh, spd, incrementTracker)
     }

     // Process all ranges and send each individual slot to the worker.
@ -161,7 +160,7 @@ func handleBatchProcess(ctx context.Context, maxWorkers int, bp BatchProcessing,

     // Get slots from the DB.
     go func() {
-        errs := bp.getSlotRange(ctx, slotsCh) // Periodically adds new entries....
+        errs := bp.getSlotRange(ctx, slotsCh, minimumSlot) // Periodically adds new entries....
         if errs != nil {
             finalErrCh <- errs
         }
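handleBatchProcess now receives a single SlotProcessingDetails value in place of the separate db, serverEndpoint, metrics, and checkDb arguments, and passes it straight through to each processSlotRangeWorker. SlotProcessingDetails itself is not shown in this diff; the shape below is purely an assumption for illustration, bundling the dependencies the old signature carried individually.

// Hypothetical sketch only; the real SlotProcessingDetails lives elsewhere in the
// beaconclient package and may differ in names and fields.
type SlotProcessingDetails struct {
	Db             sql.Database         // was the db parameter
	ServerEndpoint string               // was the serverEndpoint parameter
	Metrics        *BeaconClientMetrics // was the metrics parameter
	CheckDb        bool                 // was the checkDb parameter
}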
@ -54,8 +54,12 @@ var _ = Describe("Capturehistoric", func() {
             BeaconNodeTester.writeEventToHistoricProcess(bc, 0, 0, 10)
             BeaconNodeTester.runHistoricalProcess(bc, 2, 1, 0, 0, 0)
             validateSlot(bc, BeaconNodeTester.TestEvents["0"].HeadMessage, 0, "proposed")
-            validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["0"].HeadMessage, BeaconNodeTester.TestEvents["0"].CorrectParentRoot, BeaconNodeTester.TestEvents["0"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["0"].CorrectSignedBeaconBlockMhKey)
+            if bc.PerformBeaconBlockProcessing {
+                validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["0"].HeadMessage, BeaconNodeTester.TestEvents["0"].CorrectParentRoot, BeaconNodeTester.TestEvents["0"].CorrectEth1DataBlockHash, BeaconNodeTester.TestEvents["0"].CorrectSignedBeaconBlockMhKey, BeaconNodeTester.TestEvents["0"].CorrectExecutionPayloadHeader)
+            }
+            if bc.PerformBeaconStateProcessing {
             validateBeaconState(bc, BeaconNodeTester.TestEvents["0"].HeadMessage, BeaconNodeTester.TestEvents["0"].CorrectBeaconStateMhKey)
+            }
         })
     })
     Context("When there is a skipped slot", func() {
@ -94,7 +98,7 @@ var _ = Describe("Capturehistoric", func() {
             BeaconNodeTester.runKnownGapsProcess(bc, 2, 2, 0, 2, 0)
         })
     })
-    Context("When theres a reprocessing error", Label("reprocessingError"), func() {
+    Context("When theres a reprocessing error", Label("reprocessingError", "flaky"), func() {
         It("Should update the reprocessing error.", func() {
             bc := setUpTest(BeaconNodeTester.TestConfig, "99")
             BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot)
@ -201,7 +205,7 @@ func (tbc TestBeaconNode) writeEventToHistoricProcess(bc *beaconclient.BeaconCli
 // Start the CaptureHistoric function, and check for the correct inserted slots.
 func (tbc TestBeaconNode) runHistoricalProcess(bc *beaconclient.BeaconClient, maxWorkers int, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) {
     ctx, cancel := context.WithCancel(context.Background())
-    go bc.CaptureHistoric(ctx, maxWorkers)
+    go bc.CaptureHistoric(ctx, maxWorkers, 0)
     validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError)
     log.Debug("Calling the stop function for historical processing..")
     err := bc.StopHistoric(cancel)
@ -213,7 +217,7 @@ func (tbc TestBeaconNode) runHistoricalProcess(bc *beaconclient.BeaconClient, ma
 // Wrapper function that processes knownGaps
 func (tbc TestBeaconNode) runKnownGapsProcess(bc *beaconclient.BeaconClient, maxWorkers int, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) {
     ctx, cancel := context.WithCancel(context.Background())
-    go bc.ProcessKnownGaps(ctx, maxWorkers)
+    go bc.ProcessKnownGaps(ctx, maxWorkers, 0)
     validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError)
     err := bc.StopKnownGapsProcessing(cancel)
     time.Sleep(5 * time.Second)
@ -268,17 +272,29 @@ func validateMetrics(bc *beaconclient.BeaconClient, expectedInserts, expectedReo
 // A wrapper function to validate a few popular blocks
 func validatePopularBatchBlocks(bc *beaconclient.BeaconClient) {
     validateSlot(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, "proposed")
-    validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectParentRoot, BeaconNodeTester.TestEvents["100"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["100"].CorrectSignedBeaconBlockMhKey)
+    if bc.PerformBeaconBlockProcessing {
+        validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectParentRoot, BeaconNodeTester.TestEvents["100"].CorrectEth1DataBlockHash, BeaconNodeTester.TestEvents["100"].CorrectSignedBeaconBlockMhKey, BeaconNodeTester.TestEvents["100"].CorrectExecutionPayloadHeader)
+    }
+    if bc.PerformBeaconStateProcessing {
     validateBeaconState(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectBeaconStateMhKey)
+    }

     validateSlot(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, 3, "proposed")
-    validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, BeaconNodeTester.TestEvents["100"].HeadMessage.Block, BeaconNodeTester.TestEvents["101"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["101"].CorrectSignedBeaconBlockMhKey)
+    if bc.PerformBeaconBlockProcessing {
+        validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, BeaconNodeTester.TestEvents["100"].HeadMessage.Block, BeaconNodeTester.TestEvents["101"].CorrectEth1DataBlockHash, BeaconNodeTester.TestEvents["101"].CorrectSignedBeaconBlockMhKey, BeaconNodeTester.TestEvents["101"].CorrectExecutionPayloadHeader)
+    }
+    if bc.PerformBeaconStateProcessing {
     validateBeaconState(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, BeaconNodeTester.TestEvents["101"].CorrectBeaconStateMhKey)
+    }

     validateSlot(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, "proposed")
-    validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectParentRoot, BeaconNodeTester.TestEvents["2375703"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["2375703"].CorrectSignedBeaconBlockMhKey)
+    if bc.PerformBeaconBlockProcessing {
+        validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectParentRoot, BeaconNodeTester.TestEvents["2375703"].CorrectEth1DataBlockHash, BeaconNodeTester.TestEvents["2375703"].CorrectSignedBeaconBlockMhKey, BeaconNodeTester.TestEvents["2375703"].CorrectExecutionPayloadHeader)
+    }
+    if bc.PerformBeaconStateProcessing {
     validateBeaconState(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectBeaconStateMhKey)
     }
+}

 // Make sure all rows have checked_out as false.
 func validateAllRowsCheckedOut(db sql.Database, checkStmt string) {
@@ -16,9 +16,11 @@
 package beaconclient

 import (
+	"bufio"
+	"bytes"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"strconv"
 	"strings"
@@ -71,14 +73,17 @@ func (bc BeaconClient) QueryHeadSync() (Sync, error) {
 	}
 	defer resp.Body.Close()
-	body, err := ioutil.ReadAll(resp.Body)
+	var body bytes.Buffer
+	buf := bufio.NewWriter(&body)
+	_, err = io.Copy(buf, resp.Body)
+
 	if err != nil {
 		return syncStatus, err
 	}

-	if err := json.Unmarshal(body, &syncStatus); err != nil {
+	if err := json.Unmarshal(body.Bytes(), &syncStatus); err != nil {
 		loghelper.LogEndpoint(bcSync).WithFields(log.Fields{
-			"rawMessage": string(body),
+			"rawMessage": body.String(),
 			"err":        err,
 		}).Error("Unable to unmarshal sync status")
 		return syncStatus, err
@@ -149,14 +154,16 @@ func (bc BeaconClient) queryLighthouseDbInfo() (LighthouseDatabaseInfo, error) {
 	}
 	defer resp.Body.Close()
-	body, err := ioutil.ReadAll(resp.Body)
+	var body bytes.Buffer
+	buf := bufio.NewWriter(&body)
+	_, err = io.Copy(buf, resp.Body)
 	if err != nil {
 		return dbInfo, err
 	}

-	if err := json.Unmarshal(body, &dbInfo); err != nil {
+	if err := json.Unmarshal(body.Bytes(), &dbInfo); err != nil {
 		loghelper.LogEndpoint(lhDbInfo).WithFields(log.Fields{
-			"rawMessage": string(body),
+			"rawMessage": body.String(),
 			"err":        err,
 		}).Error("Unable to unmarshal the lighthouse database information")
 		return dbInfo, err
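The two hunks above swap `ioutil.ReadAll` for an `io.Copy` into a `bytes.Buffer` behind a `bufio.Writer`. For reference, a minimal, self-contained sketch of that pattern follows (not code from this PR; the URL is only illustrative). Note that when a `bufio.Writer` sits between the copy and the buffer, its `Flush` must be called before the buffered bytes are read back, otherwise short responses can remain in the bufio layer.

```go
// Sketch only: drain an HTTP response body into a bytes.Buffer via io.Copy.
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func readBody(resp *http.Response) ([]byte, error) {
	defer resp.Body.Close()
	var body bytes.Buffer
	buf := bufio.NewWriter(&body)
	if _, err := io.Copy(buf, resp.Body); err != nil {
		return nil, err
	}
	// Without this Flush, bytes still held by the bufio.Writer never reach
	// the underlying bytes.Buffer.
	if err := buf.Flush(); err != nil {
		return nil, err
	}
	return body.Bytes(), nil
}

func main() {
	// Illustrative endpoint only; the indexer builds its own URLs.
	resp, err := http.Get("http://localhost:5052/eth/v1/node/syncing")
	if err != nil {
		fmt.Println(err)
		return
	}
	b, err := readBody(resp)
	fmt.Println(string(b), err)
}
```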
441  pkg/beaconclient/consensus.go  (Normal file)
@@ -0,0 +1,441 @@
package beaconclient

import (
	"bytes"
	"errors"
	"github.com/protolambda/zrnt/eth2/beacon/altair"
	"github.com/protolambda/zrnt/eth2/beacon/bellatrix"
	"github.com/protolambda/zrnt/eth2/beacon/common"
	"github.com/protolambda/zrnt/eth2/beacon/phase0"
	"github.com/protolambda/zrnt/eth2/configs"
	"github.com/protolambda/ztyp/codec"
	"github.com/protolambda/ztyp/tree"
	log "github.com/sirupsen/logrus"
	"strconv"
)

type Eth1Data common.Eth1Data
type Root common.Root
type Signature common.BLSSignature
type Slot uint64
type Epoch uint64
type ExecutionPayloadHeader common.ExecutionPayloadHeader

func ParseSlot(v string) (Slot, error) {
	slotNum, err := strconv.ParseUint(v, 10, 64)
	return Slot(slotNum), err
}

func (s *Slot) Format() string {
	return strconv.FormatUint(uint64(*s), 10)
}

func (s *Slot) Number() uint64 {
	return uint64(*s)
}

func (s *Slot) Plus(v uint64) Slot {
	return Slot(v + s.Number())
}

func (s *Slot) PlusInt(v int) Slot {
	return s.Plus(uint64(v))
}

func (e *Epoch) Format() string {
	return strconv.FormatUint(uint64(*e), 10)
}

type BeaconBlock struct {
	spec      *common.Spec
	bellatrix *bellatrix.BeaconBlock
	altair    *altair.BeaconBlock
	phase0    *phase0.BeaconBlock
}

type BeaconBlockBody struct {
	spec      *common.Spec
	bellatrix *bellatrix.BeaconBlockBody
	altair    *altair.BeaconBlockBody
	phase0    *phase0.BeaconBlockBody
}

type BeaconState struct {
	spec      *common.Spec
	bellatrix *bellatrix.BeaconState
	altair    *altair.BeaconState
	phase0    *phase0.BeaconState
}

type SignedBeaconBlock struct {
	spec      *common.Spec
	bellatrix *bellatrix.SignedBeaconBlock
	altair    *altair.SignedBeaconBlock
	phase0    *phase0.SignedBeaconBlock
}

func (s *SignedBeaconBlock) UnmarshalSSZ(ssz []byte) error {
	spec := chooseSpec(s.spec)

	var bellatrix bellatrix.SignedBeaconBlock
	err := bellatrix.Deserialize(spec, makeDecodingReader(ssz))
	if nil == err {
		s.bellatrix = &bellatrix
		s.altair = nil
		s.phase0 = nil
		log.Info("Unmarshalled Bellatrix SignedBeaconBlock")
		return nil
	}

	var altair altair.SignedBeaconBlock
	err = altair.Deserialize(spec, makeDecodingReader(ssz))
	if nil == err {
		s.bellatrix = nil
		s.altair = &altair
		s.phase0 = nil
		log.Info("Unmarshalled Altair SignedBeaconBlock")
		return nil
	}

	var phase0 phase0.SignedBeaconBlock
	err = phase0.Deserialize(spec, makeDecodingReader(ssz))
	if nil == err {
		s.bellatrix = nil
		s.altair = nil
		s.phase0 = &phase0
		log.Info("Unmarshalled Phase0 SignedBeaconBlock")
		return nil
	}

	s.bellatrix = nil
	s.altair = nil
	s.phase0 = nil

	log.Warning("Unable to unmarshal SignedBeaconBlock")
	return err
}

func (s *SignedBeaconBlock) MarshalSSZ() ([]byte, error) {
	spec := chooseSpec(s.spec)
	var err error
	var buf bytes.Buffer
	encodingWriter := codec.NewEncodingWriter(&buf)

	if s.IsBellatrix() {
		err = s.bellatrix.Serialize(spec, encodingWriter)
	}
	if s.IsAltair() {
		err = s.altair.Serialize(spec, encodingWriter)
	}
	if s.IsPhase0() {
		err = s.phase0.Serialize(spec, encodingWriter)
	}

	if err != nil {
		return nil, err
	}

	return buf.Bytes(), err
}

func (s *SignedBeaconBlock) IsBellatrix() bool {
	return s.bellatrix != nil
}

func (s *SignedBeaconBlock) IsAltair() bool {
	return s.altair != nil
}

func (s *SignedBeaconBlock) IsPhase0() bool {
	return s.phase0 != nil
}

func (s *SignedBeaconBlock) GetBellatrix() *bellatrix.SignedBeaconBlock {
	return s.bellatrix
}

func (s *SignedBeaconBlock) GetAltair() *altair.SignedBeaconBlock {
	return s.altair
}

func (s *SignedBeaconBlock) GetPhase0() *phase0.SignedBeaconBlock {
	return s.phase0
}

func (s *SignedBeaconBlock) Signature() Signature {
	if s.IsBellatrix() {
		return Signature(s.bellatrix.Signature)
	}

	if s.IsAltair() {
		return Signature(s.altair.Signature)
	}

	if s.IsPhase0() {
		return Signature(s.phase0.Signature)
	}

	return Signature{}
}

func (s *SignedBeaconBlock) Block() *BeaconBlock {
	if s.IsBellatrix() {
		return &BeaconBlock{bellatrix: &s.bellatrix.Message, spec: s.spec}
	}

	if s.IsAltair() {
		return &BeaconBlock{altair: &s.altair.Message, spec: s.spec}
	}

	if s.IsPhase0() {
		return &BeaconBlock{phase0: &s.phase0.Message, spec: s.spec}
	}

	return nil
}

func (b *BeaconBlock) IsBellatrix() bool {
	return b.bellatrix != nil
}

func (b *BeaconBlock) IsAltair() bool {
	return b.altair != nil
}

func (b *BeaconBlock) IsPhase0() bool {
	return b.phase0 != nil
}

func (s *BeaconBlock) GetBellatrix() *bellatrix.BeaconBlock {
	return s.bellatrix
}

func (s *BeaconBlock) GetAltair() *altair.BeaconBlock {
	return s.altair
}

func (s *BeaconBlock) GetPhase0() *phase0.BeaconBlock {
	return s.phase0
}

func (b *BeaconBlock) ParentRoot() Root {
	if b.IsBellatrix() {
		return Root(b.bellatrix.ParentRoot)
	}

	if b.IsAltair() {
		return Root(b.altair.ParentRoot)
	}

	if b.IsPhase0() {
		return Root(b.phase0.ParentRoot)
	}

	return Root{}
}

func (b *BeaconBlock) StateRoot() Root {
	if b.IsBellatrix() {
		return Root(b.bellatrix.StateRoot)
	}

	if b.IsAltair() {
		return Root(b.altair.StateRoot)
	}

	if b.IsPhase0() {
		return Root(b.phase0.StateRoot)
	}

	return Root{}
}

func (b *BeaconBlock) Body() *BeaconBlockBody {
	if b.IsBellatrix() {
		return &BeaconBlockBody{bellatrix: &b.bellatrix.Body, spec: b.spec}
	}

	if b.IsAltair() {
		return &BeaconBlockBody{altair: &b.altair.Body, spec: b.spec}
	}

	if b.IsPhase0() {
		return &BeaconBlockBody{phase0: &b.phase0.Body, spec: b.spec}
	}

	return nil
}

func (b *BeaconBlockBody) IsBellatrix() bool {
	return b.bellatrix != nil
}

func (b *BeaconBlockBody) IsAltair() bool {
	return b.altair != nil
}

func (b *BeaconBlockBody) IsPhase0() bool {
	return b.phase0 != nil
}

func (b *BeaconBlockBody) Eth1Data() Eth1Data {
	if b.IsBellatrix() {
		return Eth1Data(b.bellatrix.Eth1Data)
	}

	if b.IsAltair() {
		return Eth1Data(b.altair.Eth1Data)
	}

	if b.IsPhase0() {
		return Eth1Data(b.phase0.Eth1Data)
	}

	return Eth1Data{}
}

func (b *BeaconBlockBody) ExecutionPayloadHeader() *ExecutionPayloadHeader {
	if b.IsBellatrix() {
		payloadHeader := b.bellatrix.ExecutionPayload.Header(chooseSpec(b.spec))
		return (*ExecutionPayloadHeader)(payloadHeader)
	}

	return nil
}

func (b *BeaconBlock) HashTreeRoot() Root {
	spec := chooseSpec(b.spec)
	hashFn := tree.GetHashFn()

	if b.IsBellatrix() {
		return Root(b.bellatrix.HashTreeRoot(spec, hashFn))
	}

	if b.IsAltair() {
		return Root(b.altair.HashTreeRoot(spec, hashFn))
	}

	if b.IsPhase0() {
		return Root(b.phase0.HashTreeRoot(spec, hashFn))
	}

	return Root{}
}

func (s *BeaconState) UnmarshalSSZ(ssz []byte) error {
	spec := chooseSpec(s.spec)

	var bellatrix bellatrix.BeaconState
	err := bellatrix.Deserialize(spec, makeDecodingReader(ssz))
	if nil == err {
		s.bellatrix = &bellatrix
		s.altair = nil
		s.phase0 = nil
		log.Info("Unmarshalled Bellatrix BeaconState")
		return nil
	}

	var altair altair.BeaconState
	err = altair.Deserialize(spec, makeDecodingReader(ssz))
	if nil == err {
		s.bellatrix = nil
		s.altair = &altair
		s.phase0 = nil
		log.Info("Unmarshalled Altair BeaconState")
		return nil
	}

	var phase0 phase0.BeaconState
	err = phase0.Deserialize(spec, makeDecodingReader(ssz))
	if nil == err {
		s.bellatrix = nil
		s.altair = nil
		s.phase0 = &phase0
		log.Info("Unmarshalled Phase0 BeaconState")
		return nil
	}

	s.bellatrix = nil
	s.altair = nil
	s.phase0 = nil

	log.Warning("Unable to unmarshal BeaconState")
	return err
}

func (s *BeaconState) MarshalSSZ() ([]byte, error) {
	spec := chooseSpec(s.spec)
	var err error
	var buf bytes.Buffer
	encodingWriter := codec.NewEncodingWriter(&buf)

	if s.IsBellatrix() {
		err = s.bellatrix.Serialize(spec, encodingWriter)
	} else if s.IsAltair() {
		err = s.altair.Serialize(spec, encodingWriter)
	} else if s.IsPhase0() {
		err = s.phase0.Serialize(spec, encodingWriter)
	} else {
		err = errors.New("BeaconState not set")
	}

	if nil != err {
		return nil, err
	}

	return buf.Bytes(), nil
}

func (s *BeaconState) IsBellatrix() bool {
	return s.bellatrix != nil
}

func (s *BeaconState) IsAltair() bool {
	return s.altair != nil
}

func (s *BeaconState) IsPhase0() bool {
	return s.phase0 != nil
}

func (s *BeaconState) HashTreeRoot() Root {
	spec := chooseSpec(s.spec)
	hashFn := tree.GetHashFn()

	if s.IsBellatrix() {
		return Root(s.bellatrix.HashTreeRoot(spec, hashFn))
	}

	if s.IsAltair() {
		return Root(s.altair.HashTreeRoot(spec, hashFn))
	}

	if s.IsPhase0() {
		return Root(s.phase0.HashTreeRoot(spec, hashFn))
	}

	return Root{}
}

func (s *BeaconState) GetBellatrix() *bellatrix.BeaconState {
	return s.bellatrix
}

func (s *BeaconState) GetAltair() *altair.BeaconState {
	return s.altair
}

func (s *BeaconState) GetPhase0() *phase0.BeaconState {
	return s.phase0
}

func chooseSpec(spec *common.Spec) *common.Spec {
	if nil == spec {
		return configs.Mainnet
	}
	return spec
}

func makeDecodingReader(ssz []byte) *codec.DecodingReader {
	return codec.NewDecodingReader(bytes.NewReader(ssz), uint64(len(ssz)))
}
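The new `consensus.go` wraps the zrnt Phase0/Altair/Bellatrix types behind fork-agnostic structs. A short usage sketch of how these wrappers are meant to be driven follows; it is not part of the PR, it assumes it lives in the same `beaconclient` package (so it can use the package-level logrus `log`), and it assumes the SSZ bytes come from a beacon-node block download.

```go
// Sketch only: decode an SSZ-encoded signed block with the fork-agnostic wrapper
// and, for Bellatrix blocks, read the embedded execution payload header.
func describeBlock(sszBytes []byte) {
	var signedBlock SignedBeaconBlock
	if err := signedBlock.UnmarshalSSZ(sszBytes); err != nil {
		log.Error("not a Phase0/Altair/Bellatrix block: ", err)
		return
	}
	block := signedBlock.Block()
	log.Info("parent root: ", block.ParentRoot())
	log.Info("state root: ", block.StateRoot())
	// ExecutionPayloadHeader() returns nil for pre-Merge (Phase0/Altair) blocks.
	if header := block.Body().ExecutionPayloadHeader(); header != nil {
		log.Info("execution block number: ", header.BlockNumber)
		log.Info("execution block hash: ", header.BlockHash)
	}
}
```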
@@ -18,8 +18,6 @@ package beaconclient
 import (
 	"context"
 	"fmt"
-	"strconv"
-
 	"github.com/jackc/pgx/v4"
 	log "github.com/sirupsen/logrus"
 	"github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
@@ -34,8 +32,14 @@ INSERT INTO eth_beacon.slots (epoch, slot, block_root, state_root, status)
 VALUES ($1, $2, $3, $4, $5) ON CONFLICT (slot, block_root) DO NOTHING`
 	// Statement to upsert to the eth_beacon.signed_blocks table.
 	UpsertSignedBeaconBlockStmt string = `
-INSERT INTO eth_beacon.signed_block (slot, block_root, parent_block_root, eth1_block_hash, mh_key)
+INSERT INTO eth_beacon.signed_block (slot, block_root, parent_block_root, eth1_data_block_hash, mh_key)
 VALUES ($1, $2, $3, $4, $5) ON CONFLICT (slot, block_root) DO NOTHING`
+	UpsertSignedBeaconBlockWithPayloadStmt string = `
+INSERT INTO eth_beacon.signed_block (slot, block_root, parent_block_root, eth1_data_block_hash, mh_key,
+                                     payload_block_number, payload_timestamp, payload_block_hash,
+                                     payload_parent_hash, payload_state_root, payload_receipts_root,
+                                     payload_transactions_root)
+VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) ON CONFLICT (slot, block_root) DO NOTHING`
 	// Statement to upsert to the eth_beacon.state table.
 	UpsertBeaconState string = `
 INSERT INTO eth_beacon.state (slot, state_root, mh_key)
@@ -94,8 +98,8 @@ type DatabaseWriter struct {
 	rawSignedBeaconBlock *[]byte
 }

-func CreateDatabaseWrite(db sql.Database, slot int, stateRoot string, blockRoot string, parentBlockRoot string,
-	eth1BlockHash string, status string, rawSignedBeaconBlock *[]byte, rawBeaconState *[]byte, metrics *BeaconClientMetrics) (*DatabaseWriter, error) {
+func CreateDatabaseWrite(db sql.Database, slot Slot, stateRoot string, blockRoot string, parentBlockRoot string,
+	eth1DataBlockHash string, payloadHeader *ExecutionPayloadHeader, status string, rawSignedBeaconBlock *[]byte, rawBeaconState *[]byte, metrics *BeaconClientMetrics) (*DatabaseWriter, error) {
 	ctx := context.Background()
 	tx, err := db.Begin(ctx)
 	if err != nil {
@@ -110,7 +114,7 @@ func CreateDatabaseWrite(db sql.Database, slot int, stateRoot string, blockRoot
 		Metrics: metrics,
 	}
 	dw.prepareSlotsModel(slot, stateRoot, blockRoot, status)
-	err = dw.prepareSignedBeaconBlockModel(slot, blockRoot, parentBlockRoot, eth1BlockHash)
+	err = dw.prepareSignedBeaconBlockModel(slot, blockRoot, parentBlockRoot, eth1DataBlockHash, payloadHeader)
 	if err != nil {
 		return nil, err
 	}
@@ -124,10 +128,10 @@ func CreateDatabaseWrite(db sql.Database, slot int, stateRoot string, blockRoot
 // Write functions to write each all together...
 // Should I do one atomic write?
 // Create the model for the eth_beacon.slots table
-func (dw *DatabaseWriter) prepareSlotsModel(slot int, stateRoot string, blockRoot string, status string) {
+func (dw *DatabaseWriter) prepareSlotsModel(slot Slot, stateRoot string, blockRoot string, status string) {
 	dw.DbSlots = &DbSlots{
 		Epoch: calculateEpoch(slot, bcSlotsPerEpoch),
-		Slot:  strconv.Itoa(slot),
+		Slot:  slot.Number(),
 		StateRoot: stateRoot,
 		BlockRoot: blockRoot,
 		Status:    status,
@@ -137,30 +141,45 @@ func (dw *DatabaseWriter) prepareSlotsModel(slot int, stateRoot string, blockRoo
 }

 // Create the model for the eth_beacon.signed_block table.
-func (dw *DatabaseWriter) prepareSignedBeaconBlockModel(slot int, blockRoot string, parentBlockRoot string, eth1BlockHash string) error {
+func (dw *DatabaseWriter) prepareSignedBeaconBlockModel(slot Slot, blockRoot string, parentBlockRoot string, eth1DataBlockHash string,
+	payloadHeader *ExecutionPayloadHeader) error {
 	mhKey, err := MultihashKeyFromSSZRoot([]byte(dw.DbSlots.BlockRoot))
 	if err != nil {
 		return err
 	}
 	dw.DbSignedBeaconBlock = &DbSignedBeaconBlock{
-		Slot:          strconv.Itoa(slot),
+		Slot:              slot.Number(),
 		BlockRoot:     blockRoot,
 		ParentBlock:   parentBlockRoot,
-		Eth1BlockHash: eth1BlockHash,
+		Eth1DataBlockHash: eth1DataBlockHash,
 		MhKey:         mhKey,
+		ExecutionPayloadHeader: nil,
 	}
+
+	if nil != payloadHeader {
+		dw.DbSignedBeaconBlock.ExecutionPayloadHeader = &DbExecutionPayloadHeader{
+			BlockNumber:      uint64(payloadHeader.BlockNumber),
+			Timestamp:        uint64(payloadHeader.Timestamp),
+			BlockHash:        toHex(payloadHeader.BlockHash),
+			ParentHash:       toHex(payloadHeader.ParentHash),
+			StateRoot:        toHex(payloadHeader.StateRoot),
+			ReceiptsRoot:     toHex(payloadHeader.ReceiptsRoot),
+			TransactionsRoot: toHex(payloadHeader.TransactionsRoot),
+		}
+	}
+
 	log.Debug("dw.DbSignedBeaconBlock: ", dw.DbSignedBeaconBlock)
 	return nil
 }

 // Create the model for the eth_beacon.state table.
-func (dw *DatabaseWriter) prepareBeaconStateModel(slot int, stateRoot string) error {
+func (dw *DatabaseWriter) prepareBeaconStateModel(slot Slot, stateRoot string) error {
 	mhKey, err := MultihashKeyFromSSZRoot([]byte(dw.DbSlots.StateRoot))
 	if err != nil {
 		return err
 	}
 	dw.DbBeaconState = &DbBeaconState{
-		Slot:      strconv.Itoa(slot),
+		Slot:      slot.Number(),
 		StateRoot: stateRoot,
 		MhKey:     mhKey,
 	}
@@ -229,6 +248,11 @@ func (dw *DatabaseWriter) upsertSlots() error {

 // Add the information for the signed_block to a transaction.
 func (dw *DatabaseWriter) transactSignedBeaconBlocks() error {
+	if nil == dw.rawSignedBeaconBlock || len(*dw.rawSignedBeaconBlock) == 0 {
+		log.Warn("Skipping writing of empty BeaconBlock.")
+		return nil
+	}
+
 	err := dw.upsertPublicBlocks(dw.DbSignedBeaconBlock.MhKey, dw.rawSignedBeaconBlock)
 	if err != nil {
 		return err
@@ -252,9 +276,36 @@ func (dw *DatabaseWriter) upsertPublicBlocks(key string, data *[]byte) error {

 // Upsert to the eth_beacon.signed_block table.
 func (dw *DatabaseWriter) upsertSignedBeaconBlock() error {
-	_, err := dw.Tx.Exec(dw.Ctx, UpsertSignedBeaconBlockStmt, dw.DbSignedBeaconBlock.Slot, dw.DbSignedBeaconBlock.BlockRoot, dw.DbSignedBeaconBlock.ParentBlock, dw.DbSignedBeaconBlock.Eth1BlockHash, dw.DbSignedBeaconBlock.MhKey)
+	block := dw.DbSignedBeaconBlock
+	var err error
+	if nil != block.ExecutionPayloadHeader {
+		_, err = dw.Tx.Exec(dw.Ctx,
+			UpsertSignedBeaconBlockWithPayloadStmt,
+			block.Slot,
+			block.BlockRoot,
+			block.ParentBlock,
+			block.Eth1DataBlockHash,
+			block.MhKey,
+			block.ExecutionPayloadHeader.BlockNumber,
+			block.ExecutionPayloadHeader.Timestamp,
+			block.ExecutionPayloadHeader.BlockHash,
+			block.ExecutionPayloadHeader.ParentHash,
+			block.ExecutionPayloadHeader.StateRoot,
+			block.ExecutionPayloadHeader.ReceiptsRoot,
+			block.ExecutionPayloadHeader.TransactionsRoot,
+		)
+	} else {
+		_, err = dw.Tx.Exec(dw.Ctx,
+			UpsertSignedBeaconBlockStmt,
+			block.Slot,
+			block.BlockRoot,
+			block.ParentBlock,
+			block.Eth1DataBlockHash,
+			block.MhKey,
+		)
+	}
 	if err != nil {
-		loghelper.LogSlotError(dw.DbSlots.Slot, err).WithFields(log.Fields{"block_root": dw.DbSignedBeaconBlock.BlockRoot}).Error("Unable to write to the slot to the eth_beacon.signed_block table")
+		loghelper.LogSlotError(dw.DbSlots.Slot, err).WithFields(log.Fields{"block_root": block.BlockRoot}).Error("Unable to write to the slot to the eth_beacon.signed_block table")
 		return err
 	}
 	return nil
@@ -262,6 +313,11 @@ func (dw *DatabaseWriter) upsertSignedBeaconBlock() error {

 // Add the information for the state to a transaction.
 func (dw *DatabaseWriter) transactBeaconState() error {
+	if nil == dw.rawBeaconState || len(*dw.rawBeaconState) == 0 {
+		log.Warn("Skipping writing of empty BeaconState.")
+		return nil
+	}
+
 	err := dw.upsertPublicBlocks(dw.DbBeaconState.MhKey, dw.rawBeaconState)
 	if err != nil {
 		return err
@@ -285,56 +341,51 @@ func (dw *DatabaseWriter) upsertBeaconState() error {

 // Update a given slot to be marked as forked within a transaction. Provide the slot and the latest latestBlockRoot.
 // We will mark all entries for the given slot that don't match the provided latestBlockRoot as forked.
-func transactReorgs(tx sql.Tx, ctx context.Context, slot string, latestBlockRoot string, metrics *BeaconClientMetrics) {
-	slotNum, strErr := strconv.Atoi(slot)
-	if strErr != nil {
-		loghelper.LogReorgError(slot, latestBlockRoot, strErr).Error("We can't convert the slot to an int...")
-	}
-
+func transactReorgs(tx sql.Tx, ctx context.Context, slot Slot, latestBlockRoot string, metrics *BeaconClientMetrics) {
 	forkCount, err := updateForked(tx, ctx, slot, latestBlockRoot)
 	if err != nil {
-		loghelper.LogReorgError(slot, latestBlockRoot, err).Error("We ran into some trouble while updating all forks.")
-		transactKnownGaps(tx, ctx, 1, slotNum, slotNum, err, "reorg", metrics)
+		loghelper.LogReorgError(slot.Number(), latestBlockRoot, err).Error("We ran into some trouble while updating all forks.")
+		transactKnownGaps(tx, ctx, 1, slot, slot, err, "reorg", metrics)
 	}
 	proposedCount, err := updateProposed(tx, ctx, slot, latestBlockRoot)
 	if err != nil {
-		loghelper.LogReorgError(slot, latestBlockRoot, err).Error("We ran into some trouble while trying to update the proposed slot.")
-		transactKnownGaps(tx, ctx, 1, slotNum, slotNum, err, "reorg", metrics)
+		loghelper.LogReorgError(slot.Number(), latestBlockRoot, err).Error("We ran into some trouble while trying to update the proposed slot.")
+		transactKnownGaps(tx, ctx, 1, slot, slot, err, "reorg", metrics)
 	}

 	if forkCount > 0 {
-		loghelper.LogReorg(slot, latestBlockRoot).WithFields(log.Fields{
+		loghelper.LogReorg(slot.Number(), latestBlockRoot).WithFields(log.Fields{
 			"forkCount": forkCount,
 		}).Info("Updated rows that were forked.")
 	} else {
-		loghelper.LogReorg(slot, latestBlockRoot).WithFields(log.Fields{
+		loghelper.LogReorg(slot.Number(), latestBlockRoot).WithFields(log.Fields{
 			"forkCount": forkCount,
 		}).Warn("There were no forked rows to update.")
 	}

 	if proposedCount == 1 {
-		loghelper.LogReorg(slot, latestBlockRoot).WithFields(log.Fields{
+		loghelper.LogReorg(slot.Number(), latestBlockRoot).WithFields(log.Fields{
 			"proposedCount": proposedCount,
 		}).Info("Updated the row that should have been marked as proposed.")
 	} else if proposedCount > 1 {
-		loghelper.LogReorg(slot, latestBlockRoot).WithFields(log.Fields{
+		loghelper.LogReorg(slot.Number(), latestBlockRoot).WithFields(log.Fields{
 			"proposedCount": proposedCount,
 		}).Error("Too many rows were marked as proposed!")
-		transactKnownGaps(tx, ctx, 1, slotNum, slotNum, fmt.Errorf("Too many rows were marked as unproposed."), "reorg", metrics)
+		transactKnownGaps(tx, ctx, 1, slot, slot, fmt.Errorf("Too many rows were marked as unproposed."), "reorg", metrics)
 	} else if proposedCount == 0 {
-		transactKnownGaps(tx, ctx, 1, slotNum, slotNum, fmt.Errorf("Unable to find properly proposed row in DB"), "reorg", metrics)
-		loghelper.LogReorg(slot, latestBlockRoot).Info("Updated the row that should have been marked as proposed.")
+		transactKnownGaps(tx, ctx, 1, slot, slot, fmt.Errorf("Unable to find properly proposed row in DB"), "reorg", metrics)
+		loghelper.LogReorg(slot.Number(), latestBlockRoot).Info("Updated the row that should have been marked as proposed.")
 	}

 	metrics.IncrementReorgsInsert(1)
 }

 // Wrapper function that will create a transaction and execute the function.
-func writeReorgs(db sql.Database, slot string, latestBlockRoot string, metrics *BeaconClientMetrics) {
+func writeReorgs(db sql.Database, slot Slot, latestBlockRoot string, metrics *BeaconClientMetrics) {
 	ctx := context.Background()
 	tx, err := db.Begin(ctx)
 	if err != nil {
-		loghelper.LogReorgError(slot, latestBlockRoot, err).Fatal("Unable to create a new transaction for reorgs")
+		loghelper.LogReorgError(slot.Number(), latestBlockRoot, err).Fatal("Unable to create a new transaction for reorgs")
 	}
 	defer func() {
 		err := tx.Rollback(ctx)
@@ -344,35 +395,35 @@ func writeReorgs(db sql.Database, slot string, latestBlockRoot string, metrics *
 	}()
 	transactReorgs(tx, ctx, slot, latestBlockRoot, metrics)
 	if err = tx.Commit(ctx); err != nil {
-		loghelper.LogReorgError(slot, latestBlockRoot, err).Fatal("Unable to execute the transaction for reorgs")
+		loghelper.LogReorgError(slot.Number(), latestBlockRoot, err).Fatal("Unable to execute the transaction for reorgs")
 	}
 }

 // Update the slots table by marking the old slot's as forked.
-func updateForked(tx sql.Tx, ctx context.Context, slot string, latestBlockRoot string) (int64, error) {
+func updateForked(tx sql.Tx, ctx context.Context, slot Slot, latestBlockRoot string) (int64, error) {
 	res, err := tx.Exec(ctx, UpdateForkedStmt, slot, latestBlockRoot)
 	if err != nil {
-		loghelper.LogReorgError(slot, latestBlockRoot, err).Error("We are unable to update the eth_beacon.slots table with the forked slots")
+		loghelper.LogReorgError(slot.Number(), latestBlockRoot, err).Error("We are unable to update the eth_beacon.slots table with the forked slots")
 		return 0, err
 	}
 	count, err := res.RowsAffected()
 	if err != nil {
-		loghelper.LogReorgError(slot, latestBlockRoot, err).Error("Unable to figure out how many entries were marked as forked.")
+		loghelper.LogReorgError(slot.Number(), latestBlockRoot, err).Error("Unable to figure out how many entries were marked as forked.")
 		return 0, err
 	}
 	return count, err
 }

 // Mark a slot as proposed.
-func updateProposed(tx sql.Tx, ctx context.Context, slot string, latestBlockRoot string) (int64, error) {
+func updateProposed(tx sql.Tx, ctx context.Context, slot Slot, latestBlockRoot string) (int64, error) {
 	res, err := tx.Exec(ctx, UpdateProposedStmt, slot, latestBlockRoot)
 	if err != nil {
-		loghelper.LogReorgError(slot, latestBlockRoot, err).Error("We are unable to update the eth_beacon.slots table with the proposed slot.")
+		loghelper.LogReorgError(slot.Number(), latestBlockRoot, err).Error("We are unable to update the eth_beacon.slots table with the proposed slot.")
 		return 0, err
 	}
 	count, err := res.RowsAffected()
 	if err != nil {
-		loghelper.LogReorgError(slot, latestBlockRoot, err).Error("Unable to figure out how many entries were marked as proposed")
+		loghelper.LogReorgError(slot.Number(), latestBlockRoot, err).Error("Unable to figure out how many entries were marked as proposed")
 		return 0, err
 	}

@@ -382,17 +433,17 @@ func updateProposed(tx sql.Tx, ctx context.Context, slot string, latestBlockRoot
 // A wrapper function to call upsertKnownGaps. This function will break down the range of known_gaps into
 // smaller chunks. For example, instead of having an entry of 1-101, if we increment the entries by 10 slots, we would
 // have 10 entries as follows: 1-10, 11-20, etc...
-func transactKnownGaps(tx sql.Tx, ctx context.Context, tableIncrement int, startSlot int, endSlot int, entryError error, entryProcess string, metric *BeaconClientMetrics) {
+func transactKnownGaps(tx sql.Tx, ctx context.Context, tableIncrement int, startSlot Slot, endSlot Slot, entryError error, entryProcess string, metric *BeaconClientMetrics) {
 	var entryErrorMsg string
 	if entryError == nil {
 		entryErrorMsg = ""
 	} else {
 		entryErrorMsg = entryError.Error()
 	}
-	if endSlot-startSlot <= tableIncrement {
+	if endSlot.Number()-startSlot.Number() <= uint64(tableIncrement) {
 		kgModel := DbKnownGaps{
-			StartSlot: strconv.Itoa(startSlot),
-			EndSlot:   strconv.Itoa(endSlot),
+			StartSlot: startSlot.Number(),
+			EndSlot:   endSlot.Number(),
 			CheckedOut: false,
 			ReprocessingError: "",
 			EntryError: entryErrorMsg,
@@ -400,24 +451,24 @@ func transactKnownGaps(tx sql.Tx, ctx context.Context, tableIncrement int, start
 		}
 		upsertKnownGaps(tx, ctx, kgModel, metric)
 	} else {
-		totalSlots := endSlot - startSlot
+		totalSlots := endSlot.Number() - startSlot.Number()
 		var chunks int
-		chunks = totalSlots / tableIncrement
-		if totalSlots%tableIncrement != 0 {
+		chunks = int(totalSlots / uint64(tableIncrement))
+		if totalSlots%uint64(tableIncrement) != 0 {
 			chunks = chunks + 1
 		}

 		for i := 0; i < chunks; i++ {
-			var tempStart, tempEnd int
-			tempStart = startSlot + (i * tableIncrement)
+			var tempStart, tempEnd Slot
+			tempStart = startSlot.PlusInt(i * tableIncrement)
 			if i+1 == chunks {
 				tempEnd = endSlot
 			} else {
-				tempEnd = startSlot + ((i + 1) * tableIncrement)
+				tempEnd = startSlot.PlusInt((i + 1) * tableIncrement)
 			}
 			kgModel := DbKnownGaps{
-				StartSlot: strconv.Itoa(tempStart),
-				EndSlot:   strconv.Itoa(tempEnd),
+				StartSlot: tempStart.Number(),
+				EndSlot:   tempEnd.Number(),
 				CheckedOut: false,
 				ReprocessingError: "",
 				EntryError: entryErrorMsg,
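To make the chunking above concrete, here is a small standalone sketch (not code from the PR; it uses plain uint64s instead of the Slot type, and the variable names are only illustrative) that reproduces the loop's arithmetic for the 1-101 example mentioned in the comment:

```go
// Sketch only: split a known_gaps range into tableIncrement-sized chunks,
// mirroring the loop in transactKnownGaps above.
package main

import "fmt"

func main() {
	startSlot, endSlot, tableIncrement := uint64(1), uint64(101), uint64(10)
	totalSlots := endSlot - startSlot
	chunks := totalSlots / tableIncrement
	if totalSlots%tableIncrement != 0 {
		chunks++
	}
	for i := uint64(0); i < chunks; i++ {
		tempStart := startSlot + i*tableIncrement
		tempEnd := startSlot + (i+1)*tableIncrement
		if i+1 == chunks {
			tempEnd = endSlot // the final chunk is clamped to the real end slot
		}
		fmt.Printf("known_gaps chunk: %d-%d\n", tempStart, tempEnd)
	}
}
```

With these inputs the loop produces ten entries, starting at slots 1, 11, 21, ... 91, with the last chunk ending at 101.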
@@ -430,11 +481,11 @@ func transactKnownGaps(tx sql.Tx, ctx context.Context, tableIncrement int, start

 // Wrapper function, instead of adding the knownGaps entries to a transaction, it will
 // create the transaction and write it.
-func writeKnownGaps(db sql.Database, tableIncrement int, startSlot int, endSlot int, entryError error, entryProcess string, metric *BeaconClientMetrics) {
+func writeKnownGaps(db sql.Database, tableIncrement int, startSlot Slot, endSlot Slot, entryError error, entryProcess string, metric *BeaconClientMetrics) {
 	ctx := context.Background()
 	tx, err := db.Begin(ctx)
 	if err != nil {
-		loghelper.LogSlotRangeError(strconv.Itoa(startSlot), strconv.Itoa(endSlot), err).Fatal("Unable to create a new transaction for knownGaps")
+		loghelper.LogSlotRangeError(startSlot.Number(), endSlot.Number(), err).Fatal("Unable to create a new transaction for knownGaps")
 	}
 	defer func() {
 		err := tx.Rollback(ctx)
@@ -444,7 +495,7 @@ func writeKnownGaps(db sql.Database, tableIncrement int, startSlot int, endSlot
 	}()
 	transactKnownGaps(tx, ctx, tableIncrement, startSlot, endSlot, entryError, entryProcess, metric)
 	if err = tx.Commit(ctx); err != nil {
-		loghelper.LogSlotRangeError(strconv.Itoa(startSlot), strconv.Itoa(endSlot), err).Fatal("Unable to execute the transaction for knownGaps")
+		loghelper.LogSlotRangeError(startSlot.Number(), endSlot.Number(), err).Fatal("Unable to execute the transaction for knownGaps")
 	}
 }

@@ -467,8 +518,8 @@ func upsertKnownGaps(tx sql.Tx, ctx context.Context, knModel DbKnownGaps, metric
 }

 // A function to write the gap between the highest slot in the DB and the first processed slot.
-func writeStartUpGaps(db sql.Database, tableIncrement int, firstSlot int, metric *BeaconClientMetrics) {
-	var maxSlot int
+func writeStartUpGaps(db sql.Database, tableIncrement int, firstSlot Slot, metric *BeaconClientMetrics) {
+	var maxSlot Slot
 	err := db.QueryRow(context.Background(), QueryHighestSlotStmt).Scan(&maxSlot)
 	if err != nil {
 		loghelper.LogError(err).Fatal("Unable to get the max block from the DB. We must close the application or we might have undetected gaps.")
@@ -496,19 +547,19 @@ func writeStartUpGaps(db sql.Database, tableIncrement int, firstSlot int, metric
 }

 // A function to update a knownGap range with a reprocessing error.
-func updateKnownGapErrors(db sql.Database, startSlot int, endSlot int, reprocessingErr error, metric *BeaconClientMetrics) error {
+func updateKnownGapErrors(db sql.Database, startSlot Slot, endSlot Slot, reprocessingErr error, metric *BeaconClientMetrics) error {
 	res, err := db.Exec(context.Background(), UpsertKnownGapsErrorStmt, startSlot, endSlot, reprocessingErr.Error())
 	if err != nil {
-		loghelper.LogSlotRangeError(strconv.Itoa(startSlot), strconv.Itoa(endSlot), err).Error("Unable to update reprocessing_error")
+		loghelper.LogSlotRangeError(startSlot.Number(), endSlot.Number(), err).Error("Unable to update reprocessing_error")
 		return err
 	}
 	row, err := res.RowsAffected()
 	if err != nil {
-		loghelper.LogSlotRangeError(strconv.Itoa(startSlot), strconv.Itoa(endSlot), err).Error("Unable to count rows affected when trying to update reprocessing_error.")
+		loghelper.LogSlotRangeError(startSlot.Number(), endSlot.Number(), err).Error("Unable to count rows affected when trying to update reprocessing_error.")
 		return err
 	}
 	if row != 1 {
-		loghelper.LogSlotRangeError(strconv.Itoa(startSlot), strconv.Itoa(endSlot), err).WithFields(log.Fields{
+		loghelper.LogSlotRangeError(startSlot.Number(), endSlot.Number(), err).WithFields(log.Fields{
 			"rowCount": row,
 		}).Error("The rows affected by the upsert for reprocessing_error is not 1.")
 		metric.IncrementKnownGapsReprocessError(1)
@@ -519,13 +570,12 @@ func updateKnownGapErrors(db sql.Database, startSlot int, endSlot int, reprocess
 }

 // A quick helper function to calculate the epoch.
-func calculateEpoch(slot int, slotPerEpoch int) string {
-	epoch := slot / slotPerEpoch
-	return strconv.Itoa(epoch)
+func calculateEpoch(slot Slot, slotPerEpoch uint64) uint64 {
+	return slot.Number() / slotPerEpoch
 }

 // A helper function to check to see if the slot is processed.
-func isSlotProcessed(db sql.Database, checkProcessStmt string, slot string) (bool, error) {
+func isSlotProcessed(db sql.Database, checkProcessStmt string, slot Slot) (bool, error) {
 	processRow, err := db.Exec(context.Background(), checkProcessStmt, slot)
 	if err != nil {
 		return false, err
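As a quick check of `calculateEpoch`: assuming `bcSlotsPerEpoch` carries the mainnet value of 32 slots per epoch, slot 100 maps to epoch 100 / 32 = 3 and slot 2375703 maps to epoch 2375703 / 32 = 74240 under integer division, which are exactly the epochs asserted for those slots in `validatePopularBatchBlocks` earlier in this diff.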
@@ -542,7 +592,7 @@ func isSlotProcessed(db sql.Database, checkProcessStmt string, slot string) (boo

 // Check to see if this slot is in the DB. Check eth_beacon.slots, eth_beacon.signed_block
 // and eth_beacon.state. If the slot exists, return true
-func IsSlotInDb(ctx context.Context, db sql.Database, slot string, blockRoot string, stateRoot string) (bool, error) {
+func IsSlotInDb(ctx context.Context, db sql.Database, slot Slot, blockRoot string, stateRoot string) (bool, error) {
 	var (
 		isInBeaconState       bool
 		isInSignedBeaconBlock bool
@@ -585,7 +635,7 @@ func IsSlotInDb(ctx context.Context, db sql.Database, slot string, blockRoot str

 // Provide a statement, slot, and root, and this function will check to see
 // if the slot and root exist in the table.
-func checkSlotAndRoot(db sql.Database, statement, slot, root string) (bool, error) {
+func checkSlotAndRoot(db sql.Database, statement string, slot Slot, root string) (bool, error) {
 	processRow, err := db.Exec(context.Background(), statement, slot, root)
 	if err != nil {
 		return false, err
1  pkg/beaconclient/eth2.0-spec-tests  (Symbolic link)
@@ -0,0 +1 @@
+../../../external/eth2.0-spec-tests
@@ -31,11 +31,10 @@ var _ = Describe("Healthcheck", func() {

 	BeforeEach(func() {
 		var err error
-		Bc, err = beaconclient.CreateBeaconClient(context.Background(), "http", "localhost", 5052, 10, bcUniqueIdentifier, false)
+		Bc, err = beaconclient.CreateBeaconClient(context.Background(), "http", "localhost", 5052, 10, bcUniqueIdentifier, false, true, true)
 		Expect(err).ToNot(HaveOccurred())
-		errBc, err = beaconclient.CreateBeaconClient(context.Background(), "http", "blah-blah", 1010, 10, bcUniqueIdentifier, false)
+		errBc, err = beaconclient.CreateBeaconClient(context.Background(), "http", "blah-blah", 1010, 10, bcUniqueIdentifier, false, true, true)
 		Expect(err).ToNot(HaveOccurred())
-
 	})
 	Describe("Connecting to the lighthouse client", Label("integration"), func() {
 		Context("When the client is running", func() {
@@ -19,6 +19,7 @@ package beaconclient

 import (
 	"encoding/json"
+	"github.com/pkg/errors"
 	"time"

 	log "github.com/sirupsen/logrus"
@@ -33,11 +34,11 @@ var (
 // This function will capture all the SSE events for a given SseEvents object.
 // When new messages come in, it will ensure that they are decoded into JSON.
 // If any errors occur, it log the error information.
-func handleIncomingSseEvent[P ProcessedEvents](eventHandler *SseEvents[P], errMetricInc func(uint64)) {
+func handleIncomingSseEvent[P ProcessedEvents](eventHandler *SseEvents[P], errMetricInc func(uint64), idleTimeout time.Duration) {
 	go func() {
 		errG := new(errgroup.Group)
 		errG.Go(func() error {
-			err := eventHandler.SseClient.SubscribeChanRaw(eventHandler.MessagesCh)
+			err := eventHandler.Connect()
 			if err != nil {
 				return err
 			}
@@ -54,9 +55,21 @@ func handleIncomingSseEvent[P ProcessedEvents](eventHandler *SseEvents[P], errMe
 		}
 	}()

+	// TODO(telackey): Doesn't there need to be a check here that the handler hasn't been shutdown?
 	for {
+		var idleTimer *time.Timer = nil
+		var idleTimerC <-chan time.Time = nil
+		if idleTimeout > 0 {
+			idleTimer = time.NewTimer(idleTimeout)
+			idleTimerC = idleTimer.C
+		}
+
 		select {
 		case message := <-eventHandler.MessagesCh:
+			if nil != idleTimer {
+				idleTimer.Stop()
+			}
 			// Message can be nil if its a keep-alive message
 			if len(message.Data) != 0 {
 				log.WithFields(log.Fields{"msg": string(message.Data)}).Debug("We are going to send the following message to be processed.")
@@ -64,6 +77,9 @@ func handleIncomingSseEvent[P ProcessedEvents](eventHandler *SseEvents[P], errMe
 			}

 		case headErr := <-eventHandler.ErrorCh:
+			if nil != idleTimer {
+				idleTimer.Stop()
+			}
 			log.WithFields(log.Fields{
 				"endpoint": eventHandler.Endpoint,
 				"err":      headErr.err,
@@ -71,6 +87,21 @@ func handleIncomingSseEvent[P ProcessedEvents](eventHandler *SseEvents[P], errMe
 				},
 			).Error("Unable to handle event.")
 			errMetricInc(1)
+
+		case <-idleTimerC:
+			err := errors.New("SSE idle timeout")
+			log.WithFields(log.Fields{
+				"endpoint": eventHandler.Endpoint,
+				"err":      err,
+				"msg":      err.Error(),
+			},
+			).Error("TIMEOUT - Attempting to resubscribe")
+			errMetricInc(1)
+			eventHandler.Disconnect()
+			err = eventHandler.Connect()
+			if err != nil {
+				log.Error("Unable to re-subscribe.", err)
+			}
 		}
 	}
 }
@@ -93,6 +124,6 @@ func processMsg[P ProcessedEvents](msg []byte, processCh chan<- *P, errorCh chan
 // Capture all of the event topics.
 func (bc *BeaconClient) captureEventTopic() {
 	log.Info("We are capturing all SSE events")
-	go handleIncomingSseEvent(bc.HeadTracking, bc.Metrics.IncrementHeadError)
-	go handleIncomingSseEvent(bc.ReOrgTracking, bc.Metrics.IncrementReorgError)
+	go handleIncomingSseEvent(bc.HeadTracking, bc.Metrics.IncrementHeadError, time.Second*30)
+	go handleIncomingSseEvent(bc.ReOrgTracking, bc.Metrics.IncrementReorgError, 0)
 }
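The idle-timeout handling added above arms a fresh timer on every loop iteration and tears down and re-establishes the subscription when it fires. A stripped-down sketch of the same pattern follows; the channel and reconnect callback are generic placeholders, not the indexer's actual API.

```go
// Sketch only: per-iteration idle timer guarding a subscription channel.
package main

import (
	"log"
	"time"
)

func watch(messages <-chan string, idleTimeout time.Duration, reconnect func() error) {
	for {
		idleTimer := time.NewTimer(idleTimeout)
		select {
		case msg := <-messages:
			idleTimer.Stop()
			log.Println("received:", msg)
		case <-idleTimer.C:
			log.Println("idle timeout, resubscribing")
			if err := reconnect(); err != nil {
				log.Println("unable to re-subscribe:", err)
			}
		}
	}
}

func main() {
	msgs := make(chan string)
	go watch(msgs, 30*time.Second, func() error { return nil })
	msgs <- "head event"
	time.Sleep(time.Millisecond)
}
```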
@@ -53,34 +53,45 @@ type ChainReorg struct {

 // A struct to capture whats being written to the eth-beacon.slots table.
 type DbSlots struct {
-    Epoch string // The epoch.
-    Slot string // The slot.
+    Epoch uint64 // The epoch.
+    Slot uint64 // The slot.
     BlockRoot string // The block root
     StateRoot string // The state root
     Status string // The status, it can be proposed | forked | skipped.
 }

+// A struct to handle the details of an embedded Eth1-block (ie, the ExecutionPayload)
+type DbExecutionPayloadHeader struct {
+    BlockNumber uint64
+    Timestamp uint64
+    BlockHash string
+    ParentHash string
+    StateRoot string
+    ReceiptsRoot string
+    TransactionsRoot string
+}

 // A struct to capture whats being written to eth-beacon.signed_block table.
 type DbSignedBeaconBlock struct {
-    Slot string // The slot.
+    Slot uint64 // The slot.
     BlockRoot string // The block root
     ParentBlock string // The parent block root.
-    Eth1BlockHash string // The eth1 block_hash
+    Eth1DataBlockHash string // The eth1 block_hash
     MhKey string // The ipld multihash key.
+    ExecutionPayloadHeader *DbExecutionPayloadHeader // The ExecutionPayloadHeader (after Bellatrix only).
 }

 // A struct to capture whats being written to eth-beacon.state table.
 type DbBeaconState struct {
-    Slot string // The slot.
+    Slot uint64 // The slot.
     StateRoot string // The state root
     MhKey string // The ipld multihash key.
 }

 // A structure to capture whats being written to the eth-beacon.known_gaps table.
 type DbKnownGaps struct {
-    StartSlot string // The start slot for known_gaps, inclusive.
-    EndSlot string // The end slot for known_gaps, inclusive.
+    StartSlot uint64 // The start slot for known_gaps, inclusive.
+    EndSlot uint64 // The end slot for known_gaps, inclusive.
     CheckedOut bool // Indicates if any process is currently processing this entry.
     ReprocessingError string // The error that occurred when attempting to reprocess these entries.
     EntryError string // The error that caused this entry to be added to the table. Could be null.

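Note: the signed_block model now carries an optional pointer to the execution payload header, which stays nil for pre-Bellatrix blocks. A small illustrative sketch of how such a row might be populated (field subset, constructor name, and values are hypothetical, not the indexer's write path):

package main

import "fmt"

// Stand-ins for the models in the diff, trimmed to the relevant fields.
type DbExecutionPayloadHeader struct {
    BlockNumber uint64
    BlockHash   string
}

type DbSignedBeaconBlock struct {
    Slot                   uint64
    BlockRoot              string
    Eth1DataBlockHash      string
    ExecutionPayloadHeader *DbExecutionPayloadHeader // nil before Bellatrix
}

// newDbSignedBeaconBlock is a hypothetical constructor: the payload header is
// only attached when a (post-Bellatrix) execution payload is present.
func newDbSignedBeaconBlock(slot uint64, root, eth1Hash string, payload *DbExecutionPayloadHeader) DbSignedBeaconBlock {
    return DbSignedBeaconBlock{
        Slot:                   slot,
        BlockRoot:              root,
        Eth1DataBlockHash:      eth1Hash,
        ExecutionPayloadHeader: payload, // stays nil for pre-Merge blocks
    }
}

func main() {
    preMerge := newDbSignedBeaconBlock(100, "0xabc...", "0xdef...", nil)
    postMerge := newDbSignedBeaconBlock(200, "0x123...", "0x456...",
        &DbExecutionPayloadHeader{BlockNumber: 1000, BlockHash: "0x789..."})
    fmt.Println(preMerge.ExecutionPayloadHeader == nil, postMerge.ExecutionPayloadHeader.BlockNumber)
}
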
@@ -20,9 +20,8 @@ package beaconclient

 import (
     "fmt"
-    "strconv"
-
     log "github.com/sirupsen/logrus"
+    "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
 )

 // This function will perform the necessary steps to handle a reorg.
@@ -31,7 +30,11 @@ func (bc *BeaconClient) handleReorg() {
     for {
         reorg := <-bc.ReOrgTracking.ProcessCh
         log.WithFields(log.Fields{"reorg": reorg}).Debug("Received a new reorg message.")
-        writeReorgs(bc.Db, reorg.Slot, reorg.NewHeadBlock, bc.Metrics)
+        slot, err := ParseSlot(reorg.Slot)
+        if nil != err {
+            loghelper.LogSlotError(slot.Number(), err)
+        }
+        writeReorgs(bc.Db, slot, reorg.NewHeadBlock, bc.Metrics)
     }
 }

@@ -42,7 +45,7 @@ func (bc *BeaconClient) handleHead() {
     for {
         head := <-bc.HeadTracking.ProcessCh
         // Process all the work here.
-        slot, err := strconv.Atoi(head.Slot)
+        slot, err := ParseSlot(head.Slot)
         if err != nil {
             bc.HeadTracking.ErrorCh <- &SseError{
                 err: fmt.Errorf("Unable to turn the slot from string to int: %s", head.Slot),
@@ -66,7 +69,7 @@ func (bc *BeaconClient) handleHead() {
             bc.StartingSlot = slot
         }

-        go processHeadSlot(bc.Db, bc.ServerEndpoint, slot, head.Block, head.State, bc.PreviousSlot, bc.PreviousBlockRoot, bc.Metrics, bc.KnownGapTableIncrement, bc.CheckDb)
+        go processHeadSlot(slot, head.Block, head.State, bc.SlotProcessingDetails())

         log.WithFields(log.Fields{"head": head.Slot}).Debug("We finished calling processHeadSlot.")

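Note: ParseSlot itself is not shown in this diff. A minimal sketch of what such a helper could look like, assuming Slot wraps a uint64 and the SSE payload carries the slot as a decimal string; the Number/Format method names mirror the calls above, but this is an illustrative guess, not the repository's definition:

package main

import (
    "fmt"
    "strconv"
)

// Slot is assumed here to be a uint64-backed type, as suggested by the
// strconv.Atoi -> ParseSlot change in the diff.
type Slot uint64

func (s Slot) Number() uint64 { return uint64(s) }
func (s Slot) Format() string { return strconv.FormatUint(uint64(s), 10) }

// ParseSlot converts the decimal slot string from an SSE head/reorg event
// into a Slot. Illustrative only.
func ParseSlot(raw string) (Slot, error) {
    n, err := strconv.ParseUint(raw, 10, 64)
    if err != nil {
        return 0, fmt.Errorf("unable to parse slot %q: %w", raw, err)
    }
    return Slot(n), nil
}

func main() {
    slot, err := ParseSlot("4939584")
    fmt.Println(slot.Number(), slot.Format(), err)
}
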
@@ -33,11 +33,11 @@ import (
 var (
     // Get a single highest priority and non-checked out row row from eth_beacon.historical_process
     getHpEntryStmt string = `SELECT start_slot, end_slot FROM eth_beacon.historic_process
-                             WHERE checked_out=false
+                             WHERE checked_out=false AND end_slot >= $1
                              ORDER BY priority ASC
                              LIMIT 1;`
     // Used to periodically check to see if there is a new entry in the eth_beacon.historic_process table.
-    checkHpEntryStmt string = `SELECT * FROM eth_beacon.historic_process WHERE checked_out=false;`
+    checkHpEntryStmt string = `SELECT * FROM eth_beacon.historic_process WHERE checked_out=false AND end_slot >= $1;`
     // Used to checkout a row from the eth_beacon.historic_process table
     lockHpEntryStmt string = `UPDATE eth_beacon.historic_process
                               SET checked_out=true, checked_out_by=$3
@@ -58,8 +58,8 @@ type HistoricProcessing struct {
 }

 // Get a single row of historical slots from the table.
-func (hp HistoricProcessing) getSlotRange(ctx context.Context, slotCh chan<- slotsToProcess) []error {
-    return getBatchProcessRow(ctx, hp.db, getHpEntryStmt, checkHpEntryStmt, lockHpEntryStmt, slotCh, strconv.Itoa(hp.uniqueNodeIdentifier))
+func (hp HistoricProcessing) getSlotRange(ctx context.Context, slotCh chan<- slotsToProcess, minimumSlot Slot) []error {
+    return getBatchProcessRow(ctx, hp.db, getHpEntryStmt, checkHpEntryStmt, lockHpEntryStmt, slotCh, strconv.Itoa(hp.uniqueNodeIdentifier), minimumSlot)
 }

 // Remove the table entry.
@@ -74,7 +74,7 @@ func (hp HistoricProcessing) handleProcessingErrors(ctx context.Context, errMess
     case <-ctx.Done():
         return
     case errMs := <-errMessages:
-        loghelper.LogSlotError(strconv.Itoa(errMs.slot), errMs.err)
+        loghelper.LogSlotError(errMs.slot.Number(), errMs.err)
         writeKnownGaps(hp.db, 1, errMs.slot, errMs.slot, errMs.err, errMs.errProcess, hp.metrics)
     }
 }
@@ -97,14 +97,14 @@ func (hp HistoricProcessing) releaseDbLocks() error {
 }

 // Process the slot range.
-func processSlotRangeWorker(ctx context.Context, workCh <-chan int, errCh chan<- batchHistoricError, db sql.Database, serverAddress string, metrics *BeaconClientMetrics, checkDb bool, incrementTracker func(uint64)) {
+func processSlotRangeWorker(ctx context.Context, workCh <-chan Slot, errCh chan<- batchHistoricError, spd SlotProcessingDetails, incrementTracker func(uint64)) {
     for {
         select {
         case <-ctx.Done():
             return
         case slot := <-workCh:
             log.Debug("Handling slot: ", slot)
-            err, errProcess := handleHistoricSlot(ctx, db, serverAddress, slot, metrics, checkDb)
+            err, errProcess := handleHistoricSlot(ctx, slot, spd)
             if err != nil {
                 errMs := batchHistoricError{
                     err: err,
@@ -123,7 +123,7 @@ func processSlotRangeWorker(ctx context.Context, workCh <-chan int, errCh chan<-
 // It also locks the row by updating the checked_out column.
 // The statement for getting the start_slot and end_slot must be provided.
 // The statement for "locking" the row must also be provided.
-func getBatchProcessRow(ctx context.Context, db sql.Database, getStartEndSlotStmt string, checkNewRowsStmt string, checkOutRowStmt string, slotCh chan<- slotsToProcess, uniqueNodeIdentifier string) []error {
+func getBatchProcessRow(ctx context.Context, db sql.Database, getStartEndSlotStmt string, checkNewRowsStmt string, checkOutRowStmt string, slotCh chan<- slotsToProcess, uniqueNodeIdentifier string, minimumSlot Slot) []error {
     errCount := make([]error, 0)

     // 5 is an arbitrary number. It allows us to retry a few times before
@@ -139,7 +139,7 @@ func getBatchProcessRow(ctx context.Context, db sql.Database, getStartEndSlotStm
                 "errCount": errCount,
             }).Error("New error entry added")
         }
-        processRow, err := db.Exec(context.Background(), checkNewRowsStmt)
+        processRow, err := db.Exec(context.Background(), checkNewRowsStmt, minimumSlot)
         if err != nil {
             errCount = append(errCount, err)
         }
@@ -172,13 +172,13 @@ func getBatchProcessRow(ctx context.Context, db sql.Database, getStartEndSlotStm

         // Query the DB for slots.
         sp := slotsToProcess{}
-        err = tx.QueryRow(dbCtx, getStartEndSlotStmt).Scan(&sp.startSlot, &sp.endSlot)
+        err = tx.QueryRow(dbCtx, getStartEndSlotStmt, minimumSlot).Scan(&sp.startSlot, &sp.endSlot)
         if err != nil {
             if err == pgx.ErrNoRows {
                 time.Sleep(1 * time.Second)
                 break
             }
-            loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), getStartEndSlotStmt, err).Error("Unable to get a row")
+            loghelper.LogSlotRangeStatementError(sp.startSlot.Number(), sp.endSlot.Number(), getStartEndSlotStmt, err).Error("Unable to get a row")
             errCount = append(errCount, err)
             break
         }
@@ -186,25 +186,25 @@ func getBatchProcessRow(ctx context.Context, db sql.Database, getStartEndSlotStm
         // Checkout the Row
         res, err := tx.Exec(dbCtx, checkOutRowStmt, sp.startSlot, sp.endSlot, uniqueNodeIdentifier)
         if err != nil {
-            loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), checkOutRowStmt, err).Error("Unable to checkout the row")
+            loghelper.LogSlotRangeStatementError(sp.startSlot.Number(), sp.endSlot.Number(), checkOutRowStmt, err).Error("Unable to checkout the row")
             errCount = append(errCount, err)
             break
         }
         rows, err := res.RowsAffected()
         if err != nil {
-            loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), checkOutRowStmt, fmt.Errorf("Unable to determine the rows affected when trying to checkout a row."))
+            loghelper.LogSlotRangeStatementError(sp.startSlot.Number(), sp.endSlot.Number(), checkOutRowStmt, fmt.Errorf("Unable to determine the rows affected when trying to checkout a row."))
             errCount = append(errCount, err)
             break
         }
         if rows > 1 {
-            loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), checkOutRowStmt, err).WithFields(log.Fields{
+            loghelper.LogSlotRangeStatementError(sp.startSlot.Number(), sp.endSlot.Number(), checkOutRowStmt, err).WithFields(log.Fields{
                 "rowsReturn": rows,
             }).Error("We locked too many rows.....")
             errCount = append(errCount, err)
             break
         }
         if rows == 0 {
-            loghelper.LogSlotRangeStatementError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), checkOutRowStmt, err).WithFields(log.Fields{
+            loghelper.LogSlotRangeStatementError(sp.startSlot.Number(), sp.endSlot.Number(), checkOutRowStmt, err).WithFields(log.Fields{
                 "rowsReturn": rows,
             }).Error("We did not lock a single row.")
             errCount = append(errCount, err)
@@ -212,7 +212,7 @@ func getBatchProcessRow(ctx context.Context, db sql.Database, getStartEndSlotStm
         }
         err = tx.Commit(dbCtx)
         if err != nil {
-            loghelper.LogSlotRangeError(strconv.Itoa(sp.startSlot), strconv.Itoa(sp.endSlot), err).Error("Unable commit transactions.")
+            loghelper.LogSlotRangeError(sp.startSlot.Number(), sp.endSlot.Number(), err).Error("Unable commit transactions.")
             errCount = append(errCount, err)
             break
         }
@@ -241,11 +241,11 @@ func removeRowPostProcess(ctx context.Context, db sql.Database, processCh <-chan
             "endSlot": slots.endSlot,
         }).Debug("Starting to check to see if the following slots have been processed")
         for {
-            isStartProcess, err := isSlotProcessed(db, checkProcessedStmt, strconv.Itoa(slots.startSlot))
+            isStartProcess, err := isSlotProcessed(db, checkProcessedStmt, slots.startSlot)
             if err != nil {
                 errCh <- err
             }
-            isEndProcess, err := isSlotProcessed(db, checkProcessedStmt, strconv.Itoa(slots.endSlot))
+            isEndProcess, err := isSlotProcessed(db, checkProcessedStmt, slots.endSlot)
             if err != nil {
                 errCh <- err
             }
@@ -255,7 +255,7 @@ func removeRowPostProcess(ctx context.Context, db sql.Database, processCh <-chan
             time.Sleep(3 * time.Second)
         }

-        _, err := db.Exec(context.Background(), removeStmt, strconv.Itoa(slots.startSlot), strconv.Itoa(slots.endSlot))
+        _, err := db.Exec(context.Background(), removeStmt, slots.startSlot.Number(), slots.endSlot.Number())
         if err != nil {
             errCh <- err
         }

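Note: the batch-processing queries now take a minimumSlot parameter, so ranges that end below it are never claimed. A self-contained sketch of the select-then-lock pattern getBatchProcessRow implements, with that filter: read one unclaimed row with end_slot >= $1, mark it checked_out in the same transaction, and hand it back. The SELECT and UPDATE shapes and table/column names follow the diff; the UPDATE's WHERE clause, the pgx wiring, and the connection string are assumptions for illustration.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/jackc/pgx/v4"
)

// checkOutOneRange claims a single historic_process row at or above minimumSlot.
// Sketch only: the indexer's retry loop and error accounting are trimmed.
func checkOutOneRange(ctx context.Context, conn *pgx.Conn, minimumSlot uint64, nodeID string) (start, end uint64, err error) {
    tx, err := conn.Begin(ctx)
    if err != nil {
        return 0, 0, err
    }
    defer tx.Rollback(ctx) // no-op after a successful Commit

    // Mirrors the shape of getHpEntryStmt in the diff.
    err = tx.QueryRow(ctx,
        `SELECT start_slot, end_slot FROM eth_beacon.historic_process
         WHERE checked_out=false AND end_slot >= $1
         ORDER BY priority ASC
         LIMIT 1;`, minimumSlot).Scan(&start, &end)
    if err != nil {
        return 0, 0, err // pgx.ErrNoRows when nothing is pending
    }

    // Mirrors the visible part of lockHpEntryStmt; the WHERE clause is assumed.
    _, err = tx.Exec(ctx,
        `UPDATE eth_beacon.historic_process
         SET checked_out=true, checked_out_by=$3
         WHERE start_slot=$1 AND end_slot=$2;`, start, end, nodeID)
    if err != nil {
        return 0, 0, err
    }
    return start, end, tx.Commit(ctx)
}

func main() {
    ctx := context.Background()
    // Placeholder connection string.
    conn, err := pgx.Connect(ctx, "postgres://user:pass@localhost:5432/beacon")
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close(ctx)

    start, end, err := checkOutOneRange(ctx, conn, 4700000, "node-1")
    fmt.Println(start, end, err)
}
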
@@ -30,11 +30,11 @@ import (
 var (
     // Get a single non-checked out row row from eth_beacon.known_gaps.
     getKgEntryStmt string = `SELECT start_slot, end_slot FROM eth_beacon.known_gaps
-                             WHERE checked_out=false
+                             WHERE checked_out=false AND end_slot >= $1
                              ORDER BY priority ASC
                              LIMIT 1;`
     // Used to periodically check to see if there is a new entry in the eth_beacon.known_gaps table.
-    checkKgEntryStmt string = `SELECT * FROM eth_beacon.known_gaps WHERE checked_out=false;`
+    checkKgEntryStmt string = `SELECT * FROM eth_beacon.known_gaps WHERE checked_out=false AND end_slot >= $1;`
     // Used to checkout a row from the eth_beacon.known_gaps table
     lockKgEntryStmt string = `UPDATE eth_beacon.known_gaps
                               SET checked_out=true, checked_out_by=$3
@@ -58,10 +58,10 @@ type KnownGapsProcessing struct {
 }

 // This function will perform all the heavy lifting for tracking the head of the chain.
-func (bc *BeaconClient) ProcessKnownGaps(ctx context.Context, maxWorkers int) []error {
+func (bc *BeaconClient) ProcessKnownGaps(ctx context.Context, maxWorkers int, minimumSlot Slot) []error {
     log.Info("We are starting the known gaps processing service.")
     bc.KnownGapsProcess = KnownGapsProcessing{db: bc.Db, uniqueNodeIdentifier: bc.UniqueNodeIdentifier, metrics: bc.Metrics}
-    errs := handleBatchProcess(ctx, maxWorkers, bc.KnownGapsProcess, bc.KnownGapsProcess.db, bc.ServerEndpoint, bc.Metrics, bc.CheckDb, bc.Metrics.IncrementKnownGapsProcessed)
+    errs := handleBatchProcess(ctx, maxWorkers, bc.KnownGapsProcess, bc.SlotProcessingDetails(), bc.Metrics.IncrementKnownGapsProcessed, minimumSlot)
     log.Debug("Exiting known gaps processing service")
     return errs
 }
@@ -78,8 +78,8 @@ func (bc *BeaconClient) StopKnownGapsProcessing(cancel context.CancelFunc) error
 }

 // Get a single row of historical slots from the table.
-func (kgp KnownGapsProcessing) getSlotRange(ctx context.Context, slotCh chan<- slotsToProcess) []error {
-    return getBatchProcessRow(ctx, kgp.db, getKgEntryStmt, checkKgEntryStmt, lockKgEntryStmt, slotCh, strconv.Itoa(kgp.uniqueNodeIdentifier))
+func (kgp KnownGapsProcessing) getSlotRange(ctx context.Context, slotCh chan<- slotsToProcess, minimumSlot Slot) []error {
+    return getBatchProcessRow(ctx, kgp.db, getKgEntryStmt, checkKgEntryStmt, lockKgEntryStmt, slotCh, strconv.Itoa(kgp.uniqueNodeIdentifier), minimumSlot)
 }

 // Remove the table entry.
@@ -97,21 +97,21 @@ func (kgp KnownGapsProcessing) handleProcessingErrors(ctx context.Context, errMe
     // Check to see if this if this entry already exists.
     res, err := kgp.db.Exec(context.Background(), checkKgSingleSlotStmt, errMs.slot, errMs.slot)
     if err != nil {
-        loghelper.LogSlotError(strconv.Itoa(errMs.slot), err).Error("Unable to see if this slot is in the eth_beacon.known_gaps table")
+        loghelper.LogSlotError(errMs.slot.Number(), err).Error("Unable to see if this slot is in the eth_beacon.known_gaps table")
     }

     rows, err := res.RowsAffected()
     if err != nil {
-        loghelper.LogSlotError(strconv.Itoa(errMs.slot), err).WithFields(log.Fields{
+        loghelper.LogSlotError(errMs.slot.Number(), err).WithFields(log.Fields{
             "queryStatement": checkKgSingleSlotStmt,
         }).Error("Unable to get the number of rows affected by this statement.")
     }

     if rows > 0 {
-        loghelper.LogSlotError(strconv.Itoa(errMs.slot), errMs.err).Error("We received an error when processing a knownGap")
+        loghelper.LogSlotError(errMs.slot.Number(), errMs.err).Error("We received an error when processing a knownGap")
         err = updateKnownGapErrors(kgp.db, errMs.slot, errMs.slot, errMs.err, kgp.metrics)
         if err != nil {
-            loghelper.LogSlotError(strconv.Itoa(errMs.slot), err).Error("Error processing known gap")
+            loghelper.LogSlotError(errMs.slot.Number(), err).Error("Error processing known gap")
         }
     } else {
         writeKnownGaps(kgp.db, 1, errMs.slot, errMs.slot, errMs.err, errMs.errProcess, kgp.metrics)

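Note: for orientation, a small sketch of how a caller might use the widened ProcessKnownGaps signature; the beaconClient stand-in below only mirrors the parameters that changed in this diff and is not the repository's type.

package main

import (
    "context"
    "fmt"
)

type Slot uint64

// beaconClient is a stand-in with the same method shape as the diff's
// (*BeaconClient).ProcessKnownGaps(ctx, maxWorkers, minimumSlot).
type beaconClient struct{}

func (bc *beaconClient) ProcessKnownGaps(ctx context.Context, maxWorkers int, minimumSlot Slot) []error {
    // The real implementation claims known_gaps rows with end_slot >= minimumSlot
    // and fans them out to maxWorkers workers; here we just report the inputs.
    fmt.Printf("processing known gaps with %d workers, ignoring ranges ending before slot %d\n",
        maxWorkers, minimumSlot)
    return nil
}

func main() {
    bc := &beaconClient{}
    // e.g. skip anything entirely below the deployment's first indexed slot.
    errs := bc.ProcessKnownGaps(context.Background(), 5, Slot(4700000))
    fmt.Println("errors:", errs)
}
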
@@ -23,36 +23,55 @@ import (
     "context"
     "encoding/hex"
     "fmt"
-    "strconv"
     "strings"
     "time"

     "github.com/jackc/pgx/v4"
-    si "github.com/prysmaticlabs/prysm/consensus-types/interfaces"
-    "github.com/prysmaticlabs/prysm/consensus-types/wrapper"
-    dt "github.com/prysmaticlabs/prysm/encoding/ssz/detect"
-
-    // The below is temporary, once https://github.com/prysmaticlabs/prysm/issues/10006 has been resolved we wont need it.
-    // pb "github.com/prysmaticlabs/prysm/proto/prysm/v2"
-
-    state "github.com/prysmaticlabs/prysm/beacon-chain/state"
     log "github.com/sirupsen/logrus"
     "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql"
     "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
     "golang.org/x/sync/errgroup"
 )

-var (
-    ParentRootUnmarshalError = "Unable to properly unmarshal the ParentRoot field in the SignedBeaconBlock."
-    MissingEth1Data = "Can't get the Eth1 block_hash"
-    VersionedUnmarshalerError = "Unable to create a versioned unmarshaler"
-)
+type SlotProcessingDetails struct {
+    Context context.Context // A context generic context with multiple uses.
+    ServerEndpoint string // What is the endpoint of the beacon server.
+    Db sql.Database // Database object used for reads and writes.
+    Metrics *BeaconClientMetrics // An object used to keep track of certain BeaconClient Metrics.
+    KnownGapTableIncrement int // The max number of slots within a single known_gaps table entry.
+    CheckDb bool // Should we check the DB to see if the slot exists before processing it?
+    PerformBeaconStateProcessing bool // Should we process BeaconStates?
+    PerformBeaconBlockProcessing bool // Should we process BeaconBlocks?
+
+    StartingSlot Slot // If we're performing head tracking. What is the first slot we processed.
+    PreviousSlot Slot // Whats the previous slot we processed
+    PreviousBlockRoot string // Whats the previous block root, used to check the next blocks parent.
+}
+
+func (bc *BeaconClient) SlotProcessingDetails() SlotProcessingDetails {
+    return SlotProcessingDetails{
+        Context: bc.Context,
+        ServerEndpoint: bc.ServerEndpoint,
+        Db: bc.Db,
+        Metrics: bc.Metrics,
+
+        CheckDb: bc.CheckDb,
+        PerformBeaconBlockProcessing: bc.PerformBeaconBlockProcessing,
+        PerformBeaconStateProcessing: bc.PerformBeaconStateProcessing,
+
+        KnownGapTableIncrement: bc.KnownGapTableIncrement,
+        StartingSlot: bc.StartingSlot,
+        PreviousSlot: bc.PreviousSlot,
+        PreviousBlockRoot: bc.PreviousBlockRoot,
+    }
+}

 type ProcessSlot struct {
     // Generic

-    Slot int // The slot number.
-    Epoch int // The epoch number.
+    Slot Slot // The slot number.
+    Epoch Epoch // The epoch number.
     BlockRoot string // The hex encoded string of the BlockRoot.
     StateRoot string // The hex encoded string of the StateRoot.
     ParentBlockRoot string // The hex encoded string of the parent block.
@@ -64,10 +83,10 @@ type ProcessSlot struct {
     // BeaconBlock

     SszSignedBeaconBlock []byte // The entire SSZ encoded SignedBeaconBlock
-    FullSignedBeaconBlock si.SignedBeaconBlock // The unmarshaled BeaconState object, the unmarshalling could have errors.
+    FullSignedBeaconBlock *SignedBeaconBlock // The unmarshaled BeaconState object, the unmarshalling could have errors.

     // BeaconState
-    FullBeaconState state.BeaconState // The unmarshaled BeaconState object, the unmarshalling could have errors.
+    FullBeaconState *BeaconState // The unmarshaled BeaconState object, the unmarshalling could have errors.
     SszBeaconState []byte // The entire SSZ encoded BeaconState

     // DB Write objects
@@ -92,7 +111,16 @@ type PerformanceMetrics struct {
 // This function will do all the work to process the slot and write it to the DB.
 // It will return the error and error process. The error process is used for providing reach detail to the
 // known_gaps table.
-func processFullSlot(ctx context.Context, db sql.Database, serverAddress string, slot int, blockRoot string, stateRoot string, previousSlot int, previousBlockRoot string, headOrHistoric string, metrics *BeaconClientMetrics, knownGapsTableIncrement int, checkDb bool) (error, string) {
+func processFullSlot(
+    ctx context.Context,
+    slot Slot,
+    blockRoot string,
+    stateRoot string,
+    previousSlot Slot,
+    previousBlockRoot string,
+    knownGapsTableIncrement int,
+    headOrHistoric string,
+    spd *SlotProcessingDetails) (error, string) {
     select {
     case <-ctx.Done():
         return nil, ""
@@ -103,8 +131,8 @@ func processFullSlot(
         BlockRoot: blockRoot,
         StateRoot: stateRoot,
         HeadOrHistoric: headOrHistoric,
-        Db: db,
-        Metrics: metrics,
+        Db: spd.Db,
+        Metrics: spd.Metrics,
         PerformanceMetrics: PerformanceMetrics{
             BeaconNodeBlockRetrievalTime: 0,
             BeaconNodeStateRetrievalTime: 0,
@@ -120,8 +148,8 @@ func processFullSlot(
     }

     g, _ := errgroup.WithContext(context.Background())
-    vUnmarshalerCh := make(chan *dt.VersionedUnmarshaler, 1)

+    if spd.PerformBeaconStateProcessing {
     // Get the BeaconState.
     g.Go(func() error {
         select {
@@ -129,7 +157,7 @@ func processFullSlot(
             return nil
         default:
             start := time.Now()
-            err := ps.getBeaconState(serverAddress, vUnmarshalerCh)
+            err := ps.getBeaconState(spd.ServerEndpoint)
             if err != nil {
                 return err
             }
@@ -137,7 +165,9 @@ func processFullSlot(
             return nil
         }
     })
+    }

+    if spd.PerformBeaconBlockProcessing {
     // Get the SignedBeaconBlock.
     g.Go(func() error {
         select {
@@ -145,7 +175,7 @@ func processFullSlot(
             return nil
         default:
             start := time.Now()
-            err := ps.getSignedBeaconBlock(serverAddress, vUnmarshalerCh)
+            err := ps.getSignedBeaconBlock(spd.ServerEndpoint)
             if err != nil {
                 return err
             }
@@ -153,25 +183,40 @@ func processFullSlot(
             return nil
         }
     })
+    }

     if err := g.Wait(); err != nil {
         return err, "processSlot"
     }

     parseBeaconTime := time.Now()
-    finalBlockRoot, finalStateRoot, finalEth1BlockHash, err := ps.provideFinalHash()
+    finalBlockRoot, finalStateRoot, _, err := ps.provideFinalHash()
     if err != nil {
         return err, "CalculateBlockRoot"
     }
     ps.PerformanceMetrics.ParseBeaconObjectForHash = time.Since(parseBeaconTime)

-    if checkDb {
+    if spd.CheckDb {
         checkDbTime := time.Now()
-        inDb, err := IsSlotInDb(ctx, ps.Db, strconv.Itoa(ps.Slot), finalBlockRoot, finalStateRoot)
-        if err != nil {
-            return err, "checkDb"
-        }
-        if inDb {
+        var blockRequired bool
+        if spd.PerformBeaconBlockProcessing {
+            blockExists, err := checkSlotAndRoot(ps.Db, CheckSignedBeaconBlockStmt, ps.Slot, finalBlockRoot)
+            if err != nil {
+                return err, "checkDb"
+            }
+            blockRequired = !blockExists
+        }
+
+        var stateRequired bool
+        if spd.PerformBeaconStateProcessing {
+            stateExists, err := checkSlotAndRoot(ps.Db, CheckBeaconStateStmt, ps.Slot, finalStateRoot)
+            if err != nil {
+                return err, "checkDb"
+            }
+            stateRequired = !stateExists
+        }
+
+        if !blockRequired && !stateRequired {
             log.WithField("slot", slot).Info("Slot already in the DB.")
             return nil, ""
         }
@@ -180,7 +225,7 @@ func processFullSlot(

     // Get this object ready to write
     createDbWriteTime := time.Now()
-    dw, err := ps.createWriteObjects(finalBlockRoot, finalStateRoot, finalEth1BlockHash)
+    dw, err := ps.createWriteObjects()
     if err != nil {
         return err, "blockRoot"
     }
@@ -206,7 +251,7 @@ func processFullSlot(
     reorgTime := time.Now()
     headOrHistoric = strings.ToLower(headOrHistoric)
     if headOrHistoric != "head" && headOrHistoric != "historic" {
-        return fmt.Errorf("headOrHistoric must be either historic or head!"), ""
+        return fmt.Errorf("headOrHistoric must be either historic or head"), ""
     }
     if ps.HeadOrHistoric == "head" && previousSlot != 0 && previousBlockRoot != "" && ps.Status != "skipped" {
         ps.checkPreviousSlot(dw.Tx, dw.Ctx, previousSlot, previousBlockRoot, knownGapsTableIncrement)
@@ -234,97 +279,111 @@ func processFullSlot(
 }

 // Handle a slot that is at head. A wrapper function for calling `handleFullSlot`.
-func processHeadSlot(db sql.Database, serverAddress string, slot int, blockRoot string, stateRoot string, previousSlot int, previousBlockRoot string, metrics *BeaconClientMetrics, knownGapsTableIncrement int, checkDb bool) {
-    // Get the knownGaps at startUp.
-    if previousSlot == 0 && previousBlockRoot == "" {
-        writeStartUpGaps(db, knownGapsTableIncrement, slot, metrics)
+func processHeadSlot(slot Slot, blockRoot string, stateRoot string, spd SlotProcessingDetails) {
+    // Get the knownGaps at startUp
+    if spd.PreviousSlot == 0 && spd.PreviousBlockRoot == "" {
+        writeStartUpGaps(spd.Db, spd.KnownGapTableIncrement, slot, spd.Metrics)
     }
-    err, errReason := processFullSlot(context.Background(), db, serverAddress, slot, blockRoot, stateRoot, previousSlot, previousBlockRoot, "head", metrics, knownGapsTableIncrement, checkDb)
+    // TODO(telackey): Why context.Background()?
+    err, errReason := processFullSlot(context.Background(), slot, blockRoot, stateRoot,
+        spd.PreviousSlot, spd.PreviousBlockRoot, spd.KnownGapTableIncrement, "head", &spd)
     if err != nil {
-        writeKnownGaps(db, knownGapsTableIncrement, slot, slot, err, errReason, metrics)
+        writeKnownGaps(spd.Db, spd.KnownGapTableIncrement, slot, slot, err, errReason, spd.Metrics)
     }
 }

 // Handle a historic slot. A wrapper function for calling `handleFullSlot`.
-func handleHistoricSlot(ctx context.Context, db sql.Database, serverAddress string, slot int, metrics *BeaconClientMetrics, checkDb bool) (error, string) {
-    return processFullSlot(ctx, db, serverAddress, slot, "", "", 0, "", "historic", metrics, 1, checkDb)
+func handleHistoricSlot(ctx context.Context, slot Slot, spd SlotProcessingDetails) (error, string) {
+    return processFullSlot(ctx, slot, "", "", 0, "",
+        1, "historic", &spd)
 }

 // Update the SszSignedBeaconBlock and FullSignedBeaconBlock object with their respective values.
-func (ps *ProcessSlot) getSignedBeaconBlock(serverAddress string, vmCh <-chan *dt.VersionedUnmarshaler) error {
+func (ps *ProcessSlot) getSignedBeaconBlock(serverAddress string) error {
     var blockIdentifier string // Used to query the block
     if ps.BlockRoot != "" {
         blockIdentifier = ps.BlockRoot
     } else {
-        blockIdentifier = strconv.Itoa(ps.Slot)
-    }
-    blockEndpoint := serverAddress + BcBlockQueryEndpoint + blockIdentifier
-    var err error
-    var rc int
-    ps.SszSignedBeaconBlock, rc, err = querySsz(blockEndpoint, strconv.Itoa(ps.Slot))
-    if err != nil {
-        loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error("Unable to properly query the slot.")
-        return err
+        blockIdentifier = ps.Slot.Format()
     }

-    vm := <-vmCh
-    if rc != 200 {
-        ps.FullSignedBeaconBlock = &wrapper.Phase0SignedBeaconBlock{}
+    blockEndpoint := serverAddress + BcBlockQueryEndpoint + blockIdentifier
+    sszSignedBeaconBlock, rc, err := querySsz(blockEndpoint, ps.Slot)
+
+    if err != nil || rc != 200 {
+        loghelper.LogSlotError(ps.Slot.Number(), err).Error("Unable to properly query the slot.")
+        ps.FullSignedBeaconBlock = nil
         ps.SszSignedBeaconBlock = []byte{}
         ps.ParentBlockRoot = ""
         ps.Status = "skipped"
+
+        // A 404 is normal in the case of a "skipped" slot.
+        if rc == 404 {
             return nil
         }
+        return err
+    }

-    if vm == nil {
-        return fmt.Errorf(VersionedUnmarshalerError)
-    }
-    ps.FullSignedBeaconBlock, err = vm.UnmarshalBeaconBlock(ps.SszSignedBeaconBlock)
+    var signedBeaconBlock SignedBeaconBlock
+    err = signedBeaconBlock.UnmarshalSSZ(sszSignedBeaconBlock)
     if err != nil {
-        loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Warn("Unable to process the slots SignedBeaconBlock")
-        return nil
+        loghelper.LogSlotError(ps.Slot.Number(), err).Error("Unable to unmarshal SignedBeaconBlock for slot.")
+        ps.FullSignedBeaconBlock = nil
+        ps.SszSignedBeaconBlock = []byte{}
+        ps.ParentBlockRoot = ""
+        ps.Status = "skipped"
+        return err
     }
-    ps.ParentBlockRoot = "0x" + hex.EncodeToString(ps.FullSignedBeaconBlock.Block().ParentRoot())
+
+    ps.FullSignedBeaconBlock = &signedBeaconBlock
+    ps.SszSignedBeaconBlock = sszSignedBeaconBlock
+
+    ps.ParentBlockRoot = toHex(ps.FullSignedBeaconBlock.Block().ParentRoot())
     return nil
 }

 // Update the SszBeaconState and FullBeaconState object with their respective values.
-func (ps *ProcessSlot) getBeaconState(serverEndpoint string, vmCh chan<- *dt.VersionedUnmarshaler) error {
+func (ps *ProcessSlot) getBeaconState(serverEndpoint string) error {
     var stateIdentifier string // Used to query the state
     if ps.StateRoot != "" {
         stateIdentifier = ps.StateRoot
     } else {
-        stateIdentifier = strconv.Itoa(ps.Slot)
+        stateIdentifier = ps.Slot.Format()
     }
-    stateEndpoint := serverEndpoint + BcStateQueryEndpoint + stateIdentifier
-    ps.SszBeaconState, _, _ = querySsz(stateEndpoint, strconv.Itoa(ps.Slot))

-    versionedUnmarshaler, err := dt.FromState(ps.SszBeaconState)
+    stateEndpoint := serverEndpoint + BcStateQueryEndpoint + stateIdentifier
+    sszBeaconState, _, err := querySsz(stateEndpoint, ps.Slot)
     if err != nil {
-        loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error(VersionedUnmarshalerError)
-        vmCh <- nil
-        return fmt.Errorf(VersionedUnmarshalerError)
-    }
-    vmCh <- versionedUnmarshaler
-    ps.FullBeaconState, err = versionedUnmarshaler.UnmarshalBeaconState(ps.SszBeaconState)
-    if err != nil {
-        loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error("Unable to process the slots BeaconState")
+        loghelper.LogSlotError(ps.Slot.Number(), err).Error("Unable to properly query the BeaconState.")
         return err
     }
+
+    var beaconState BeaconState
+    err = beaconState.UnmarshalSSZ(sszBeaconState)
+    if err != nil {
+        loghelper.LogSlotError(ps.Slot.Number(), err).Error("Unable to unmarshal the BeaconState.")
+        return err
+    }
+
+    ps.FullBeaconState = &beaconState
+    ps.SszBeaconState = sszBeaconState
     return nil
 }

 // Check to make sure that the previous block we processed is the parent of the current block.
-func (ps *ProcessSlot) checkPreviousSlot(tx sql.Tx, ctx context.Context, previousSlot int, previousBlockRoot string, knownGapsTableIncrement int) {
-    parentRoot := "0x" + hex.EncodeToString(ps.FullSignedBeaconBlock.Block().ParentRoot())
-    slot := int(ps.FullBeaconState.Slot())
+func (ps *ProcessSlot) checkPreviousSlot(tx sql.Tx, ctx context.Context, previousSlot Slot, previousBlockRoot string, knownGapsTableIncrement int) {
+    if nil == ps.FullSignedBeaconBlock {
+        log.Debug("Can't check block root, no current block.")
+        return
+    }
+    parentRoot := toHex(ps.FullSignedBeaconBlock.Block().ParentRoot())
+    slot := ps.Slot
     if previousSlot == slot {
         log.WithFields(log.Fields{
             "slot": slot,
             "fork": true,
         }).Warn("A fork occurred! The previous slot and current slot match.")
-        transactReorgs(tx, ctx, strconv.Itoa(ps.Slot), ps.BlockRoot, ps.Metrics)
+        transactReorgs(tx, ctx, ps.Slot, ps.BlockRoot, ps.Metrics)
     } else if previousSlot > slot {
         log.WithFields(log.Fields{
             "previousSlot": previousSlot,
@@ -335,20 +394,20 @@ func (ps *ProcessSlot) checkPreviousSlot(tx sql.Tx, ctx context.Context, previou
             "previousSlot": previousSlot,
             "currentSlot": slot,
         }).Error("We skipped a few slots.")
-        transactKnownGaps(tx, ctx, knownGapsTableIncrement, previousSlot+1, slot-1, fmt.Errorf("Gaps during head processing"), "headGaps", ps.Metrics)
+        transactKnownGaps(tx, ctx, knownGapsTableIncrement, previousSlot+1, slot-1, fmt.Errorf("gaps during head processing"), "headGaps", ps.Metrics)
     } else if previousBlockRoot != parentRoot {
         log.WithFields(log.Fields{
             "previousBlockRoot": previousBlockRoot,
             "currentBlockParent": parentRoot,
         }).Error("The previousBlockRoot does not match the current blocks parent, an unprocessed fork might have occurred.")
-        transactReorgs(tx, ctx, strconv.Itoa(previousSlot), parentRoot, ps.Metrics)
+        transactReorgs(tx, ctx, previousSlot, parentRoot, ps.Metrics)
     } else {
         log.Debug("Previous Slot and Current Slot are one distance from each other.")
     }
 }

 // Transforms all the raw data into DB models that can be written to the DB.
-func (ps *ProcessSlot) createWriteObjects(blockRoot, stateRoot, eth1BlockHash string) (*DatabaseWriter, error) {
+func (ps *ProcessSlot) createWriteObjects() (*DatabaseWriter, error) {
     var status string
     if ps.Status != "" {
         status = ps.Status
@@ -356,7 +415,18 @@ func (ps *ProcessSlot) createWriteObjects(blockRoot, stateRoot, eth1BlockHash st
         status = "proposed"
     }

-    dw, err := CreateDatabaseWrite(ps.Db, ps.Slot, stateRoot, blockRoot, ps.ParentBlockRoot, eth1BlockHash, status, &ps.SszSignedBeaconBlock, &ps.SszBeaconState, ps.Metrics)
+    parseBeaconTime := time.Now()
+    // These will normally be pre-calculated by this point.
+    blockRoot, stateRoot, eth1DataBlockHash, err := ps.provideFinalHash()
+    if err != nil {
+        return nil, err
+    }
+    ps.PerformanceMetrics.ParseBeaconObjectForHash = time.Since(parseBeaconTime)
+
+    payloadHeader := ps.provideExecutionPayloadDetails()
+
+    dw, err := CreateDatabaseWrite(ps.Db, ps.Slot, stateRoot, blockRoot, ps.ParentBlockRoot, eth1DataBlockHash,
+        payloadHeader, status, &ps.SszSignedBeaconBlock, &ps.SszBeaconState, ps.Metrics)
     if err != nil {
         return dw, err
     }
@@ -364,39 +434,65 @@ func (ps *ProcessSlot) createWriteObjects(blockRoot, stateRoot, eth1BlockHash st
     return dw, nil
 }

-// This function will return the final blockRoot, stateRoot, and eth1BlockHash that will be
+// This function will return the final blockRoot, stateRoot, and eth1DataBlockHash that will be
 // used to write to a DB
 func (ps *ProcessSlot) provideFinalHash() (string, string, string, error) {
     var (
         stateRoot string
         blockRoot string
-        eth1BlockHash string
+        eth1DataBlockHash string
     )
     if ps.Status == "skipped" {
         stateRoot = ""
         blockRoot = ""
-        eth1BlockHash = ""
+        eth1DataBlockHash = ""
     } else {
         if ps.StateRoot != "" {
             stateRoot = ps.StateRoot
         } else {
-            stateRoot = "0x" + hex.EncodeToString(ps.FullSignedBeaconBlock.Block().StateRoot())
-            log.Debug("StateRoot: ", stateRoot)
+            if nil != ps.FullSignedBeaconBlock {
+                stateRoot = toHex(ps.FullSignedBeaconBlock.Block().StateRoot())
+                log.Debug("BeaconBlock StateRoot: ", stateRoot)
+            } else {
+                log.Debug("BeaconBlock StateRoot: <nil beacon block>")
+            }
         }

         if ps.BlockRoot != "" {
             blockRoot = ps.BlockRoot
         } else {
-            var err error
-            rawBlockRoot, err := ps.FullSignedBeaconBlock.Block().HashTreeRoot()
-            //blockRoot, err = queryBlockRoot(blockRootEndpoint, strconv.Itoa(ps.Slot))
-            if err != nil {
-                return "", "", "", err
-            }
-            blockRoot = "0x" + hex.EncodeToString(rawBlockRoot[:])
-            log.WithFields(log.Fields{"blockRoot": blockRoot}).Debug("Block Root from ssz")
+            if nil != ps.FullSignedBeaconBlock {
+                rawBlockRoot := ps.FullSignedBeaconBlock.Block().HashTreeRoot()
+                blockRoot = toHex(rawBlockRoot)
+                log.WithFields(log.Fields{"blockRoot": blockRoot}).Debug("Block Root from ssz")
+            } else {
+                log.Debug("BeaconBlock HashTreeRoot: <nil beacon block>")
+            }
         }
-        eth1BlockHash = "0x" + hex.EncodeToString(ps.FullSignedBeaconBlock.Block().Body().Eth1Data().BlockHash)
+        if nil != ps.FullSignedBeaconBlock {
+            eth1DataBlockHash = toHex(ps.FullSignedBeaconBlock.Block().Body().Eth1Data().BlockHash)
+        }
     }
-    return blockRoot, stateRoot, eth1BlockHash, nil
+    return blockRoot, stateRoot, eth1DataBlockHash, nil
 }
+
+func (ps *ProcessSlot) provideExecutionPayloadDetails() *ExecutionPayloadHeader {
+    if nil == ps.FullSignedBeaconBlock || !ps.FullSignedBeaconBlock.IsBellatrix() {
+        return nil
+    }
+
+    payload := ps.FullSignedBeaconBlock.Block().Body().ExecutionPayloadHeader()
+    blockNumber := uint64(payload.BlockNumber)
+
+    // The earliest blocks on the Bellatrix fork, pre-Merge, have zeroed ExecutionPayloads.
+    // There is nothing useful to to store in that case, even though the structure exists.
+    if blockNumber == 0 {
+        return nil
+    }
+
+    return payload
+}
+
+func toHex(r [32]byte) string {
+    return "0x" + hex.EncodeToString(r[:])
+}

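Note: a compact, free-standing sketch of the guard logic provideExecutionPayloadDetails applies above: only Bellatrix blocks carry an execution payload, and early Bellatrix blocks (pre-Merge) carry an all-zero payload that is not worth storing. The block and payload types below are illustrative stand-ins, not the repository's SSZ types.

package main

import "fmt"

// Stand-ins for the SSZ-derived types referenced in the diff.
type ExecutionPayloadHeader struct {
    BlockNumber uint64
    BlockHash   [32]byte
}

type signedBeaconBlock struct {
    isBellatrix bool
    payload     *ExecutionPayloadHeader
}

func (b *signedBeaconBlock) IsBellatrix() bool { return b.isBellatrix }
func (b *signedBeaconBlock) ExecutionPayloadHeader() *ExecutionPayloadHeader {
    return b.payload
}

// providePayload mirrors the nil / Bellatrix / zero-block-number checks in the diff.
func providePayload(b *signedBeaconBlock) *ExecutionPayloadHeader {
    if b == nil || !b.IsBellatrix() {
        return nil // pre-Bellatrix blocks have no execution payload at all
    }
    payload := b.ExecutionPayloadHeader()
    if payload.BlockNumber == 0 {
        return nil // early Bellatrix, pre-Merge: the payload exists but is zeroed
    }
    return payload
}

func main() {
    fmt.Println(providePayload(nil))                                    // <nil>
    fmt.Println(providePayload(&signedBeaconBlock{isBellatrix: false})) // <nil>
    fmt.Println(providePayload(&signedBeaconBlock{isBellatrix: true, payload: &ExecutionPayloadHeader{}})) // <nil>
    merged := &signedBeaconBlock{isBellatrix: true, payload: &ExecutionPayloadHeader{BlockNumber: 1234}}
    fmt.Println(providePayload(merged).BlockNumber) // 1234
}
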
@@ -18,40 +18,16 @@
 package beaconclient

 import (
-    "encoding/json"
+    "bufio"
+    "bytes"
     "fmt"
-    "io/ioutil"
+    "io"
     "net/http"

     log "github.com/sirupsen/logrus"
     "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper"
 )

-// A helper function to query endpoints that utilize slots.
-func querySsz(endpoint string, slot string) ([]byte, int, error) {
-    log.WithFields(log.Fields{"endpoint": endpoint}).Debug("Querying endpoint")
-    client := &http.Client{}
-    req, err := http.NewRequest("GET", endpoint, nil)
-    if err != nil {
-        loghelper.LogSlotError(slot, err).Error("Unable to create a request!")
-        return nil, 0, fmt.Errorf("Unable to create a request!: %s", err.Error())
-    }
-    req.Header.Set("Accept", "application/octet-stream")
-    response, err := client.Do(req)
-    if err != nil {
-        loghelper.LogSlotError(slot, err).Error("Unable to query Beacon Node!")
-        return nil, 0, fmt.Errorf("Unable to query Beacon Node: %s", err.Error())
-    }
-    defer response.Body.Close()
-    rc := response.StatusCode
-    body, err := ioutil.ReadAll(response.Body)
-    if err != nil {
-        loghelper.LogSlotError(slot, err).Error("Unable to turn response into a []bytes array!")
-        return nil, rc, fmt.Errorf("Unable to turn response into a []bytes array!: %s", err.Error())
-    }
-    return body, rc, nil
-}
-
 // Object to unmarshal the BlockRootResponse
 type BlockRootResponse struct {
     Data BlockRootMessage `json:"data"`
@@ -62,35 +38,36 @@ type BlockRootMessage struct {
     Root string `json:"root"`
 }

-// A function to query the blockroot for a given slot.
-func queryBlockRoot(endpoint string, slot string) (string, error) {
+// A helper function to query endpoints that utilize slots.
+func querySsz(endpoint string, slot Slot) ([]byte, int, error) {
     log.WithFields(log.Fields{"endpoint": endpoint}).Debug("Querying endpoint")
     client := &http.Client{}
     req, err := http.NewRequest("GET", endpoint, nil)
     if err != nil {
-        loghelper.LogSlotError(slot, err).Error("Unable to create a request!")
-        return "", fmt.Errorf("Unable to create a request!: %s", err.Error())
+        loghelper.LogSlotError(slot.Number(), err).Error("Unable to create a request!")
+        return nil, 0, fmt.Errorf("Unable to create a request!: %s", err.Error())
     }
-    req.Header.Set("Accept", "application/json")
+    req.Header.Set("Accept", "application/octet-stream")
     response, err := client.Do(req)
     if err != nil {
-        loghelper.LogSlotError(slot, err).Error("Unable to query Beacon Node!")
-        return "", fmt.Errorf("Unable to query Beacon Node: %s", err.Error())
+        loghelper.LogSlotError(slot.Number(), err).Error("Unable to query Beacon Node!")
+        return nil, 0, fmt.Errorf("Unable to query Beacon Node: %s", err.Error())
     }
     defer response.Body.Close()
-    body, err := ioutil.ReadAll(response.Body)
-    if err != nil {
-        loghelper.LogSlotError(slot, err).Error("Unable to turn response into a []bytes array!")
-        return "", fmt.Errorf("Unable to turn response into a []bytes array!: %s", err.Error())
+    rc := response.StatusCode
+    // Any 2xx code is OK.
+    if rc < 200 || rc >= 300 {
+        return nil, rc, fmt.Errorf("HTTP Error: %d", rc)
     }

-    resp := BlockRootResponse{}
-    if err := json.Unmarshal(body, &resp); err != nil {
-        loghelper.LogEndpoint(endpoint).WithFields(log.Fields{
-            "rawMessage": string(body),
-            "err": err,
-        }).Error("Unable to unmarshal the block root")
-        return "", err
+    var body bytes.Buffer
+    buf := bufio.NewWriter(&body)
+    _, err = io.Copy(buf, response.Body)
+    if err != nil {
+        loghelper.LogSlotError(slot.Number(), err).Error("Unable to turn response into a []bytes array!")
+        return nil, rc, fmt.Errorf("Unable to turn response into a []bytes array!: %s", err.Error())
     }
-    return resp.Data.Root, nil
+
+    return body.Bytes(), rc, nil
 }

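Note: for readers unfamiliar with the Accept-header trick above, a free-standing sketch of the same idea: request application/octet-stream so the Beacon API returns raw SSZ, treat any 2xx as success, and hand back the body plus status code. Only standard-library calls are used; the endpoint in main is a placeholder.

package main

import (
    "fmt"
    "io"
    "log"
    "net/http"
)

// fetchSSZ GETs an endpoint and returns the raw body and HTTP status code.
// It mirrors the shape of querySsz in the diff, without the project's logging helpers.
func fetchSSZ(endpoint string) ([]byte, int, error) {
    req, err := http.NewRequest(http.MethodGet, endpoint, nil)
    if err != nil {
        return nil, 0, fmt.Errorf("unable to create request: %w", err)
    }
    // Ask the Beacon node for SSZ bytes rather than JSON.
    req.Header.Set("Accept", "application/octet-stream")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return nil, 0, fmt.Errorf("unable to query beacon node: %w", err)
    }
    defer resp.Body.Close()

    rc := resp.StatusCode
    if rc < 200 || rc >= 300 { // any 2xx is OK; a 404 can simply mean a skipped slot
        return nil, rc, fmt.Errorf("HTTP error: %d", rc)
    }

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return nil, rc, fmt.Errorf("unable to read response body: %w", err)
    }
    return body, rc, nil
}

func main() {
    // Placeholder endpoint; a real deployment would point at a Beacon node's
    // /eth/v2/beacon/blocks/{block_id} route.
    body, rc, err := fetchSSZ("http://localhost:5052/eth/v2/beacon/blocks/head")
    log.Println(len(body), rc, err)
}
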
@@ -1,12 +1,13 @@
 package beaconclient_test

 import (
+    log "github.com/sirupsen/logrus"
     "os"
     "strconv"
    "time"

     . "github.com/onsi/ginkgo/v2"
-    //. "github.com/onsi/gomega"
     "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient"
 )

@@ -24,9 +25,18 @@ var (
         knownGapsTableIncrement: 100000000,
         bcUniqueIdentifier: 100,
         checkDb: false,
+        performBeaconBlockProcessing: true,
+        // As of 2022-09, generating and downloading the full BeaconState is so slow it will cause the tests to fail.
+        performBeaconStateProcessing: false,
     }
 )

+// Note: These tests expect to communicate with a fully-synced Beacon node.
+
 var _ = Describe("Systemvalidation", Label("system"), func() {
+    level, _ := log.ParseLevel("debug")
+    log.SetLevel(level)
+
     Describe("Run the application against a running lighthouse node", func() {
         Context("When we receive head messages", func() {
             It("We should process the messages successfully", func() {

@@ -28,21 +28,21 @@ func LogError(err error) *log.Entry {
 }

 // A simple herlper function to log slot and error.
-func LogSlotError(slot string, err error) *log.Entry {
+func LogSlotError(slot uint64, err error) *log.Entry {
     return log.WithFields(log.Fields{
         "err": err,
         "slot": slot,
     })
 }

-func LogSlotRangeError(startSlot string, endSlot string, err error) *log.Entry {
+func LogSlotRangeError(startSlot uint64, endSlot uint64, err error) *log.Entry {
     return log.WithFields(log.Fields{
         "err": err,
         "startSlot": startSlot,
         "endSlot": endSlot,
     })
 }
-func LogSlotRangeStatementError(startSlot string, endSlot string, statement string, err error) *log.Entry {
+func LogSlotRangeStatementError(startSlot uint64, endSlot uint64, statement string, err error) *log.Entry {
     return log.WithFields(log.Fields{
         "err": err,
         "startSlot": startSlot,

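Note: a small usage sketch of the uint64-based logging helpers after this change, using only logrus; the field names match the diff, the rest is illustrative.

package main

import (
    log "github.com/sirupsen/logrus"
)

// LogSlotError mirrors the post-change signature: slots are numeric, so callers
// no longer format them with strconv before logging.
func LogSlotError(slot uint64, err error) *log.Entry {
    return log.WithFields(log.Fields{
        "err": err,
        "slot": slot,
    })
}

func main() {
    log.SetFormatter(&log.JSONFormatter{})
    // The slot is emitted as a JSON number instead of a quoted string.
    LogSlotError(4939584, nil).Warn("example slot warning")
}
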
@@ -20,7 +20,7 @@ import (
 )

 // A simple helper function that will help wrap the reorg error messages.
-func LogReorgError(slot string, latestBlockRoot string, err error) *log.Entry {
+func LogReorgError(slot uint64, latestBlockRoot string, err error) *log.Entry {
     return log.WithFields(log.Fields{
         "err": err,
         "slot": slot,
@@ -29,7 +29,7 @@ func LogReorgError(slot string, latestBlockRoot string, err error) *log.Entry {
 }

 // A simple helper function that will help wrap regular reorg messages.
-func LogReorg(slot string, latestBlockRoot string) *log.Entry {
+func LogReorg(slot uint64, latestBlockRoot string) *log.Entry {
     return log.WithFields(log.Fields{
         "slot": slot,
         "latestBlockRoot": latestBlockRoot,