From 53c4e4243c37b2c887e4ea4f31d67bec602137ab Mon Sep 17 00:00:00 2001 From: Abdul Rabbani Date: Mon, 20 Jun 2022 13:56:55 -0400 Subject: [PATCH 01/18] Use pprof + pointers --- cmd/boot.go | 9 +++++++++ cmd/capture.go | 8 ++++++++ cmd/full.go | 7 +++++++ cmd/head.go | 6 ++++++ cmd/historic.go | 9 +++++++++ config/cicd/boot.ipld-eth-beacon-indexer.json | 4 +++- ...xample.ipld-eth-beacon-indexer-config.json | 4 +++- pkg/beaconclient/processslot.go | 19 ++++++++++++------- pkg/beaconclient/queryserver.go | 4 ++-- 9 files changed, 59 insertions(+), 11 deletions(-) diff --git a/cmd/boot.go b/cmd/boot.go index 63e15b9..c40f8b2 100644 --- a/cmd/boot.go +++ b/cmd/boot.go @@ -18,7 +18,10 @@ package cmd import ( "context" + "fmt" + "net/http" "os" + "strconv" "syscall" log "github.com/sirupsen/logrus" @@ -60,6 +63,12 @@ func bootApp() { notifierCh <- syscall.SIGTERM }() + if viper.GetBool("t.pprof") { + go func() { + log.Println(http.ListenAndServe(fmt.Sprint("localhost:"+strconv.Itoa(viper.GetInt("t.pprofPort"))), nil)) + }() + } + err = shutdown.ShutdownBoot(ctx, notifierCh, maxWaitSecondsShutdown, Db, Bc) if err != nil { loghelper.LogError(err).Error("Ungracefully Shutdown ipld-eth-beacon-indexer!") diff --git a/cmd/capture.go b/cmd/capture.go index f5693a6..d144834 100644 --- a/cmd/capture.go +++ b/cmd/capture.go @@ -50,6 +50,8 @@ var ( maxWaitSecondsShutdown time.Duration = time.Duration(20) * time.Second notifierCh chan os.Signal = make(chan os.Signal, 1) testDisregardSync bool + isTestPprof bool + testPprofPort int ) // captureCmd represents the capture command @@ -114,6 +116,8 @@ func init() { //// Testing Specific captureCmd.PersistentFlags().BoolVar(&testDisregardSync, "t.skipSync", false, "Should we disregard the head sync?") + captureCmd.PersistentFlags().BoolVar(&isTestPprof, "t.pprof", false, "Should we start pprof?") + captureCmd.PersistentFlags().IntVar(&testPprofPort, "t.pprofPort", 6060, "What port should we export pprof at?") // Bind Flags with Viper //// 
DB Flags @@ -133,6 +137,10 @@ func init() { //// Testing Specific err = viper.BindPFlag("t.skipSync", captureCmd.PersistentFlags().Lookup("t.skipSync")) exitErr(err) + err = viper.BindPFlag("t.pprof", captureCmd.PersistentFlags().Lookup("t.pprof")) + exitErr(err) + err = viper.BindPFlag("t.pprofPort", captureCmd.PersistentFlags().Lookup("t.pprofPort")) + exitErr(err) //// LH specific err = viper.BindPFlag("bc.address", captureCmd.PersistentFlags().Lookup("bc.address")) diff --git a/cmd/full.go b/cmd/full.go index 0c4b9d2..7d50c8a 100644 --- a/cmd/full.go +++ b/cmd/full.go @@ -19,6 +19,7 @@ package cmd import ( "context" "fmt" + "net/http" "strconv" log "github.com/sirupsen/logrus" @@ -107,6 +108,12 @@ func startFullProcessing() { }() } + if viper.GetBool("t.pprof") { + go func() { + log.Println(http.ListenAndServe(fmt.Sprint("localhost:"+strconv.Itoa(viper.GetInt("t.pprofPort"))), nil)) + }() + } + // Shutdown when the time is right. err = shutdown.ShutdownFull(ctx, KgCancel, hpCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc) if err != nil { diff --git a/cmd/head.go b/cmd/head.go index ba70f8c..4688087 100644 --- a/cmd/head.go +++ b/cmd/head.go @@ -81,6 +81,12 @@ func startHeadTracking() { }() } + if viper.GetBool("t.pprof") { + go func() { + log.Println(http.ListenAndServe(fmt.Sprint("localhost:"+strconv.Itoa(viper.GetInt("t.pprofPort"))), nil)) + }() + } + // Shutdown when the time is right. 
err = shutdown.ShutdownHeadTracking(ctx, KgCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc) if err != nil { diff --git a/cmd/historic.go b/cmd/historic.go index 1c6b653..6e0a03e 100644 --- a/cmd/historic.go +++ b/cmd/historic.go @@ -22,6 +22,9 @@ import ( "os" "strconv" + "net/http" + _ "net/http/pprof" + log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -92,6 +95,12 @@ func startHistoricProcessing() { }() } + if viper.GetBool("t.pprof") { + go func() { + log.Println(http.ListenAndServe(fmt.Sprint("localhost:"+strconv.Itoa(viper.GetInt("t.pprofPort"))), nil)) + }() + } + // Shutdown when the time is right. err = shutdown.ShutdownHistoricProcessing(ctx, kgCancel, hpCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc) if err != nil { diff --git a/config/cicd/boot.ipld-eth-beacon-indexer.json b/config/cicd/boot.ipld-eth-beacon-indexer.json index b10cc13..a042f7b 100644 --- a/config/cicd/boot.ipld-eth-beacon-indexer.json +++ b/config/cicd/boot.ipld-eth-beacon-indexer.json @@ -19,7 +19,9 @@ "checkDb": true }, "t": { - "skipSync": true + "skipSync": true, + "pprof": true, + "pprofPort": 6060 }, "log": { "level": "debug", diff --git a/config/example.ipld-eth-beacon-indexer-config.json b/config/example.ipld-eth-beacon-indexer-config.json index 7481284..b41b32c 100644 --- a/config/example.ipld-eth-beacon-indexer-config.json +++ b/config/example.ipld-eth-beacon-indexer-config.json @@ -19,7 +19,9 @@ "checkDb": true }, "t": { - "skipSync": true + "skipSync": true, + "pprof": true, + "pprofPort": 6060 }, "log": { "level": "debug", diff --git a/pkg/beaconclient/processslot.go b/pkg/beaconclient/processslot.go index 1b8f619..4e075b4 100644 --- a/pkg/beaconclient/processslot.go +++ b/pkg/beaconclient/processslot.go @@ -63,12 +63,12 @@ type ProcessSlot struct { PerformanceMetrics PerformanceMetrics // An object to keep track of performance metrics. 
// BeaconBlock - SszSignedBeaconBlock []byte // The entire SSZ encoded SignedBeaconBlock + SszSignedBeaconBlock *[]byte // The entire SSZ encoded SignedBeaconBlock FullSignedBeaconBlock si.SignedBeaconBlock // The unmarshaled BeaconState object, the unmarshalling could have errors. // BeaconState FullBeaconState state.BeaconState // The unmarshaled BeaconState object, the unmarshalling could have errors. - SszBeaconState []byte // The entire SSZ encoded BeaconState + SszBeaconState *[]byte // The entire SSZ encoded BeaconState // DB Write objects DbSlotsModel *DbSlots // The model being written to the slots table. @@ -155,6 +155,11 @@ func processFullSlot(ctx context.Context, db sql.Database, serverAddress string, }) if err := g.Wait(); err != nil { + // Make sure channel is empty. + select { + case <-vUnmarshalerCh: + default: + } return err, "processSlot" } @@ -270,7 +275,7 @@ func (ps *ProcessSlot) getSignedBeaconBlock(serverAddress string, vmCh <-chan *d vm := <-vmCh if rc != 200 { ps.FullSignedBeaconBlock = &wrapper.Phase0SignedBeaconBlock{} - ps.SszSignedBeaconBlock = []byte{} + ps.SszSignedBeaconBlock = &[]byte{} ps.ParentBlockRoot = "" ps.Status = "skipped" return nil @@ -280,7 +285,7 @@ func (ps *ProcessSlot) getSignedBeaconBlock(serverAddress string, vmCh <-chan *d return fmt.Errorf(VersionedUnmarshalerError) } - ps.FullSignedBeaconBlock, err = vm.UnmarshalBeaconBlock(ps.SszSignedBeaconBlock) + ps.FullSignedBeaconBlock, err = vm.UnmarshalBeaconBlock(*ps.SszSignedBeaconBlock) if err != nil { loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Warn("Unable to process the slots SignedBeaconBlock") return nil @@ -300,14 +305,14 @@ func (ps *ProcessSlot) getBeaconState(serverEndpoint string, vmCh chan<- *dt.Ver stateEndpoint := serverEndpoint + BcStateQueryEndpoint + stateIdentifier ps.SszBeaconState, _, _ = querySsz(stateEndpoint, strconv.Itoa(ps.Slot)) - versionedUnmarshaler, err := dt.FromState(ps.SszBeaconState) + versionedUnmarshaler, err := 
dt.FromState(*ps.SszBeaconState) if err != nil { loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error(VersionedUnmarshalerError) vmCh <- nil return fmt.Errorf(VersionedUnmarshalerError) } vmCh <- versionedUnmarshaler - ps.FullBeaconState, err = versionedUnmarshaler.UnmarshalBeaconState(ps.SszBeaconState) + ps.FullBeaconState, err = versionedUnmarshaler.UnmarshalBeaconState(*ps.SszBeaconState) if err != nil { loghelper.LogSlotError(strconv.Itoa(ps.Slot), err).Error("Unable to process the slots BeaconState") return err @@ -356,7 +361,7 @@ func (ps *ProcessSlot) createWriteObjects(blockRoot, stateRoot, eth1BlockHash st status = "proposed" } - dw, err := CreateDatabaseWrite(ps.Db, ps.Slot, stateRoot, blockRoot, ps.ParentBlockRoot, eth1BlockHash, status, &ps.SszSignedBeaconBlock, &ps.SszBeaconState, ps.Metrics) + dw, err := CreateDatabaseWrite(ps.Db, ps.Slot, stateRoot, blockRoot, ps.ParentBlockRoot, eth1BlockHash, status, ps.SszSignedBeaconBlock, ps.SszBeaconState, ps.Metrics) if err != nil { return dw, err } diff --git a/pkg/beaconclient/queryserver.go b/pkg/beaconclient/queryserver.go index 5294335..4962d2b 100644 --- a/pkg/beaconclient/queryserver.go +++ b/pkg/beaconclient/queryserver.go @@ -28,7 +28,7 @@ import ( ) // A helper function to query endpoints that utilize slots. 
-func querySsz(endpoint string, slot string) ([]byte, int, error) { +func querySsz(endpoint string, slot string) (*[]byte, int, error) { log.WithFields(log.Fields{"endpoint": endpoint}).Debug("Querying endpoint") client := &http.Client{} req, err := http.NewRequest("GET", endpoint, nil) @@ -49,7 +49,7 @@ func querySsz(endpoint string, slot string) ([]byte, int, error) { loghelper.LogSlotError(slot, err).Error("Unable to turn response into a []bytes array!") return nil, rc, fmt.Errorf("Unable to turn response into a []bytes array!: %s", err.Error()) } - return body, rc, nil + return &body, rc, nil } // Object to unmarshal the BlockRootResponse -- 2.45.2 From 79250eed1a57c59e98c6bcb6b86e2925e1c6ea32 Mon Sep 17 00:00:00 2001 From: Abdul Rabbani Date: Mon, 20 Jun 2022 14:04:17 -0400 Subject: [PATCH 02/18] Update CICD pipeline for duplicate jobs --- .github/workflows/generic-testing.yml | 12 ++--- .../{on-publish.yml => on-pr-publish.yml} | 52 +++++++++++++++++++ .github/workflows/on-pr.yml | 47 ----------------- 3 files changed, 58 insertions(+), 53 deletions(-) rename .github/workflows/{on-publish.yml => on-pr-publish.yml} (58%) delete mode 100644 .github/workflows/on-pr.yml diff --git a/.github/workflows/generic-testing.yml b/.github/workflows/generic-testing.yml index d5f906b..993d892 100644 --- a/.github/workflows/generic-testing.yml +++ b/.github/workflows/generic-testing.yml @@ -3,22 +3,22 @@ on: workflow_call: inputs: stack-orchestrator-ref: - required: false + required: true type: string ipld-eth-beacon-db-ref: - required: false + required: true type: string ssz-data-ref: - required: false + required: true type: string secrets: GHA_KEY: required: true env: - stack-orchestrator-ref: ${{ inputs.stack-orchestrator-ref || '7fb664270a0ba09e2caa3095e8c91f3fdb5b38af' }} - ipld-eth-beacon-db-ref: ${{ inputs.ipld-eth-beacon-db-ref || '3dfe416302d553f8240f6051c08a7899b0e39e12' }} - ssz-data-ref: ${{ inputs.ssz-data-ref || 'main' }} + stack-orchestrator-ref: ${{ 
inputs.stack-orchestrator-ref }} + ipld-eth-beacon-db-ref: ${{ inputs.ipld-eth-beacon-db-ref }} + ssz-data-ref: ${{ inputs.ssz-data-ref } GOPATH: /tmp/go jobs: build: diff --git a/.github/workflows/on-publish.yml b/.github/workflows/on-pr-publish.yml similarity index 58% rename from .github/workflows/on-publish.yml rename to .github/workflows/on-pr-publish.yml index 7ed6b38..41b08fe 100644 --- a/.github/workflows/on-publish.yml +++ b/.github/workflows/on-pr-publish.yml @@ -2,9 +2,50 @@ name: Publish Docker image on: release: types: [published, edited] + workflow_dispatch: + inputs: + stack-orchestrator-ref: + description: "The branch, commit or sha from stack-orchestrator to checkout" + required: false + default: "7fb664270a0ba09e2caa3095e8c91f3fdb5b38af" + ipld-eth-beacon-db-ref: + description: "The branch, commit or sha from ipld-eth-beacon-db to checkout" + required: false + default: "3dfe416302d553f8240f6051c08a7899b0e39e12" + ssz-data-ref: + description: "The branch, commit or sha from ssz-data to checkout" + required: false + default: "main" + pull_request: + paths: + - "!**.md" + - "!.gitignore" + - "!LICENSE" + - "!.github/workflows/**" + - ".github/workflows/on-pr.yml" + - ".github/workflows/tests.yml" + - "**" + schedule: + - cron: '0 13 * * *' # Must be single quotes!! 
jobs: + pre_job: + # continue-on-error: true # Uncomment once integration is finished + runs-on: ubuntu-latest + # Map a step output to a job output + outputs: + should_skip: ${{ steps.skip_check.outputs.should_skip }} + steps: + - id: skip_check + uses: fkirc/skip-duplicate-actions@v4 + with: + # All of these options are optional, so you can remove them if you are happy with the defaults + concurrent_skipping: "never" + skip_after_successful_duplicate: "true" + do_not_skip: '["workflow_dispatch", "schedule"]' trigger-tests: uses: ./.github/workflows/generic-testing.yml + if: ${{ needs.pre_job.outputs.should_skip != 'true' }} + needs: pre_job with: stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref }} ipld-eth-beacon-db-ref: ${{ github.event.inputs.ipld-eth-beacon-db-ref }} @@ -12,6 +53,8 @@ jobs: secrets: GHA_KEY: ${{secrets.GHA_KEY}} system-testing: + if: ${{ needs.pre_job.outputs.should_skip != 'true' }} + needs: pre_job uses: ./.github/workflows/system-tests.yml with: stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref }} @@ -25,6 +68,11 @@ jobs: needs: - trigger-tests - system-testing + if: | + always() && + (needs.trigger-tests.result == 'success' || needs.trigger-tests.result == 'skipped') && + (needs.system-testing.result == 'success' || needs.system-testing.result == 'skipped') && + github.event_name == 'release' steps: - uses: actions/checkout@v2 - name: Get the version @@ -42,6 +90,10 @@ jobs: name: Push Docker image to Docker Hub runs-on: ubuntu-latest needs: build + if: | + always() && + (needs.build.result == 'success') && + github.event_name == 'release' steps: - name: Get the version id: vars diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml deleted file mode 100644 index f3254a0..0000000 --- a/.github/workflows/on-pr.yml +++ /dev/null @@ -1,47 +0,0 @@ -name: Test Application On PR - -on: - workflow_dispatch: - inputs: - stack-orchestrator-ref: - description: "The branch, commit or sha from 
stack-orchestrator to checkout" - required: false - default: "main" - ipld-eth-beacon-db-ref: - description: "The branch, commit or sha from ipld-eth-beacon-db to checkout" - required: false - default: "main" - ssz-data-ref: - description: "The branch, commit or sha from ssz-data to checkout" - required: false - default: "main" - pull_request: - paths: - - "!**.md" - - "!.gitignore" - - "!LICENSE" - - "!.github/workflows/**" - - ".github/workflows/on-pr.yml" - - ".github/workflows/tests.yml" - - "**" - schedule: - - cron: '0 13 * * *' # Must be single quotes!! - -jobs: - trigger-tests: - if: github.event_name != 'schedule' - uses: ./.github/workflows/generic-testing.yml - with: - stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref }} - ipld-eth-beacon-db-ref: ${{ github.event.inputs.ipld-eth-beacon-db-ref }} - ssz-data-ref: ${{ github.event.inputs.ssz-data-ref }} - secrets: - GHA_KEY: ${{secrets.GHA_KEY}} - system-testing: - uses: ./.github/workflows/system-tests.yml - with: - stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref }} - ipld-eth-beacon-db-ref: ${{ github.event.inputs.ipld-eth-beacon-db-ref }} - secrets: - GHA_KEY: ${{secrets.GHA_KEY}} - BC_ADDRESS: ${{secrets.BC_ADDRESS}} -- 2.45.2 From 30e8e955ee6f64af39102fba5e6ee7d02ddeacb5 Mon Sep 17 00:00:00 2001 From: Abdul Rabbani Date: Mon, 20 Jun 2022 14:06:06 -0400 Subject: [PATCH 03/18] fix cicd --- .github/workflows/generic-testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/generic-testing.yml b/.github/workflows/generic-testing.yml index 993d892..e75f171 100644 --- a/.github/workflows/generic-testing.yml +++ b/.github/workflows/generic-testing.yml @@ -18,7 +18,7 @@ on: env: stack-orchestrator-ref: ${{ inputs.stack-orchestrator-ref }} ipld-eth-beacon-db-ref: ${{ inputs.ipld-eth-beacon-db-ref }} - ssz-data-ref: ${{ inputs.ssz-data-ref } + ssz-data-ref: ${{ inputs.ssz-data-ref }} GOPATH: /tmp/go jobs: build: -- 2.45.2 From 
a2f2603d38470670532e470d6b7774afcc4a1c6c Mon Sep 17 00:00:00 2001 From: Abdul Rabbani Date: Tue, 21 Jun 2022 14:34:10 -0400 Subject: [PATCH 04/18] Use context to stop head tracking --- cmd/full.go | 7 +- cmd/head.go | 8 +- go.mod | 1 + go.sum | 3 +- internal/shutdown/shutdown.go | 8 +- internal/shutdown/shutdown_test.go | 17 ++- pkg/beaconclient/capturehead.go | 33 +---- pkg/beaconclient/capturehead_test.go | 160 ++++++++++++++++------ pkg/beaconclient/capturehistoric.go | 3 +- pkg/beaconclient/capturehistoric_test.go | 48 +++++-- pkg/beaconclient/incomingsse.go | 13 +- pkg/beaconclient/processevents.go | 84 +++++++----- pkg/beaconclient/processslot.go | 4 +- pkg/beaconclient/systemvalidation_test.go | 8 +- 14 files changed, 258 insertions(+), 139 deletions(-) diff --git a/cmd/full.go b/cmd/full.go index 7d50c8a..840d39d 100644 --- a/cmd/full.go +++ b/cmd/full.go @@ -75,7 +75,8 @@ func startFullProcessing() { log.Info("The Beacon Client has booted successfully!") // Capture head blocks - go Bc.CaptureHead() + hdCtx, hdCancel := context.WithCancel(context.Background()) + go Bc.CaptureHead(hdCtx) hpContext, hpCancel := context.WithCancel(context.Background()) @@ -90,7 +91,7 @@ func startFullProcessing() { } return nil }) - kgCtx, KgCancel := context.WithCancel(context.Background()) + kgCtx, kgCancel := context.WithCancel(context.Background()) if viper.GetBool("kg.processKnownGaps") { go func() { errG := new(errgroup.Group) @@ -115,7 +116,7 @@ func startFullProcessing() { } // Shutdown when the time is right. 
- err = shutdown.ShutdownFull(ctx, KgCancel, hpCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc) + err = shutdown.ShutdownFull(ctx, hdCancel, kgCancel, hpCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc) if err != nil { loghelper.LogError(err).Error("Ungracefully Shutdown ipld-eth-beacon-indexer!") } else { diff --git a/cmd/head.go b/cmd/head.go index 4688087..a4f88ec 100644 --- a/cmd/head.go +++ b/cmd/head.go @@ -62,8 +62,10 @@ func startHeadTracking() { log.Info("The Beacon Client has booted successfully!") // Capture head blocks - go Bc.CaptureHead() - kgCtx, KgCancel := context.WithCancel(context.Background()) + hdCtx, hdCancel := context.WithCancel(context.Background()) + go Bc.CaptureHead(hdCtx) + + kgCtx, kgCancel := context.WithCancel(context.Background()) if viper.GetBool("kg.processKnownGaps") { go func() { errG := new(errgroup.Group) @@ -88,7 +90,7 @@ func startHeadTracking() { } // Shutdown when the time is right. - err = shutdown.ShutdownHeadTracking(ctx, KgCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc) + err = shutdown.ShutdownHeadTracking(ctx, hdCancel, kgCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc) if err != nil { loghelper.LogError(err).Error("Ungracefully Shutdown ipld-eth-beacon-indexer!") } else { diff --git a/go.mod b/go.mod index f3082d4..efb8ad3 100644 --- a/go.mod +++ b/go.mod @@ -70,6 +70,7 @@ require ( github.com/urfave/cli/v2 v2.3.0 // indirect go.opencensus.io v0.23.0 // indirect go.uber.org/atomic v1.9.0 // indirect + go.uber.org/goleak v1.1.12 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f // indirect diff --git a/go.sum b/go.sum index 249cbfc..0e5dee9 100644 --- a/go.sum +++ b/go.sum @@ -833,8 +833,9 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod 
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= diff --git a/internal/shutdown/shutdown.go b/internal/shutdown/shutdown.go index 13181b4..7e03fb0 100644 --- a/internal/shutdown/shutdown.go +++ b/internal/shutdown/shutdown.go @@ -40,12 +40,12 @@ func ShutdownServices(ctx context.Context, notifierCh chan os.Signal, waitTime t } // Wrapper function for shutting down the head tracking process. -func ShutdownHeadTracking(ctx context.Context, kgCancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error { +func ShutdownHeadTracking(ctx context.Context, hdCancel context.CancelFunc, kgCancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error { return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{ // Combining DB shutdown with BC because BC needs DB open to cleanly shutdown. "beaconClient": func(ctx context.Context) error { defer DB.Close() - err := BC.StopHeadTracking() + err := BC.StopHeadTracking(hdCancel) if err != nil { loghelper.LogError(err).Error("Unable to trigger shutdown of head tracking") } @@ -82,7 +82,7 @@ func ShutdownHistoricProcessing(ctx context.Context, kgCancel, hpCancel context. 
} // Shutdown the head and historical processing -func ShutdownFull(ctx context.Context, kgCancel, hpCancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error { +func ShutdownFull(ctx context.Context, hdCancel context.CancelFunc, kgCancel, hpCancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error { return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{ // Combining DB shutdown with BC because BC needs DB open to cleanly shutdown. "beaconClient": func(ctx context.Context) error { @@ -97,7 +97,7 @@ func ShutdownFull(ctx context.Context, kgCancel, hpCancel context.CancelFunc, no loghelper.LogError(err).Error("Unable to stop processing known gaps") } } - err = BC.StopHeadTracking() + err = BC.StopHeadTracking(hdCancel) if err != nil { loghelper.LogError(err).Error("Unable to trigger shutdown of head tracking") } diff --git a/internal/shutdown/shutdown_test.go b/internal/shutdown/shutdown_test.go index 97d83af..99dc6dd 100644 --- a/internal/shutdown/shutdown_test.go +++ b/internal/shutdown/shutdown_test.go @@ -68,13 +68,14 @@ var _ = Describe("Shutdown", func() { Expect(err).To(BeNil()) }) - Describe("Run Shutdown Function for head tracking,", Label("integration"), func() { + Describe("Run Shutdown Function for head tracking,", Label("integration", "shutdown"), func() { Context("When Channels are empty,", func() { It("Should Shutdown Successfully.", func() { go func() { - _, cancel := context.WithCancel(context.Background()) + _, kgCancel := context.WithCancel(context.Background()) + _, hdCancel := context.WithCancel(context.Background()) log.Debug("Starting shutdown chan") - err = shutdown.ShutdownHeadTracking(ctx, cancel, notifierCh, maxWaitSecondsShutdown, DB, BC) + err = shutdown.ShutdownHeadTracking(ctx, hdCancel, kgCancel, notifierCh, maxWaitSecondsShutdown, DB, BC) 
log.Debug("We have completed the shutdown...") Expect(err).ToNot(HaveOccurred()) }() @@ -85,9 +86,10 @@ var _ = Describe("Shutdown", func() { shutdownCh := make(chan bool) //log.SetLevel(log.DebugLevel) go func() { - _, cancel := context.WithCancel(context.Background()) + _, kgCancel := context.WithCancel(context.Background()) + _, hdCancel := context.WithCancel(context.Background()) log.Debug("Starting shutdown chan") - err = shutdown.ShutdownHeadTracking(ctx, cancel, notifierCh, maxWaitSecondsShutdown, DB, BC) + err = shutdown.ShutdownHeadTracking(ctx, hdCancel, kgCancel, notifierCh, maxWaitSecondsShutdown, DB, BC) log.Debug("We have completed the shutdown...") Expect(err).ToNot(HaveOccurred()) shutdownCh <- true @@ -120,8 +122,9 @@ var _ = Describe("Shutdown", func() { //log.SetLevel(log.DebugLevel) go func() { log.Debug("Starting shutdown chan") - _, cancel := context.WithCancel(context.Background()) - err = shutdown.ShutdownHeadTracking(ctx, cancel, notifierCh, maxWaitSecondsShutdown, DB, BC) + _, kgCancel := context.WithCancel(context.Background()) + _, hdCancel := context.WithCancel(context.Background()) + err = shutdown.ShutdownHeadTracking(ctx, hdCancel, kgCancel, notifierCh, maxWaitSecondsShutdown, DB, BC) log.Debug("We have completed the shutdown...") Expect(err).To(MatchError(gracefulshutdown.TimeoutErr(maxWaitSecondsShutdown.String()))) shutdownCh <- true diff --git a/pkg/beaconclient/capturehead.go b/pkg/beaconclient/capturehead.go index a0b6e6b..0b54881 100644 --- a/pkg/beaconclient/capturehead.go +++ b/pkg/beaconclient/capturehead.go @@ -18,42 +18,23 @@ package beaconclient import ( - "time" + "context" log "github.com/sirupsen/logrus" - "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper" ) // This function will perform all the heavy lifting for tracking the head of the chain. 
-func (bc *BeaconClient) CaptureHead() { +func (bc *BeaconClient) CaptureHead(ctx context.Context) { log.Info("We are tracking the head of the chain.") - go bc.handleHead() - go bc.handleReorg() - bc.captureEventTopic() + go bc.handleHead(ctx) + go bc.handleReorg(ctx) + bc.captureEventTopic(ctx) } // Stop the head tracking service. -func (bc *BeaconClient) StopHeadTracking() error { +func (bc *BeaconClient) StopHeadTracking(cancel context.CancelFunc) error { log.Info("We are going to stop tracking the head of chain because of the shutdown signal.") - chHead := make(chan bool) - chReorg := make(chan bool) - - go bc.HeadTracking.finishProcessingChannel(chHead) - go bc.ReOrgTracking.finishProcessingChannel(chReorg) - - <-chHead - <-chReorg + cancel() log.Info("Successfully stopped the head tracking service.") return nil } - -// This function closes the SSE subscription, but waits until the MessagesCh is empty -func (se *SseEvents[ProcessedEvents]) finishProcessingChannel(finish chan<- bool) { - loghelper.LogEndpoint(se.Endpoint).Info("Received a close event.") - se.SseClient.Unsubscribe(se.MessagesCh) - for len(se.MessagesCh) != 0 || len(se.ProcessCh) != 0 { - time.Sleep(time.Duration(shutdownWaitInterval) * time.Millisecond) - } - loghelper.LogEndpoint(se.Endpoint).Info("Done processing all messages, ready for shutdown") - finish <- true -} diff --git a/pkg/beaconclient/capturehead_test.go b/pkg/beaconclient/capturehead_test.go index 1c569df..adb2bab 100644 --- a/pkg/beaconclient/capturehead_test.go +++ b/pkg/beaconclient/capturehead_test.go @@ -23,6 +23,7 @@ import ( "net/http" "os" "path/filepath" + "runtime" "strconv" "sync/atomic" "time" @@ -264,71 +265,103 @@ type MimicConfig struct { var _ = Describe("Capturehead", Label("head"), func() { Describe("Receiving New Head SSE messages", Label("unit", "behavioral"), func() { - Context("Correctly formatted Phase0 Block", func() { + Context("Correctly formatted Phase0 Block", Label("leak-head"), func() { It("Should 
turn it into a struct successfully.", func() { - bc := setUpTest(BeaconNodeTester.TestConfig, "99") + log.SetLevel(log.DebugLevel) + BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, maxRetry, 1, 0, 0) + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) + + bc := setUpTest(BeaconNodeTester.TestConfig, "99") + + BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, maxRetry, 1, 0, 0) validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectParentRoot, BeaconNodeTester.TestEvents["100"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["100"].CorrectSignedBeaconBlockMhKey) validateBeaconState(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectBeaconStateMhKey) + testStopHeadTracking(cancel, bc, startGoRoutines) + }) }) Context("Correctly formatted Altair Block", func() { It("Should turn it into a struct successfully.", func() { - bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) + + bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") + BeaconNodeTester.testProcessBlock(ctx, bc, 
BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectParentRoot, BeaconNodeTester.TestEvents["2375703"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["2375703"].CorrectSignedBeaconBlockMhKey) validateBeaconState(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectBeaconStateMhKey) + testStopHeadTracking(cancel, bc, startGoRoutines) }) }) Context("Correctly formatted Altair Test Blocks", func() { It("Should turn it into a struct successfully.", func() { - bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703-dummy"].HeadMessage, 74240, maxRetry, 1, 0, 0) + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) + + bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") + BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["2375703-dummy"].HeadMessage, 74240, maxRetry, 1, 0, 0) bc = setUpTest(BeaconNodeTester.TestConfig, "2375702") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703-dummy-2"].HeadMessage, 74240, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["2375703-dummy-2"].HeadMessage, 74240, maxRetry, 1, 0, 0) + + testStopHeadTracking(cancel, bc, 
startGoRoutines) }) }) Context("Correctly formatted Phase0 Test Blocks", func() { It("Should turn it into a struct successfully.", func() { - bc := setUpTest(BeaconNodeTester.TestConfig, "99") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, 3, maxRetry, 1, 0, 0) + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) + + bc := setUpTest(BeaconNodeTester.TestConfig, "99") + BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, 3, maxRetry, 1, 0, 0) bc = setUpTest(BeaconNodeTester.TestConfig, "99") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100-dummy-2"].HeadMessage, 3, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["100-dummy-2"].HeadMessage, 3, maxRetry, 1, 0, 0) + + testStopHeadTracking(cancel, bc, startGoRoutines) }) }) Context("Two consecutive correct blocks", func() { It("Should handle both blocks correctly, without any reorgs or known_gaps", func() { - bc := setUpTest(BeaconNodeTester.TestConfig, "99") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, maxRetry, 1, 
0, 0) - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, 3, maxRetry, 1, 0, 0) + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) + + bc := setUpTest(BeaconNodeTester.TestConfig, "99") + BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["101"].HeadMessage, 3, maxRetry, 1, 0, 0) + + testStopHeadTracking(cancel, bc, startGoRoutines) }) }) Context("Two consecutive blocks with a bad parent", func() { It("Should add the previous block to the knownGaps table.", func() { - bc := setUpTest(BeaconNodeTester.TestConfig, "99") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, 3, maxRetry, 1, 0, 0) - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, 3, maxRetry, 1, 1, 1) + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) + + bc := setUpTest(BeaconNodeTester.TestConfig, "99") + BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, 3, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["101"].HeadMessage, 3, maxRetry, 1, 1, 1) + + testStopHeadTracking(cancel, bc, startGoRoutines) }) }) Context("Phase 0: We have a correctly formated SSZ SignedBeaconBlock and BeaconState", func() { @@ -348,10 +381,13 @@ var _ = Describe("Capturehead", Label("head"), func() { //}) Context("When the proper SSZ objects are not served", func() { It("Should return an error, and add the slot to the knownGaps table.", 
func() { - bc := setUpTest(BeaconNodeTester.TestConfig, "101") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["102-wrong-ssz-1"].HeadMessage, 3, maxRetry, 0, 1, 0) + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) + + bc := setUpTest(BeaconNodeTester.TestConfig, "101") + BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["102-wrong-ssz-1"].HeadMessage, 3, maxRetry, 0, 1, 0) knownGapCount := countKnownGapsTable(bc.Db) Expect(knownGapCount).To(Equal(1)) @@ -359,6 +395,8 @@ var _ = Describe("Capturehead", Label("head"), func() { start, end := queryKnownGaps(bc.Db, "102", "102") Expect(start).To(Equal(102)) Expect(end).To(Equal(102)) + + testStopHeadTracking(cancel, bc, startGoRoutines) }) }) }) @@ -366,21 +404,28 @@ var _ = Describe("Capturehead", Label("head"), func() { Describe("Known Gaps Scenario", Label("unit", "behavioral"), func() { Context("There is a gap at start up within one incrementing range.", func() { It("Should add only a single entry to the knownGaps table.", func() { - bc := setUpTest(BeaconNodeTester.TestConfig, "10") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - BeaconNodeTester.testKnownGapsMessages(bc, 100, 1, maxRetry, BeaconNodeTester.TestEvents["100"].HeadMessage) + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) + + bc := setUpTest(BeaconNodeTester.TestConfig, "10") + BeaconNodeTester.testKnownGapsMessages(ctx, bc, 100, 1, maxRetry, 
BeaconNodeTester.TestEvents["100"].HeadMessage) start, end := queryKnownGaps(bc.Db, "11", "99") Expect(start).To(Equal(11)) Expect(end).To(Equal(99)) + testStopHeadTracking(cancel, bc, startGoRoutines) }) }) Context("There is a gap at start up spanning multiple incrementing range.", func() { It("Should add multiple entries to the knownGaps table.", func() { - bc := setUpTest(BeaconNodeTester.TestConfig, "5") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - BeaconNodeTester.testKnownGapsMessages(bc, 10, 10, maxRetry, BeaconNodeTester.TestEvents["100"].HeadMessage) + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) + + bc := setUpTest(BeaconNodeTester.TestConfig, "5") + BeaconNodeTester.testKnownGapsMessages(ctx, bc, 10, 10, maxRetry, BeaconNodeTester.TestEvents["100"].HeadMessage) start, end := queryKnownGaps(bc.Db, "6", "16") Expect(start).To(Equal(6)) @@ -389,14 +434,19 @@ var _ = Describe("Capturehead", Label("head"), func() { start, end = queryKnownGaps(bc.Db, "96", "99") Expect(start).To(Equal(96)) Expect(end).To(Equal(99)) + + testStopHeadTracking(cancel, bc, startGoRoutines) }) }) Context("Gaps between two head messages", func() { It("Should add the slots in-between", func() { - bc := setUpTest(BeaconNodeTester.TestConfig, "99") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - BeaconNodeTester.testKnownGapsMessages(bc, 1000000, 3, maxRetry, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].HeadMessage) + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := 
context.WithCancel(context.Background()) + + bc := setUpTest(BeaconNodeTester.TestConfig, "99") + BeaconNodeTester.testKnownGapsMessages(ctx, bc, 1000000, 3, maxRetry, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].HeadMessage) start, end := queryKnownGaps(bc.Db, "101", "1000101") Expect(start).To(Equal(101)) @@ -405,6 +455,7 @@ var _ = Describe("Capturehead", Label("head"), func() { start, end = queryKnownGaps(bc.Db, "2000101", "2375702") Expect(start).To(Equal(2000101)) Expect(end).To(Equal(2375702)) + testStopHeadTracking(cancel, bc, startGoRoutines) }) }) }) @@ -412,34 +463,50 @@ var _ = Describe("Capturehead", Label("head"), func() { Describe("ReOrg Scenario", Label("unit", "behavioral"), func() { Context("Altair: Multiple head messages for the same slot.", func() { It("The previous block should be marked as 'forked', the new block should be the only one marked as 'proposed'.", func() { - bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - BeaconNodeTester.testMultipleHead(bc, TestEvents["2375703"].HeadMessage, TestEvents["2375703-dummy"].HeadMessage, 74240, maxRetry) + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) + + bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") + BeaconNodeTester.testMultipleHead(ctx, bc, TestEvents["2375703"].HeadMessage, TestEvents["2375703-dummy"].HeadMessage, 74240, maxRetry) + testStopHeadTracking(cancel, bc, startGoRoutines) }) }) Context("Phase0: Multiple head messages for the same slot.", func() { It("The previous block should be marked as 'forked', the new block should be the only one marked as 'proposed'.", func() { - bc := setUpTest(BeaconNodeTester.TestConfig, "99") 
BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - BeaconNodeTester.testMultipleHead(bc, TestEvents["100-dummy"].HeadMessage, TestEvents["100"].HeadMessage, 3, maxRetry) + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) + + bc := setUpTest(BeaconNodeTester.TestConfig, "99") + BeaconNodeTester.testMultipleHead(ctx, bc, TestEvents["100-dummy"].HeadMessage, TestEvents["100"].HeadMessage, 3, maxRetry) + testStopHeadTracking(cancel, bc, startGoRoutines) }) }) Context("Phase 0: Multiple reorgs have occurred on this slot", func() { It("The previous blocks should be marked as 'forked', the new block should be the only one marked as 'proposed'.", func() { - bc := setUpTest(BeaconNodeTester.TestConfig, "99") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - BeaconNodeTester.testMultipleReorgs(bc, TestEvents["100-dummy"].HeadMessage, TestEvents["100-dummy-2"].HeadMessage, TestEvents["100"].HeadMessage, 3, maxRetry) + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) + + bc := setUpTest(BeaconNodeTester.TestConfig, "99") + BeaconNodeTester.testMultipleReorgs(ctx, bc, TestEvents["100-dummy"].HeadMessage, TestEvents["100-dummy-2"].HeadMessage, TestEvents["100"].HeadMessage, 3, maxRetry) + testStopHeadTracking(cancel, bc, startGoRoutines) }) }) Context("Altair: Multiple reorgs have occurred on this slot", func() { It("The previous blocks should be marked as 'forked', the new block should be the only one marked as 'proposed'.", func() { - bc := 
setUpTest(BeaconNodeTester.TestConfig, "2375702") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - BeaconNodeTester.testMultipleReorgs(bc, TestEvents["2375703-dummy"].HeadMessage, TestEvents["2375703-dummy-2"].HeadMessage, TestEvents["2375703"].HeadMessage, 74240, maxRetry) + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) + + bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") + BeaconNodeTester.testMultipleReorgs(ctx, bc, TestEvents["2375703-dummy"].HeadMessage, TestEvents["2375703-dummy-2"].HeadMessage, TestEvents["2375703"].HeadMessage, 74240, maxRetry) + testStopHeadTracking(cancel, bc, startGoRoutines) }) }) }) @@ -831,8 +898,8 @@ func (tbc TestBeaconNode) provideSsz(slotIdentifier string, sszIdentifier string // Helper function to test three reorg messages. There are going to be many functions like this, // Because we need to test the same logic for multiple phases. 
-func (tbc TestBeaconNode) testMultipleReorgs(bc *beaconclient.BeaconClient, firstHead beaconclient.Head, secondHead beaconclient.Head, thirdHead beaconclient.Head, epoch int, maxRetry int) { - go bc.CaptureHead() +func (tbc TestBeaconNode) testMultipleReorgs(ctx context.Context, bc *beaconclient.BeaconClient, firstHead beaconclient.Head, secondHead beaconclient.Head, thirdHead beaconclient.Head, epoch int, maxRetry int) { + go bc.CaptureHead(ctx) time.Sleep(1 * time.Second) log.Info("Sending Messages to BeaconClient") @@ -893,8 +960,8 @@ func (tbc TestBeaconNode) testMultipleReorgs(bc *beaconclient.BeaconClient, firs } // A test to validate a single block was processed correctly -func (tbc TestBeaconNode) testProcessBlock(bc *beaconclient.BeaconClient, head beaconclient.Head, epoch int, maxRetry int, expectedSuccessInsert uint64, expectedKnownGaps uint64, expectedReorgs uint64) { - go bc.CaptureHead() +func (tbc TestBeaconNode) testProcessBlock(ctx context.Context, bc *beaconclient.BeaconClient, head beaconclient.Head, epoch int, maxRetry int, expectedSuccessInsert uint64, expectedKnownGaps uint64, expectedReorgs uint64) { + go bc.CaptureHead(ctx) time.Sleep(1 * time.Second) sendHeadMessage(bc, head, maxRetry, expectedSuccessInsert) @@ -923,8 +990,8 @@ func (tbc TestBeaconNode) testProcessBlock(bc *beaconclient.BeaconClient, head b // A test that ensures that if two HeadMessages occur for a single slot they are marked // as proposed and forked correctly. 
-func (tbc TestBeaconNode) testMultipleHead(bc *beaconclient.BeaconClient, firstHead beaconclient.Head, secondHead beaconclient.Head, epoch int, maxRetry int) { - go bc.CaptureHead() +func (tbc TestBeaconNode) testMultipleHead(ctx context.Context, bc *beaconclient.BeaconClient, firstHead beaconclient.Head, secondHead beaconclient.Head, epoch int, maxRetry int) { + go bc.CaptureHead(ctx) time.Sleep(1 * time.Second) sendHeadMessage(bc, firstHead, maxRetry, 1) @@ -950,9 +1017,9 @@ func (tbc TestBeaconNode) testMultipleHead(bc *beaconclient.BeaconClient, firstH // A test that ensures that if two HeadMessages occur for a single slot they are marked // as proposed and forked correctly. -func (tbc TestBeaconNode) testKnownGapsMessages(bc *beaconclient.BeaconClient, tableIncrement int, expectedEntries uint64, maxRetry int, msg ...beaconclient.Head) { +func (tbc TestBeaconNode) testKnownGapsMessages(ctx context.Context, bc *beaconclient.BeaconClient, tableIncrement int, expectedEntries uint64, maxRetry int, msg ...beaconclient.Head) { bc.KnownGapTableIncrement = tableIncrement - go bc.CaptureHead() + go bc.CaptureHead(ctx) time.Sleep(1 * time.Second) for _, headMsg := range msg { @@ -991,3 +1058,14 @@ func testSszRoot(msg Message) { Expect(err).ToNot(HaveOccurred()) Expect(msg.HeadMessage.Block).To(Equal("0x" + hex.EncodeToString(blockRoot[:]))) } + +// A makeshift function to stop head tracking and ensure we don't have any goroutine leaks +func testStopHeadTracking(cancel context.CancelFunc, bc *beaconclient.BeaconClient, startGoRoutines int) { + bc.Db.Close() + err := bc.StopHeadTracking(cancel) + Expect(err).ToNot(HaveOccurred()) + + time.Sleep(3 * time.Second) + endNum := runtime.NumGoroutine() + Expect(startGoRoutines).To(Equal(endNum)) +} diff --git a/pkg/beaconclient/capturehistoric.go b/pkg/beaconclient/capturehistoric.go index 2bf6dfc..50855de 100644 --- a/pkg/beaconclient/capturehistoric.go +++ b/pkg/beaconclient/capturehistoric.go @@ -164,8 +164,9 @@ func
handleBatchProcess(ctx context.Context, maxWorkers int, bp BatchProcessing, errs := bp.getSlotRange(ctx, slotsCh) // Periodically adds new entries.... if errs != nil { finalErrCh <- errs + } else { + finalErrCh <- nil } - finalErrCh <- nil log.Debug("We are stopping the processing of adding new entries") }() log.Debug("Waiting for shutdown signal from channel") diff --git a/pkg/beaconclient/capturehistoric_test.go b/pkg/beaconclient/capturehistoric_test.go index 5571a7e..5f36039 100644 --- a/pkg/beaconclient/capturehistoric_test.go +++ b/pkg/beaconclient/capturehistoric_test.go @@ -3,6 +3,7 @@ package beaconclient_test import ( "context" "fmt" + "runtime" "sync/atomic" "time" @@ -24,9 +25,11 @@ var _ = Describe("Capturehistoric", func() { Describe("Run the application in historic mode", Label("unit", "behavioral", "historical"), func() { Context("Phase0 + Altairs: When we need to process a multiple blocks in a multiple entries in the eth_beacon.historic_process table.", Label("deb"), func() { It("Successfully Process the Blocks", func() { - bc := setUpTest(BeaconNodeTester.TestConfig, "99") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() + startNum := runtime.NumGoroutine() + + bc := setUpTest(BeaconNodeTester.TestConfig, "99") BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 101, 10) BeaconNodeTester.runHistoricalProcess(bc, 2, 2, 0, 0, 0) // Run Two seperate processes @@ -35,6 +38,11 @@ var _ = Describe("Capturehistoric", func() { time.Sleep(2 * time.Second) validatePopularBatchBlocks(bc) + time.Sleep(3 * time.Second) + + bc.Db.Close() + endNum := runtime.NumGoroutine() + Expect(startNum).To(Equal(endNum)) }) }) Context("When the start block is greater than the endBlock", func() { @@ -70,11 +78,13 @@ var _ = Describe("Capturehistoric", func() { }) }) 
Describe("Running the Application to process Known Gaps", Label("unit", "behavioral", "knownGaps"), func() { - Context("Phase0 + Altairs: When we need to process a multiple blocks in a multiple entries in the eth_beacon.known_gaps table.", func() { + Context("Phase0 + Altairs: When we need to process a multiple blocks in a multiple entries in the eth_beacon.known_gaps table.", Label("leak"), func() { It("Successfully Process the Blocks", func() { - bc := setUpTest(BeaconNodeTester.TestConfig, "99") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() + startNum := runtime.NumGoroutine() + + bc := setUpTest(BeaconNodeTester.TestConfig, "99") BeaconNodeTester.writeEventToKnownGaps(bc, 100, 101) BeaconNodeTester.runKnownGapsProcess(bc, 2, 2, 0, 0, 0) // Run Two seperate processes @@ -83,6 +93,10 @@ var _ = Describe("Capturehistoric", func() { time.Sleep(2 * time.Second) validatePopularBatchBlocks(bc) + + bc.Db.Close() + endNum := runtime.NumGoroutine() + Expect(startNum).To(Equal(endNum)) }) }) Context("When the start block is greater than the endBlock", func() { @@ -107,13 +121,16 @@ var _ = Describe("Capturehistoric", func() { }) }) Describe("Running the application in Historic, Head, and KnownGaps mode", Label("unit", "historical", "full"), func() { - Context("When it recieves a head, historic and known Gaps message (in order)", func() { + Context("When it recieves a head, historic and known Gaps message (in order)", Label("deb"), func() { It("Should process them all successfully.", func() { - bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) 
defer httpmock.DeactivateAndReset() + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) + + bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") // Head - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) // Historical BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 100, 10) @@ -125,19 +142,25 @@ var _ = Describe("Capturehistoric", func() { time.Sleep(2 * time.Second) validatePopularBatchBlocks(bc) + + testStopHeadTracking(cancel, bc, startGoRoutines) + }) }) Context("When it recieves a historic, head and known Gaps message (in order)", func() { It("Should process them all successfully.", func() { - bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) + + bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") // Historical BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 100, 10) BeaconNodeTester.runHistoricalProcess(bc, 2, 1, 0, 0, 0) // Head - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) // Known Gaps BeaconNodeTester.writeEventToKnownGaps(bc, 101, 101) @@ -145,13 +168,17 @@ var _ = Describe("Capturehistoric", func() { time.Sleep(2 * time.Second) validatePopularBatchBlocks(bc) + testStopHeadTracking(cancel, bc, startGoRoutines) }) }) Context("When it recieves a known 
Gaps, historic and head message (in order)", func() { It("Should process them all successfully.", func() { - bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) + + bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") // Known Gaps BeaconNodeTester.writeEventToKnownGaps(bc, 101, 101) BeaconNodeTester.runKnownGapsProcess(bc, 2, 1, 0, 0, 0) @@ -161,10 +188,11 @@ var _ = Describe("Capturehistoric", func() { BeaconNodeTester.runHistoricalProcess(bc, 2, 2, 0, 0, 0) // Head - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) time.Sleep(2 * time.Second) validatePopularBatchBlocks(bc) + testStopHeadTracking(cancel, bc, startGoRoutines) }) }) }) diff --git a/pkg/beaconclient/incomingsse.go b/pkg/beaconclient/incomingsse.go index cdb4891..0833f44 100644 --- a/pkg/beaconclient/incomingsse.go +++ b/pkg/beaconclient/incomingsse.go @@ -18,6 +18,7 @@ package beaconclient import ( + "context" "encoding/json" "time" @@ -33,7 +34,7 @@ var ( // This function will capture all the SSE events for a given SseEvents object. // When new messages come in, it will ensure that they are decoded into JSON. // If any errors occur, it log the error information. 
-func handleIncomingSseEvent[P ProcessedEvents](eventHandler *SseEvents[P], errMetricInc func(uint64)) { +func handleIncomingSseEvent[P ProcessedEvents](ctx context.Context, eventHandler *SseEvents[P], errMetricInc func(uint64)) { go func() { errG := new(errgroup.Group) errG.Go(func() error { @@ -56,6 +57,10 @@ func handleIncomingSseEvent[P ProcessedEvents](eventHandler *SseEvents[P], errMe }() for { select { + case <-ctx.Done(): + close(eventHandler.MessagesCh) + close(eventHandler.ErrorCh) + return case message := <-eventHandler.MessagesCh: // Message can be nil if its a keep-alive message if len(message.Data) != 0 { @@ -91,8 +96,8 @@ func processMsg[P ProcessedEvents](msg []byte, processCh chan<- *P, errorCh chan } // Capture all of the event topics. -func (bc *BeaconClient) captureEventTopic() { +func (bc *BeaconClient) captureEventTopic(ctx context.Context) { log.Info("We are capturing all SSE events") - go handleIncomingSseEvent(bc.HeadTracking, bc.Metrics.IncrementHeadError) - go handleIncomingSseEvent(bc.ReOrgTracking, bc.Metrics.IncrementReorgError) + go handleIncomingSseEvent(ctx, bc.HeadTracking, bc.Metrics.IncrementHeadError) + go handleIncomingSseEvent(ctx, bc.ReOrgTracking, bc.Metrics.IncrementReorgError) } diff --git a/pkg/beaconclient/processevents.go b/pkg/beaconclient/processevents.go index 8dd5520..15d183f 100644 --- a/pkg/beaconclient/processevents.go +++ b/pkg/beaconclient/processevents.go @@ -19,6 +19,7 @@ package beaconclient import ( + "context" "fmt" "strconv" @@ -26,52 +27,63 @@ import ( ) // This function will perform the necessary steps to handle a reorg. 
-func (bc *BeaconClient) handleReorg() { +func (bc *BeaconClient) handleReorg(ctx context.Context) { log.Info("Starting to process reorgs.") for { - reorg := <-bc.ReOrgTracking.ProcessCh - log.WithFields(log.Fields{"reorg": reorg}).Debug("Received a new reorg message.") - writeReorgs(bc.Db, reorg.Slot, reorg.NewHeadBlock, bc.Metrics) + select { + case <-ctx.Done(): + close(bc.ReOrgTracking.ProcessCh) + return + case reorg := <-bc.ReOrgTracking.ProcessCh: + log.WithFields(log.Fields{"reorg": reorg}).Debug("Received a new reorg message.") + writeReorgs(bc.Db, reorg.Slot, reorg.NewHeadBlock, bc.Metrics) + } } } // This function will handle the latest head event. -func (bc *BeaconClient) handleHead() { +func (bc *BeaconClient) handleHead(ctx context.Context) { log.Info("Starting to process head.") errorSlots := 0 for { - head := <-bc.HeadTracking.ProcessCh - // Process all the work here. - slot, err := strconv.Atoi(head.Slot) - if err != nil { - bc.HeadTracking.ErrorCh <- &SseError{ - err: fmt.Errorf("Unable to turn the slot from string to int: %s", head.Slot), + select { + case <-ctx.Done(): + close(bc.HeadTracking.ProcessCh) + return + case head := <-bc.HeadTracking.ProcessCh: + + // Process all the work here. 
+ slot, err := strconv.Atoi(head.Slot) + if err != nil { + bc.HeadTracking.ErrorCh <- &SseError{ + err: fmt.Errorf("Unable to turn the slot from string to int: %s", head.Slot), + } + errorSlots = errorSlots + 1 + continue } - errorSlots = errorSlots + 1 - continue + if errorSlots != 0 && bc.PreviousSlot != 0 { + log.WithFields(log.Fields{ + "lastProcessedSlot": bc.PreviousSlot, + "errorSlots": errorSlots, + }).Warn("We added slots to the knownGaps table because we got bad head messages.") + writeKnownGaps(bc.Db, bc.KnownGapTableIncrement, bc.PreviousSlot+1, slot, fmt.Errorf("Bad Head Messages"), "headProcessing", bc.Metrics) + errorSlots = 0 + } + + log.WithFields(log.Fields{"head": head}).Debug("We are going to start processing the slot.") + + // Not used anywhere yet but might be useful to have. + if bc.PreviousSlot == 0 && bc.PreviousBlockRoot == "" { + bc.StartingSlot = slot + } + + go processHeadSlot(ctx, bc.Db, bc.ServerEndpoint, slot, head.Block, head.State, bc.PreviousSlot, bc.PreviousBlockRoot, bc.Metrics, bc.KnownGapTableIncrement, bc.CheckDb) + + log.WithFields(log.Fields{"head": head.Slot}).Debug("We finished calling processHeadSlot.") + + // Update the previous block + bc.PreviousSlot = slot + bc.PreviousBlockRoot = head.Block } - if errorSlots != 0 && bc.PreviousSlot != 0 { - log.WithFields(log.Fields{ - "lastProcessedSlot": bc.PreviousSlot, - "errorSlots": errorSlots, - }).Warn("We added slots to the knownGaps table because we got bad head messages.") - writeKnownGaps(bc.Db, bc.KnownGapTableIncrement, bc.PreviousSlot+1, slot, fmt.Errorf("Bad Head Messages"), "headProcessing", bc.Metrics) - errorSlots = 0 - } - - log.WithFields(log.Fields{"head": head}).Debug("We are going to start processing the slot.") - - // Not used anywhere yet but might be useful to have. 
- if bc.PreviousSlot == 0 && bc.PreviousBlockRoot == "" { - bc.StartingSlot = slot - } - - go processHeadSlot(bc.Db, bc.ServerEndpoint, slot, head.Block, head.State, bc.PreviousSlot, bc.PreviousBlockRoot, bc.Metrics, bc.KnownGapTableIncrement, bc.CheckDb) - - log.WithFields(log.Fields{"head": head.Slot}).Debug("We finished calling processHeadSlot.") - - // Update the previous block - bc.PreviousSlot = slot - bc.PreviousBlockRoot = head.Block } } diff --git a/pkg/beaconclient/processslot.go b/pkg/beaconclient/processslot.go index 4e075b4..4b1f55e 100644 --- a/pkg/beaconclient/processslot.go +++ b/pkg/beaconclient/processslot.go @@ -239,12 +239,12 @@ func processFullSlot(ctx context.Context, db sql.Database, serverAddress string, } // Handle a slot that is at head. A wrapper function for calling `handleFullSlot`. -func processHeadSlot(db sql.Database, serverAddress string, slot int, blockRoot string, stateRoot string, previousSlot int, previousBlockRoot string, metrics *BeaconClientMetrics, knownGapsTableIncrement int, checkDb bool) { +func processHeadSlot(ctx context.Context, db sql.Database, serverAddress string, slot int, blockRoot string, stateRoot string, previousSlot int, previousBlockRoot string, metrics *BeaconClientMetrics, knownGapsTableIncrement int, checkDb bool) { // Get the knownGaps at startUp. 
if previousSlot == 0 && previousBlockRoot == "" { writeStartUpGaps(db, knownGapsTableIncrement, slot, metrics) } - err, errReason := processFullSlot(context.Background(), db, serverAddress, slot, blockRoot, stateRoot, previousSlot, previousBlockRoot, "head", metrics, knownGapsTableIncrement, checkDb) + err, errReason := processFullSlot(ctx, db, serverAddress, slot, blockRoot, stateRoot, previousSlot, previousBlockRoot, "head", metrics, knownGapsTableIncrement, checkDb) if err != nil { writeKnownGaps(db, knownGapsTableIncrement, slot, slot, err, errReason, metrics) } diff --git a/pkg/beaconclient/systemvalidation_test.go b/pkg/beaconclient/systemvalidation_test.go index 7aabd41..9187858 100644 --- a/pkg/beaconclient/systemvalidation_test.go +++ b/pkg/beaconclient/systemvalidation_test.go @@ -1,7 +1,9 @@ package beaconclient_test import ( + "context" "os" + "runtime" "strconv" "time" @@ -63,7 +65,11 @@ func getEnvInt(envVar string) int { // Start head tracking and wait for the expected results. 
func processProdHeadBlocks(bc *beaconclient.BeaconClient, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) { - go bc.CaptureHead() + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) + go bc.CaptureHead(ctx) time.Sleep(1 * time.Second) validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError) + + testStopHeadTracking(cancel, bc, startGoRoutines) } -- 2.45.2 From 67305e07c36a2ede7510db35d2251cda90a1d915 Mon Sep 17 00:00:00 2001 From: Abdul Rabbani Date: Tue, 21 Jun 2022 16:42:51 -0400 Subject: [PATCH 05/18] Add condition for SSE subscription --- cmd/full.go | 2 +- cmd/head.go | 6 ++- pkg/beaconclient/capturehead.go | 4 +- pkg/beaconclient/capturehead_test.go | 9 ++-- pkg/beaconclient/incomingsse.go | 61 ++++++++++++++--------- pkg/beaconclient/systemvalidation_test.go | 2 +- 6 files changed, 50 insertions(+), 34 deletions(-) diff --git a/cmd/full.go b/cmd/full.go index 840d39d..6d27670 100644 --- a/cmd/full.go +++ b/cmd/full.go @@ -76,7 +76,7 @@ func startFullProcessing() { log.Info("The Beacon Client has booted successfully!") // Capture head blocks hdCtx, hdCancel := context.WithCancel(context.Background()) - go Bc.CaptureHead(hdCtx) + go Bc.CaptureHead(hdCtx, false) hpContext, hpCancel := context.WithCancel(context.Background()) diff --git a/cmd/head.go b/cmd/head.go index a4f88ec..41c5b43 100644 --- a/cmd/head.go +++ b/cmd/head.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "net/http" + "os" "strconv" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -63,7 +64,7 @@ func startHeadTracking() { log.Info("The Beacon Client has booted successfully!") // Capture head blocks hdCtx, hdCancel := context.WithCancel(context.Background()) - go Bc.CaptureHead(hdCtx) + go Bc.CaptureHead(hdCtx, false) kgCtx, kgCancel := context.WithCancel(context.Background()) if viper.GetBool("kg.processKnownGaps") { @@ -96,7 +97,8 @@ func 
startHeadTracking() { } else { log.Info("Gracefully shutdown ipld-eth-beacon-indexer") } - + log.Debug("WTF") + os.Exit(0) } func init() { diff --git a/pkg/beaconclient/capturehead.go b/pkg/beaconclient/capturehead.go index 0b54881..a18a70d 100644 --- a/pkg/beaconclient/capturehead.go +++ b/pkg/beaconclient/capturehead.go @@ -24,11 +24,11 @@ import ( ) // This function will perform all the heavy lifting for tracking the head of the chain. -func (bc *BeaconClient) CaptureHead(ctx context.Context) { +func (bc *BeaconClient) CaptureHead(ctx context.Context, skipSee bool) { log.Info("We are tracking the head of the chain.") go bc.handleHead(ctx) go bc.handleReorg(ctx) - bc.captureEventTopic(ctx) + bc.captureEventTopic(ctx, skipSee) } // Stop the head tracking service. diff --git a/pkg/beaconclient/capturehead_test.go b/pkg/beaconclient/capturehead_test.go index adb2bab..107041f 100644 --- a/pkg/beaconclient/capturehead_test.go +++ b/pkg/beaconclient/capturehead_test.go @@ -899,7 +899,7 @@ func (tbc TestBeaconNode) provideSsz(slotIdentifier string, sszIdentifier string // Helper function to test three reorg messages. There are going to be many functions like this, // Because we need to test the same logic for multiple phases. 
func (tbc TestBeaconNode) testMultipleReorgs(ctx context.Context, bc *beaconclient.BeaconClient, firstHead beaconclient.Head, secondHead beaconclient.Head, thirdHead beaconclient.Head, epoch int, maxRetry int) { - go bc.CaptureHead(ctx) + go bc.CaptureHead(ctx, true) time.Sleep(1 * time.Second) log.Info("Sending Messages to BeaconClient") @@ -961,7 +961,7 @@ func (tbc TestBeaconNode) testMultipleReorgs(ctx context.Context, bc *beaconclie // A test to validate a single block was processed correctly func (tbc TestBeaconNode) testProcessBlock(ctx context.Context, bc *beaconclient.BeaconClient, head beaconclient.Head, epoch int, maxRetry int, expectedSuccessInsert uint64, expectedKnownGaps uint64, expectedReorgs uint64) { - go bc.CaptureHead(ctx) + go bc.CaptureHead(ctx, true) time.Sleep(1 * time.Second) sendHeadMessage(bc, head, maxRetry, expectedSuccessInsert) @@ -991,7 +991,7 @@ func (tbc TestBeaconNode) testProcessBlock(ctx context.Context, bc *beaconclient // A test that ensures that if two HeadMessages occur for a single slot they are marked // as proposed and forked correctly. func (tbc TestBeaconNode) testMultipleHead(ctx context.Context, bc *beaconclient.BeaconClient, firstHead beaconclient.Head, secondHead beaconclient.Head, epoch int, maxRetry int) { - go bc.CaptureHead(ctx) + go bc.CaptureHead(ctx, true) time.Sleep(1 * time.Second) sendHeadMessage(bc, firstHead, maxRetry, 1) @@ -1019,7 +1019,7 @@ func (tbc TestBeaconNode) testMultipleHead(ctx context.Context, bc *beaconclient // as proposed and forked correctly. 
func (tbc TestBeaconNode) testKnownGapsMessages(ctx context.Context, bc *beaconclient.BeaconClient, tableIncrement int, expectedEntries uint64, maxRetry int, msg ...beaconclient.Head) { bc.KnownGapTableIncrement = tableIncrement - go bc.CaptureHead(ctx) + go bc.CaptureHead(ctx, true) time.Sleep(1 * time.Second) for _, headMsg := range msg { @@ -1067,5 +1067,6 @@ func testStopHeadTracking(cancel context.CancelFunc, bc *beaconclient.BeaconClie time.Sleep(3 * time.Second) endNum := runtime.NumGoroutine() + //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) Expect(startGoRoutines).To(Equal(endNum)) } diff --git a/pkg/beaconclient/incomingsse.go b/pkg/beaconclient/incomingsse.go index 0833f44..ab94c07 100644 --- a/pkg/beaconclient/incomingsse.go +++ b/pkg/beaconclient/incomingsse.go @@ -24,37 +24,50 @@ import ( log "github.com/sirupsen/logrus" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/loghelper" - "golang.org/x/sync/errgroup" -) - -var ( - shutdownWaitInterval = time.Duration(5) * time.Second ) // This function will capture all the SSE events for a given SseEvents object. // When new messages come in, it will ensure that they are decoded into JSON. // If any errors occur, it log the error information. 
-func handleIncomingSseEvent[P ProcessedEvents](ctx context.Context, eventHandler *SseEvents[P], errMetricInc func(uint64)) { - go func() { - errG := new(errgroup.Group) - errG.Go(func() error { - err := eventHandler.SseClient.SubscribeChanRaw(eventHandler.MessagesCh) +func handleIncomingSseEvent[P ProcessedEvents](ctx context.Context, eventHandler *SseEvents[P], errMetricInc func(uint64), skipSse bool) { + //go func() { + // subCh := make(chan error, 1) + // go func() { + // err := eventHandler.SseClient.SubscribeChanRawWithContext(ctx, eventHandler.MessagesCh) + // if err != nil { + // subCh <- err + // } + // subCh <- nil + // }() + // select { + // case err := <-subCh: + // if err != nil { + // log.WithFields(log.Fields{ + // "err": err, + // "endpoint": eventHandler.Endpoint, + // }).Error("Unable to subscribe to the SSE endpoint.") + // return + // } else { + // loghelper.LogEndpoint(eventHandler.Endpoint).Info("Successfully subscribed to the event stream.") + // } + // case <-ctx.Done(): + // return + // } + //}() + if !skipSse { + for { + err := eventHandler.SseClient.SubscribeChanRawWithContext(ctx, eventHandler.MessagesCh) if err != nil { - return err + loghelper.LogEndpoint(eventHandler.Endpoint).WithFields(log.Fields{ + "err": err}).Error("We are unable to subscribe to the SSE endpoint") + time.Sleep(3 * time.Second) + continue } - return nil - }) - if err := errG.Wait(); err != nil { - log.WithFields(log.Fields{ - "err": err, - "endpoint": eventHandler.Endpoint, - }).Error("Unable to subscribe to the SSE endpoint.") - return - } else { loghelper.LogEndpoint(eventHandler.Endpoint).Info("Successfully subscribed to the event stream.") + break } + } - }() for { select { case <-ctx.Done(): @@ -96,8 +109,8 @@ func processMsg[P ProcessedEvents](msg []byte, processCh chan<- *P, errorCh chan } // Capture all of the event topics. 
-func (bc *BeaconClient) captureEventTopic(ctx context.Context) { +func (bc *BeaconClient) captureEventTopic(ctx context.Context, skipSse bool) { log.Info("We are capturing all SSE events") - go handleIncomingSseEvent(ctx, bc.HeadTracking, bc.Metrics.IncrementHeadError) - go handleIncomingSseEvent(ctx, bc.ReOrgTracking, bc.Metrics.IncrementReorgError) + go handleIncomingSseEvent(ctx, bc.HeadTracking, bc.Metrics.IncrementHeadError, skipSse) + go handleIncomingSseEvent(ctx, bc.ReOrgTracking, bc.Metrics.IncrementReorgError, skipSse) } diff --git a/pkg/beaconclient/systemvalidation_test.go b/pkg/beaconclient/systemvalidation_test.go index 9187858..fe67772 100644 --- a/pkg/beaconclient/systemvalidation_test.go +++ b/pkg/beaconclient/systemvalidation_test.go @@ -67,7 +67,7 @@ func getEnvInt(envVar string) int { func processProdHeadBlocks(bc *beaconclient.BeaconClient, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) { startGoRoutines := runtime.NumGoroutine() ctx, cancel := context.WithCancel(context.Background()) - go bc.CaptureHead(ctx) + go bc.CaptureHead(ctx, false) time.Sleep(1 * time.Second) validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError) -- 2.45.2 From 8253201f95f8acf0f62e7d2a9bbe430be978bf39 Mon Sep 17 00:00:00 2001 From: Abdul Rabbani Date: Wed, 22 Jun 2022 12:06:52 -0400 Subject: [PATCH 06/18] Limit head workers + use single context + skipSse --- cmd/capture.go | 8 +++-- cmd/full.go | 14 +++----- cmd/head.go | 13 +++---- cmd/historic.go | 11 +++--- internal/shutdown/shutdown.go | 36 +++++++++---------- internal/shutdown/shutdown_test.go | 15 +++----- pkg/beaconclient/capturehead.go | 20 +++++++---- pkg/beaconclient/capturehead_test.go | 44 +++++++++++------------ pkg/beaconclient/capturehistoric.go | 18 ++++++---- pkg/beaconclient/capturehistoric_test.go | 37 ++++++++++++------- pkg/beaconclient/processevents.go | 40 ++++++++++++++++++--- 
pkg/beaconclient/processknowngaps.go | 19 ++++++---- pkg/beaconclient/queryserver.go | 7 +++- pkg/beaconclient/systemvalidation_test.go | 4 +-- pkg/gracefulshutdown/gracefulshutdown.go | 6 +++- 15 files changed, 172 insertions(+), 120 deletions(-) diff --git a/cmd/capture.go b/cmd/capture.go index d144834..6d48027 100644 --- a/cmd/capture.go +++ b/cmd/capture.go @@ -39,6 +39,7 @@ var ( bcConnectionProtocol string bcType string bcMaxHistoricProcessWorker int + bcMaxHeadProcessWorker int bcUniqueNodeIdentifier int bcCheckDb bool kgMaxWorker int @@ -96,7 +97,8 @@ func init() { captureCmd.PersistentFlags().StringVarP(&bcConnectionProtocol, "bc.connectionProtocol", "", "http", "protocol for connecting to the beacon node.") captureCmd.PersistentFlags().IntVarP(&bcBootRetryInterval, "bc.bootRetryInterval", "", 30, "The amount of time to wait between retries while booting the application") captureCmd.PersistentFlags().IntVarP(&bcBootMaxRetry, "bc.bootMaxRetry", "", 5, "The amount of time to wait between retries while booting the application") - captureCmd.PersistentFlags().IntVarP(&bcMaxHistoricProcessWorker, "bc.maxHistoricProcessWorker", "", 30, "The number of workers that should be actively processing slots from the eth-beacon.historic_process table. Be careful of system memory.") + captureCmd.PersistentFlags().IntVarP(&bcMaxHistoricProcessWorker, "bc.maxHistoricProcessWorker", "", 3, "The number of workers that should be actively processing slots from the eth-beacon.historic_process table. Be careful of system memory.") + captureCmd.PersistentFlags().IntVarP(&bcMaxHeadProcessWorker, "bc.maxHeadProcessWorker", "", 3, "The number of workers that should be actively processing slots head slots. Be careful of system memory.") captureCmd.PersistentFlags().IntVarP(&bcUniqueNodeIdentifier, "bc.uniqueNodeIdentifier", "", 0, "The unique identifier of this application. 
Each application connecting to the DB should have a unique identifier.") captureCmd.PersistentFlags().BoolVarP(&bcCheckDb, "bc.checkDb", "", true, "Should we check to see if the slot exists in the DB before writing it?") // err = captureCmd.MarkPersistentFlagRequired("bc.address") @@ -107,7 +109,7 @@ func init() { //// Known Gaps specific captureCmd.PersistentFlags().BoolVarP(&kgProcessGaps, "kg.processKnownGaps", "", true, "Should we process the slots within the eth-beacon.known_gaps table.") captureCmd.PersistentFlags().IntVarP(&kgTableIncrement, "kg.increment", "", 10000, "The max slots within a single entry to the known_gaps table.") - captureCmd.PersistentFlags().IntVarP(&kgMaxWorker, "kg.maxKnownGapsWorker", "", 30, "The number of workers that should be actively processing slots from the eth-beacon.known_gaps table. Be careful of system memory.") + captureCmd.PersistentFlags().IntVarP(&kgMaxWorker, "kg.maxKnownGapsWorker", "", 3, "The number of workers that should be actively processing slots from the eth-beacon.known_gaps table. 
Be careful of system memory.") // Prometheus Specific captureCmd.PersistentFlags().BoolVarP(&pmMetrics, "pm.metrics", "", true, "Should we capture prometheus metrics.") @@ -157,6 +159,8 @@ func init() { exitErr(err) err = viper.BindPFlag("bc.maxHistoricProcessWorker", captureCmd.PersistentFlags().Lookup("bc.maxHistoricProcessWorker")) exitErr(err) + err = viper.BindPFlag("bc.maxHeadProcessWorker", captureCmd.PersistentFlags().Lookup("bc.maxHeadProcessWorker")) + exitErr(err) err = viper.BindPFlag("bc.uniqueNodeIdentifier", captureCmd.PersistentFlags().Lookup("bc.uniqueNodeIdentifier")) exitErr(err) err = viper.BindPFlag("bc.checkDb", captureCmd.PersistentFlags().Lookup("bc.checkDb")) diff --git a/cmd/full.go b/cmd/full.go index 6d27670..1ba83a9 100644 --- a/cmd/full.go +++ b/cmd/full.go @@ -59,7 +59,7 @@ func init() { func startFullProcessing() { // Boot the application log.Info("Starting the application in head tracking mode.") - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) Bc, Db, err := boot.BootApplicationWithRetry(ctx, viper.GetString("db.address"), viper.GetInt("db.port"), viper.GetString("db.name"), viper.GetString("db.username"), viper.GetString("db.password"), viper.GetString("db.driver"), viper.GetString("bc.address"), viper.GetInt("bc.port"), viper.GetString("bc.connectionProtocol"), viper.GetString("bc.type"), viper.GetInt("bc.bootRetryInterval"), viper.GetInt("bc.bootMaxRetry"), @@ -75,14 +75,11 @@ func startFullProcessing() { log.Info("The Beacon Client has booted successfully!") // Capture head blocks - hdCtx, hdCancel := context.WithCancel(context.Background()) - go Bc.CaptureHead(hdCtx, false) - - hpContext, hpCancel := context.WithCancel(context.Background()) + go Bc.CaptureHead(ctx, viper.GetInt("bc.maxHeadProcessWorker"), false) errG, _ := errgroup.WithContext(context.Background()) errG.Go(func() error { - errs := Bc.CaptureHistoric(hpContext, viper.GetInt("bc.maxHistoricProcessWorker")) + errs := 
Bc.CaptureHistoric(ctx, viper.GetInt("bc.maxHistoricProcessWorker")) if len(errs) != 0 { if len(errs) != 0 { log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing historic events") @@ -91,12 +88,11 @@ func startFullProcessing() { } return nil }) - kgCtx, kgCancel := context.WithCancel(context.Background()) if viper.GetBool("kg.processKnownGaps") { go func() { errG := new(errgroup.Group) errG.Go(func() error { - errs := Bc.ProcessKnownGaps(kgCtx, viper.GetInt("kg.maxKnownGapsWorker")) + errs := Bc.ProcessKnownGaps(ctx, viper.GetInt("kg.maxKnownGapsWorker")) if len(errs) != 0 { log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps") return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps") @@ -116,7 +112,7 @@ func startFullProcessing() { } // Shutdown when the time is right. - err = shutdown.ShutdownFull(ctx, hdCancel, kgCancel, hpCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc) + err = shutdown.ShutdownFull(ctx, cancel, notifierCh, maxWaitSecondsShutdown, Db, Bc) if err != nil { loghelper.LogError(err).Error("Ungracefully Shutdown ipld-eth-beacon-indexer!") } else { diff --git a/cmd/head.go b/cmd/head.go index 41c5b43..1aea4ea 100644 --- a/cmd/head.go +++ b/cmd/head.go @@ -20,7 +20,6 @@ import ( "context" "fmt" "net/http" - "os" "strconv" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -47,7 +46,7 @@ var headCmd = &cobra.Command{ func startHeadTracking() { // Boot the application log.Info("Starting the application in head tracking mode.") - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) Bc, Db, err := boot.BootApplicationWithRetry(ctx, viper.GetString("db.address"), viper.GetInt("db.port"), viper.GetString("db.name"), viper.GetString("db.username"), viper.GetString("db.password"), viper.GetString("db.driver"), viper.GetString("bc.address"), viper.GetInt("bc.port"), viper.GetString("bc.connectionProtocol"), 
viper.GetString("bc.type"), viper.GetInt("bc.bootRetryInterval"), viper.GetInt("bc.bootMaxRetry"), @@ -63,15 +62,13 @@ func startHeadTracking() { log.Info("The Beacon Client has booted successfully!") // Capture head blocks - hdCtx, hdCancel := context.WithCancel(context.Background()) - go Bc.CaptureHead(hdCtx, false) + go Bc.CaptureHead(ctx, viper.GetInt("bc.maxHeadProcessWorker"), false) - kgCtx, kgCancel := context.WithCancel(context.Background()) if viper.GetBool("kg.processKnownGaps") { go func() { errG := new(errgroup.Group) errG.Go(func() error { - errs := Bc.ProcessKnownGaps(kgCtx, viper.GetInt("kg.maxKnownGapsWorker")) + errs := Bc.ProcessKnownGaps(ctx, viper.GetInt("kg.maxKnownGapsWorker")) if len(errs) != 0 { log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps") return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps") @@ -91,14 +88,12 @@ func startHeadTracking() { } // Shutdown when the time is right. 
- err = shutdown.ShutdownHeadTracking(ctx, hdCancel, kgCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc) + err = shutdown.ShutdownHeadTracking(ctx, cancel, notifierCh, maxWaitSecondsShutdown, Db, Bc) if err != nil { loghelper.LogError(err).Error("Ungracefully Shutdown ipld-eth-beacon-indexer!") } else { log.Info("Gracefully shutdown ipld-eth-beacon-indexer") } - log.Debug("WTF") - os.Exit(0) } func init() { diff --git a/cmd/historic.go b/cmd/historic.go index 6e0a03e..78797d8 100644 --- a/cmd/historic.go +++ b/cmd/historic.go @@ -49,7 +49,7 @@ var historicCmd = &cobra.Command{ func startHistoricProcessing() { // Boot the application log.Info("Starting the application in head tracking mode.") - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) Bc, Db, err := boot.BootApplicationWithRetry(ctx, viper.GetString("db.address"), viper.GetInt("db.port"), viper.GetString("db.name"), viper.GetString("db.username"), viper.GetString("db.password"), viper.GetString("db.driver"), viper.GetString("bc.address"), viper.GetInt("bc.port"), viper.GetString("bc.connectionProtocol"), viper.GetString("bc.type"), viper.GetInt("bc.bootRetryInterval"), viper.GetInt("bc.bootMaxRetry"), @@ -63,11 +63,9 @@ func startHistoricProcessing() { serveProm(addr) } - hpContext, hpCancel := context.WithCancel(context.Background()) - errG, _ := errgroup.WithContext(context.Background()) errG.Go(func() error { - errs := Bc.CaptureHistoric(hpContext, viper.GetInt("bc.maxHistoricProcessWorker")) + errs := Bc.CaptureHistoric(ctx, viper.GetInt("bc.maxHistoricProcessWorker")) if len(errs) != 0 { if len(errs) != 0 { log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing historic events") @@ -77,12 +75,11 @@ func startHistoricProcessing() { return nil }) - kgContext, kgCancel := context.WithCancel(context.Background()) if viper.GetBool("kg.processKnownGaps") { go func() { errG := new(errgroup.Group) errG.Go(func() error { - errs := 
Bc.ProcessKnownGaps(kgContext, viper.GetInt("kg.maxKnownGapsWorker")) + errs := Bc.ProcessKnownGaps(ctx, viper.GetInt("kg.maxKnownGapsWorker")) if len(errs) != 0 { log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps") return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps") @@ -102,7 +99,7 @@ func startHistoricProcessing() { } // Shutdown when the time is right. - err = shutdown.ShutdownHistoricProcessing(ctx, kgCancel, hpCancel, notifierCh, maxWaitSecondsShutdown, Db, Bc) + err = shutdown.ShutdownHistoricProcessing(ctx, cancel, notifierCh, maxWaitSecondsShutdown, Db, Bc) if err != nil { loghelper.LogError(err).Error("Ungracefully Shutdown ipld-eth-beacon-indexer!") } else { diff --git a/internal/shutdown/shutdown.go b/internal/shutdown/shutdown.go index 7e03fb0..fc978af 100644 --- a/internal/shutdown/shutdown.go +++ b/internal/shutdown/shutdown.go @@ -40,68 +40,66 @@ func ShutdownServices(ctx context.Context, notifierCh chan os.Signal, waitTime t } // Wrapper function for shutting down the head tracking process. -func ShutdownHeadTracking(ctx context.Context, hdCancel context.CancelFunc, kgCancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error { +func ShutdownHeadTracking(ctx context.Context, cancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error { return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{ // Combining DB shutdown with BC because BC needs DB open to cleanly shutdown. 
"beaconClient": func(ctx context.Context) error { defer DB.Close() - err := BC.StopHeadTracking(hdCancel) - if err != nil { - loghelper.LogError(err).Error("Unable to trigger shutdown of head tracking") - } + cancel() + BC.StopHeadTracking(ctx, false) if BC.KnownGapsProcess != (beaconclient.KnownGapsProcessing{}) { - err = BC.StopKnownGapsProcessing(kgCancel) + err := BC.StopKnownGapsProcessing(ctx) if err != nil { loghelper.LogError(err).Error("Unable to stop processing known gaps") + return err } } - return err + return nil }, }) } // Wrapper function for shutting down the head tracking process. -func ShutdownHistoricProcessing(ctx context.Context, kgCancel, hpCancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error { +func ShutdownHistoricProcessing(ctx context.Context, cancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error { return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{ // Combining DB shutdown with BC because BC needs DB open to cleanly shutdown. 
"beaconClient": func(ctx context.Context) error { defer DB.Close() - err := BC.StopHistoric(hpCancel) + cancel() + err := BC.StopHistoric(ctx) if err != nil { loghelper.LogError(err).Error("Unable to stop processing historic") } if BC.KnownGapsProcess != (beaconclient.KnownGapsProcessing{}) { - err = BC.StopKnownGapsProcessing(kgCancel) + err = BC.StopKnownGapsProcessing(ctx) if err != nil { loghelper.LogError(err).Error("Unable to stop processing known gaps") + return err } } - return err + return nil }, }) } // Shutdown the head and historical processing -func ShutdownFull(ctx context.Context, hdCancel context.CancelFunc, kgCancel, hpCancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error { +func ShutdownFull(ctx context.Context, cancel context.CancelFunc, notifierCh chan os.Signal, waitTime time.Duration, DB sql.Database, BC *beaconclient.BeaconClient) error { return ShutdownServices(ctx, notifierCh, waitTime, DB, BC, map[string]gracefulshutdown.Operation{ // Combining DB shutdown with BC because BC needs DB open to cleanly shutdown. 
"beaconClient": func(ctx context.Context) error { defer DB.Close() - err := BC.StopHistoric(hpCancel) + cancel() + err := BC.StopHistoric(ctx) if err != nil { loghelper.LogError(err).Error("Unable to stop processing historic") } if BC.KnownGapsProcess != (beaconclient.KnownGapsProcessing{}) { - err = BC.StopKnownGapsProcessing(kgCancel) + err = BC.StopKnownGapsProcessing(ctx) if err != nil { loghelper.LogError(err).Error("Unable to stop processing known gaps") } } - err = BC.StopHeadTracking(hdCancel) - if err != nil { - loghelper.LogError(err).Error("Unable to trigger shutdown of head tracking") - } - + BC.StopHeadTracking(ctx, false) return err }, }) diff --git a/internal/shutdown/shutdown_test.go b/internal/shutdown/shutdown_test.go index 99dc6dd..eb56d10 100644 --- a/internal/shutdown/shutdown_test.go +++ b/internal/shutdown/shutdown_test.go @@ -56,12 +56,13 @@ var ( BC *beaconclient.BeaconClient err error ctx context.Context + cancel context.CancelFunc notifierCh chan os.Signal ) var _ = Describe("Shutdown", func() { BeforeEach(func() { - ctx = context.Background() + ctx, cancel = context.WithCancel(context.Background()) BC, DB, err = boot.BootApplicationWithRetry(ctx, dbAddress, dbPort, dbName, dbUsername, dbPassword, dbDriver, bcAddress, bcPort, bcConnectionProtocol, bcType, bcBootRetryInterval, bcBootMaxRetry, bcKgTableIncrement, "head", true, bcUniqueIdentifier, bcCheckDb) notifierCh = make(chan os.Signal, 1) @@ -72,10 +73,8 @@ var _ = Describe("Shutdown", func() { Context("When Channels are empty,", func() { It("Should Shutdown Successfully.", func() { go func() { - _, kgCancel := context.WithCancel(context.Background()) - _, hdCancel := context.WithCancel(context.Background()) log.Debug("Starting shutdown chan") - err = shutdown.ShutdownHeadTracking(ctx, hdCancel, kgCancel, notifierCh, maxWaitSecondsShutdown, DB, BC) + err = shutdown.ShutdownHeadTracking(ctx, cancel, notifierCh, maxWaitSecondsShutdown, DB, BC) log.Debug("We have completed the 
shutdown...") Expect(err).ToNot(HaveOccurred()) }() @@ -86,10 +85,8 @@ var _ = Describe("Shutdown", func() { shutdownCh := make(chan bool) //log.SetLevel(log.DebugLevel) go func() { - _, kgCancel := context.WithCancel(context.Background()) - _, hdCancel := context.WithCancel(context.Background()) log.Debug("Starting shutdown chan") - err = shutdown.ShutdownHeadTracking(ctx, hdCancel, kgCancel, notifierCh, maxWaitSecondsShutdown, DB, BC) + err = shutdown.ShutdownHeadTracking(ctx, cancel, notifierCh, maxWaitSecondsShutdown, DB, BC) log.Debug("We have completed the shutdown...") Expect(err).ToNot(HaveOccurred()) shutdownCh <- true @@ -122,9 +119,7 @@ var _ = Describe("Shutdown", func() { //log.SetLevel(log.DebugLevel) go func() { log.Debug("Starting shutdown chan") - _, kgCancel := context.WithCancel(context.Background()) - _, hdCancel := context.WithCancel(context.Background()) - err = shutdown.ShutdownHeadTracking(ctx, hdCancel, kgCancel, notifierCh, maxWaitSecondsShutdown, DB, BC) + err = shutdown.ShutdownHeadTracking(ctx, cancel, notifierCh, maxWaitSecondsShutdown, DB, BC) log.Debug("We have completed the shutdown...") Expect(err).To(MatchError(gracefulshutdown.TimeoutErr(maxWaitSecondsShutdown.String()))) shutdownCh <- true diff --git a/pkg/beaconclient/capturehead.go b/pkg/beaconclient/capturehead.go index a18a70d..9392d45 100644 --- a/pkg/beaconclient/capturehead.go +++ b/pkg/beaconclient/capturehead.go @@ -24,17 +24,23 @@ import ( ) // This function will perform all the heavy lifting for tracking the head of the chain. -func (bc *BeaconClient) CaptureHead(ctx context.Context, skipSee bool) { +func (bc *BeaconClient) CaptureHead(ctx context.Context, maxHeadWorkers int, skipSee bool) { log.Info("We are tracking the head of the chain.") - go bc.handleHead(ctx) + go bc.handleHead(ctx, maxHeadWorkers) go bc.handleReorg(ctx) bc.captureEventTopic(ctx, skipSee) } // Stop the head tracking service. 
-func (bc *BeaconClient) StopHeadTracking(cancel context.CancelFunc) error { - log.Info("We are going to stop tracking the head of chain because of the shutdown signal.") - cancel() - log.Info("Successfully stopped the head tracking service.") - return nil +func (bc *BeaconClient) StopHeadTracking(ctx context.Context, skipSee bool) { + select { + case <-ctx.Done(): + if !skipSee { + bc.HeadTracking.SseClient.Unsubscribe(bc.HeadTracking.MessagesCh) + bc.ReOrgTracking.SseClient.Unsubscribe(bc.ReOrgTracking.MessagesCh) + } + log.Info("Successfully stopped the head tracking service.") + default: + log.Error("The context has not completed....") + } } diff --git a/pkg/beaconclient/capturehead_test.go b/pkg/beaconclient/capturehead_test.go index 107041f..94f089a 100644 --- a/pkg/beaconclient/capturehead_test.go +++ b/pkg/beaconclient/capturehead_test.go @@ -267,8 +267,6 @@ var _ = Describe("Capturehead", Label("head"), func() { Describe("Receiving New Head SSE messages", Label("unit", "behavioral"), func() { Context("Correctly formatted Phase0 Block", Label("leak-head"), func() { It("Should turn it into a struct successfully.", func() { - log.SetLevel(log.DebugLevel) - BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() startGoRoutines := runtime.NumGoroutine() @@ -280,7 +278,7 @@ var _ = Describe("Capturehead", Label("head"), func() { validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectParentRoot, BeaconNodeTester.TestEvents["100"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["100"].CorrectSignedBeaconBlockMhKey) validateBeaconState(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectBeaconStateMhKey) - testStopHeadTracking(cancel, bc, startGoRoutines) + 
testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) @@ -295,7 +293,7 @@ var _ = Describe("Capturehead", Label("head"), func() { BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectParentRoot, BeaconNodeTester.TestEvents["2375703"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["2375703"].CorrectSignedBeaconBlockMhKey) validateBeaconState(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectBeaconStateMhKey) - testStopHeadTracking(cancel, bc, startGoRoutines) + testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) Context("Correctly formatted Altair Test Blocks", func() { @@ -313,7 +311,7 @@ var _ = Describe("Capturehead", Label("head"), func() { defer httpmock.DeactivateAndReset() BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["2375703-dummy-2"].HeadMessage, 74240, maxRetry, 1, 0, 0) - testStopHeadTracking(cancel, bc, startGoRoutines) + testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) @@ -332,7 +330,7 @@ var _ = Describe("Capturehead", Label("head"), func() { defer httpmock.DeactivateAndReset() BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["100-dummy-2"].HeadMessage, 3, maxRetry, 1, 0, 0) - testStopHeadTracking(cancel, bc, startGoRoutines) + testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) @@ -347,7 +345,7 @@ var _ = Describe("Capturehead", Label("head"), func() { BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, maxRetry, 1, 0, 0) BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["101"].HeadMessage, 3, maxRetry, 1, 0, 0) - testStopHeadTracking(cancel, bc, startGoRoutines) + testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) Context("Two consecutive blocks 
with a bad parent", func() { @@ -361,7 +359,7 @@ var _ = Describe("Capturehead", Label("head"), func() { BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, 3, maxRetry, 1, 0, 0) BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["101"].HeadMessage, 3, maxRetry, 1, 1, 1) - testStopHeadTracking(cancel, bc, startGoRoutines) + testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) Context("Phase 0: We have a correctly formated SSZ SignedBeaconBlock and BeaconState", func() { @@ -396,7 +394,7 @@ var _ = Describe("Capturehead", Label("head"), func() { Expect(start).To(Equal(102)) Expect(end).To(Equal(102)) - testStopHeadTracking(cancel, bc, startGoRoutines) + testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) }) @@ -414,7 +412,7 @@ var _ = Describe("Capturehead", Label("head"), func() { start, end := queryKnownGaps(bc.Db, "11", "99") Expect(start).To(Equal(11)) Expect(end).To(Equal(99)) - testStopHeadTracking(cancel, bc, startGoRoutines) + testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) Context("There is a gap at start up spanning multiple incrementing range.", func() { @@ -435,7 +433,7 @@ var _ = Describe("Capturehead", Label("head"), func() { Expect(start).To(Equal(96)) Expect(end).To(Equal(99)) - testStopHeadTracking(cancel, bc, startGoRoutines) + testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) Context("Gaps between two head messages", func() { @@ -455,7 +453,7 @@ var _ = Describe("Capturehead", Label("head"), func() { start, end = queryKnownGaps(bc.Db, "2000101", "2375702") Expect(start).To(Equal(2000101)) Expect(end).To(Equal(2375702)) - testStopHeadTracking(cancel, bc, startGoRoutines) + testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) }) @@ -470,7 +468,7 @@ var _ = Describe("Capturehead", Label("head"), func() { bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") BeaconNodeTester.testMultipleHead(ctx, bc, TestEvents["2375703"].HeadMessage, 
TestEvents["2375703-dummy"].HeadMessage, 74240, maxRetry) - testStopHeadTracking(cancel, bc, startGoRoutines) + testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) Context("Phase0: Multiple head messages for the same slot.", func() { @@ -482,7 +480,7 @@ var _ = Describe("Capturehead", Label("head"), func() { bc := setUpTest(BeaconNodeTester.TestConfig, "99") BeaconNodeTester.testMultipleHead(ctx, bc, TestEvents["100-dummy"].HeadMessage, TestEvents["100"].HeadMessage, 3, maxRetry) - testStopHeadTracking(cancel, bc, startGoRoutines) + testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) Context("Phase 0: Multiple reorgs have occurred on this slot", func() { @@ -494,7 +492,7 @@ var _ = Describe("Capturehead", Label("head"), func() { bc := setUpTest(BeaconNodeTester.TestConfig, "99") BeaconNodeTester.testMultipleReorgs(ctx, bc, TestEvents["100-dummy"].HeadMessage, TestEvents["100-dummy-2"].HeadMessage, TestEvents["100"].HeadMessage, 3, maxRetry) - testStopHeadTracking(cancel, bc, startGoRoutines) + testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) Context("Altair: Multiple reorgs have occurred on this slot", func() { @@ -506,7 +504,7 @@ var _ = Describe("Capturehead", Label("head"), func() { bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") BeaconNodeTester.testMultipleReorgs(ctx, bc, TestEvents["2375703-dummy"].HeadMessage, TestEvents["2375703-dummy-2"].HeadMessage, TestEvents["2375703"].HeadMessage, 74240, maxRetry) - testStopHeadTracking(cancel, bc, startGoRoutines) + testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) }) @@ -899,7 +897,7 @@ func (tbc TestBeaconNode) provideSsz(slotIdentifier string, sszIdentifier string // Helper function to test three reorg messages. There are going to be many functions like this, // Because we need to test the same logic for multiple phases. 
func (tbc TestBeaconNode) testMultipleReorgs(ctx context.Context, bc *beaconclient.BeaconClient, firstHead beaconclient.Head, secondHead beaconclient.Head, thirdHead beaconclient.Head, epoch int, maxRetry int) { - go bc.CaptureHead(ctx, true) + go bc.CaptureHead(ctx, 2, true) time.Sleep(1 * time.Second) log.Info("Sending Messages to BeaconClient") @@ -961,7 +959,7 @@ func (tbc TestBeaconNode) testMultipleReorgs(ctx context.Context, bc *beaconclie // A test to validate a single block was processed correctly func (tbc TestBeaconNode) testProcessBlock(ctx context.Context, bc *beaconclient.BeaconClient, head beaconclient.Head, epoch int, maxRetry int, expectedSuccessInsert uint64, expectedKnownGaps uint64, expectedReorgs uint64) { - go bc.CaptureHead(ctx, true) + go bc.CaptureHead(ctx, 2, true) time.Sleep(1 * time.Second) sendHeadMessage(bc, head, maxRetry, expectedSuccessInsert) @@ -991,7 +989,7 @@ func (tbc TestBeaconNode) testProcessBlock(ctx context.Context, bc *beaconclient // A test that ensures that if two HeadMessages occur for a single slot they are marked // as proposed and forked correctly. func (tbc TestBeaconNode) testMultipleHead(ctx context.Context, bc *beaconclient.BeaconClient, firstHead beaconclient.Head, secondHead beaconclient.Head, epoch int, maxRetry int) { - go bc.CaptureHead(ctx, true) + go bc.CaptureHead(ctx, 2, true) time.Sleep(1 * time.Second) sendHeadMessage(bc, firstHead, maxRetry, 1) @@ -1019,7 +1017,7 @@ func (tbc TestBeaconNode) testMultipleHead(ctx context.Context, bc *beaconclient // as proposed and forked correctly. 
func (tbc TestBeaconNode) testKnownGapsMessages(ctx context.Context, bc *beaconclient.BeaconClient, tableIncrement int, expectedEntries uint64, maxRetry int, msg ...beaconclient.Head) { bc.KnownGapTableIncrement = tableIncrement - go bc.CaptureHead(ctx, true) + go bc.CaptureHead(ctx, 2, true) time.Sleep(1 * time.Second) for _, headMsg := range msg { @@ -1060,10 +1058,10 @@ func testSszRoot(msg Message) { } // A make shift function to stop head tracking and insure we dont have any goroutine leaks -func testStopHeadTracking(cancel context.CancelFunc, bc *beaconclient.BeaconClient, startGoRoutines int) { +func testStopHeadTracking(ctx context.Context, cancel context.CancelFunc, bc *beaconclient.BeaconClient, startGoRoutines int) { bc.Db.Close() - err := bc.StopHeadTracking(cancel) - Expect(err).ToNot(HaveOccurred()) + cancel() + bc.StopHeadTracking(ctx, true) time.Sleep(3 * time.Second) endNum := runtime.NumGoroutine() diff --git a/pkg/beaconclient/capturehistoric.go b/pkg/beaconclient/capturehistoric.go index 50855de..a7a56a2 100644 --- a/pkg/beaconclient/capturehistoric.go +++ b/pkg/beaconclient/capturehistoric.go @@ -37,14 +37,18 @@ func (bc *BeaconClient) CaptureHistoric(ctx context.Context, maxWorkers int) []e } // This function will perform all the necessary clean up tasks for stopping historical processing. -func (bc *BeaconClient) StopHistoric(cancel context.CancelFunc) error { - log.Info("We are stopping the historical processing service.") - cancel() - err := bc.HistoricalProcess.releaseDbLocks() - if err != nil { - loghelper.LogError(err).WithField("uniqueIdentifier", bc.UniqueNodeIdentifier).Error("We were unable to remove the locks from the eth_beacon.historic_processing table. 
Manual Intervention is needed!") +func (bc *BeaconClient) StopHistoric(ctx context.Context) error { + select { + case <-ctx.Done(): + log.Info("We are stopping the historical processing service.") + err := bc.HistoricalProcess.releaseDbLocks() + if err != nil { + loghelper.LogError(err).WithField("uniqueIdentifier", bc.UniqueNodeIdentifier).Error("We were unable to remove the locks from the eth_beacon.historic_processing table. Manual Intervention is needed!") + } + return nil + default: + return fmt.Errorf("Tried to stop historic before the context ended...") } - return nil } // An interface to enforce any batch processing. Currently there are two use cases for this. diff --git a/pkg/beaconclient/capturehistoric_test.go b/pkg/beaconclient/capturehistoric_test.go index 5f36039..565f97f 100644 --- a/pkg/beaconclient/capturehistoric_test.go +++ b/pkg/beaconclient/capturehistoric_test.go @@ -143,7 +143,7 @@ var _ = Describe("Capturehistoric", func() { time.Sleep(2 * time.Second) validatePopularBatchBlocks(bc) - testStopHeadTracking(cancel, bc, startGoRoutines) + testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) @@ -168,7 +168,7 @@ var _ = Describe("Capturehistoric", func() { time.Sleep(2 * time.Second) validatePopularBatchBlocks(bc) - testStopHeadTracking(cancel, bc, startGoRoutines) + testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) Context("When it recieves a known Gaps, historic and head message (in order)", func() { @@ -192,7 +192,7 @@ var _ = Describe("Capturehistoric", func() { time.Sleep(2 * time.Second) validatePopularBatchBlocks(bc) - testStopHeadTracking(cancel, bc, startGoRoutines) + testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) }) @@ -228,25 +228,20 @@ func (tbc TestBeaconNode) writeEventToHistoricProcess(bc *beaconclient.BeaconCli // Start the CaptureHistoric function, and check for the correct inserted slots. 
func (tbc TestBeaconNode) runHistoricalProcess(bc *beaconclient.BeaconClient, maxWorkers int, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) { + startGoRoutines := runtime.NumGoroutine() ctx, cancel := context.WithCancel(context.Background()) go bc.CaptureHistoric(ctx, maxWorkers) validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError) - log.Debug("Calling the stop function for historical processing..") - err := bc.StopHistoric(cancel) - time.Sleep(5 * time.Second) - Expect(err).ToNot(HaveOccurred()) - validateAllRowsCheckedOut(bc.Db, hpCheckCheckedOutStmt) + testStopHistoricTracking(ctx, cancel, bc, startGoRoutines) } // Wrapper function that processes knownGaps func (tbc TestBeaconNode) runKnownGapsProcess(bc *beaconclient.BeaconClient, maxWorkers int, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) { + startGoRoutines := runtime.NumGoroutine() ctx, cancel := context.WithCancel(context.Background()) go bc.ProcessKnownGaps(ctx, maxWorkers) validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError) - err := bc.StopKnownGapsProcessing(cancel) - time.Sleep(5 * time.Second) - Expect(err).ToNot(HaveOccurred()) - validateAllRowsCheckedOut(bc.Db, kgCheckCheckedOutStmt) + testStopHistoricTracking(ctx, cancel, bc, startGoRoutines) } func validateMetrics(bc *beaconclient.BeaconClient, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) { @@ -316,3 +311,21 @@ func validateAllRowsCheckedOut(db sql.Database, checkStmt string) { Expect(err).ToNot(HaveOccurred()) Expect(rows).To(Equal(int64(0))) } + +// A make shift function to stop head tracking and insure we dont have any goroutine leaks +func testStopHistoricTracking(ctx context.Context, cancel context.CancelFunc, bc *beaconclient.BeaconClient, startGoRoutines int) { + log.Debug("Calling the stop function 
for historical processing..") + cancel() + err := bc.StopKnownGapsProcessing(ctx) + Expect(err).ToNot(HaveOccurred()) + time.Sleep(5 * time.Second) + validateAllRowsCheckedOut(bc.Db, kgCheckCheckedOutStmt) + err = bc.Db.Close() + Expect(err).ToNot(HaveOccurred()) + + time.Sleep(3 * time.Second) + endNum := runtime.NumGoroutine() + + //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) + Expect(startGoRoutines).To(Equal(endNum)) +} diff --git a/pkg/beaconclient/processevents.go b/pkg/beaconclient/processevents.go index 15d183f..9aa604e 100644 --- a/pkg/beaconclient/processevents.go +++ b/pkg/beaconclient/processevents.go @@ -24,6 +24,7 @@ import ( "strconv" log "github.com/sirupsen/logrus" + "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/database/sql" ) // This function will perform the necessary steps to handle a reorg. @@ -42,8 +43,14 @@ func (bc *BeaconClient) handleReorg(ctx context.Context) { } // This function will handle the latest head event. -func (bc *BeaconClient) handleHead(ctx context.Context) { +func (bc *BeaconClient) handleHead(ctx context.Context, maxWorkers int) { log.Info("Starting to process head.") + + workCh := make(chan workParams) + log.WithField("workerNumber", maxWorkers).Info("Creating Workers") + for i := 1; i < maxWorkers; i++ { + go bc.headBlockProcessor(ctx, workCh) + } errorSlots := 0 for { select { @@ -77,9 +84,8 @@ func (bc *BeaconClient) handleHead(ctx context.Context) { bc.StartingSlot = slot } - go processHeadSlot(ctx, bc.Db, bc.ServerEndpoint, slot, head.Block, head.State, bc.PreviousSlot, bc.PreviousBlockRoot, bc.Metrics, bc.KnownGapTableIncrement, bc.CheckDb) - - log.WithFields(log.Fields{"head": head.Slot}).Debug("We finished calling processHeadSlot.") + workCh <- workParams{db: bc.Db, serverEndpoint: bc.ServerEndpoint, slot: slot, blockRoot: head.Block, stateRoot: head.State, previousSlot: bc.PreviousSlot, previousBlockRoot: bc.PreviousBlockRoot, metrics: bc.Metrics, knownGapsTableIncrement: bc.KnownGapTableIncrement, 
checkDb: bc.CheckDb} + log.WithFields(log.Fields{"head": head.Slot}).Debug("We finished sending this slot to the workCh") // Update the previous block bc.PreviousSlot = slot @@ -87,3 +93,29 @@ func (bc *BeaconClient) handleHead(ctx context.Context) { } } } + +// A worker that will process head slots. +func (bc *BeaconClient) headBlockProcessor(ctx context.Context, workCh <-chan workParams) { + for { + select { + case <-ctx.Done(): + return + case wp := <-workCh: + processHeadSlot(ctx, wp.db, wp.serverEndpoint, wp.slot, wp.blockRoot, wp.stateRoot, wp.previousSlot, wp.previousBlockRoot, wp.metrics, wp.knownGapsTableIncrement, wp.checkDb) + } + } +} + +// A struct used to pass parameters to the worker. +type workParams struct { + db sql.Database + serverEndpoint string + slot int + blockRoot string + stateRoot string + previousSlot int + previousBlockRoot string + metrics *BeaconClientMetrics + knownGapsTableIncrement int + checkDb bool +} diff --git a/pkg/beaconclient/processknowngaps.go b/pkg/beaconclient/processknowngaps.go index 343fc4a..f389e49 100644 --- a/pkg/beaconclient/processknowngaps.go +++ b/pkg/beaconclient/processknowngaps.go @@ -20,6 +20,7 @@ package beaconclient import ( "context" + "fmt" "strconv" log "github.com/sirupsen/logrus" @@ -67,14 +68,18 @@ func (bc *BeaconClient) ProcessKnownGaps(ctx context.Context, maxWorkers int) [] } // This function will perform all the necessary clean up tasks for stopping historical processing. -func (bc *BeaconClient) StopKnownGapsProcessing(cancel context.CancelFunc) error { - log.Info("We are stopping the known gaps processing service.") - cancel() - err := bc.KnownGapsProcess.releaseDbLocks() - if err != nil { - loghelper.LogError(err).WithField("uniqueIdentifier", bc.UniqueNodeIdentifier).Error("We were unable to remove the locks from the eth_beacon.known_gaps table. 
Manual Intervention is needed!") +func (bc *BeaconClient) StopKnownGapsProcessing(ctx context.Context) error { + select { + case <-ctx.Done(): + log.Info("We are stopping the known gaps processing service.") + err := bc.KnownGapsProcess.releaseDbLocks() + if err != nil { + loghelper.LogError(err).WithField("uniqueIdentifier", bc.UniqueNodeIdentifier).Error("We were unable to remove the locks from the eth_beacon.known_gaps table. Manual Intervention is needed!") + } + return nil + default: + return fmt.Errorf("Tried to stop knownGaps Processing without closing the context..") } - return nil } // Get a single row of historical slots from the table. diff --git a/pkg/beaconclient/queryserver.go b/pkg/beaconclient/queryserver.go index 4962d2b..b21cacf 100644 --- a/pkg/beaconclient/queryserver.go +++ b/pkg/beaconclient/queryserver.go @@ -44,7 +44,12 @@ func querySsz(endpoint string, slot string) (*[]byte, int, error) { } defer response.Body.Close() rc := response.StatusCode - body, err := ioutil.ReadAll(response.Body) + + var body []byte + //io.Copy(body, response.Body) + //bytes.buffer... 
+ _, err = response.Body.Read(body) + //body, err := ioutil.ReadAll(response.Body) if err != nil { loghelper.LogSlotError(slot, err).Error("Unable to turn response into a []bytes array!") return nil, rc, fmt.Errorf("Unable to turn response into a []bytes array!: %s", err.Error()) diff --git a/pkg/beaconclient/systemvalidation_test.go b/pkg/beaconclient/systemvalidation_test.go index fe67772..5b14d23 100644 --- a/pkg/beaconclient/systemvalidation_test.go +++ b/pkg/beaconclient/systemvalidation_test.go @@ -67,9 +67,9 @@ func getEnvInt(envVar string) int { func processProdHeadBlocks(bc *beaconclient.BeaconClient, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) { startGoRoutines := runtime.NumGoroutine() ctx, cancel := context.WithCancel(context.Background()) - go bc.CaptureHead(ctx, false) + go bc.CaptureHead(ctx, 2, false) time.Sleep(1 * time.Second) validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError) - testStopHeadTracking(cancel, bc, startGoRoutines) + testStopHeadTracking(ctx, cancel, bc, startGoRoutines) } diff --git a/pkg/gracefulshutdown/gracefulshutdown.go b/pkg/gracefulshutdown/gracefulshutdown.go index 29fe19e..b5a2f9e 100644 --- a/pkg/gracefulshutdown/gracefulshutdown.go +++ b/pkg/gracefulshutdown/gracefulshutdown.go @@ -45,7 +45,11 @@ func Shutdown(ctx context.Context, notifierCh chan os.Signal, timeout time.Durat // add any other syscalls that you want to be notified with signal.Notify(notifierCh, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP) - <-notifierCh + // Wait for one or the other... 
+ select { + case <-notifierCh: + case <-ctx.Done(): + } log.Info("Shutting Down your application") -- 2.45.2 From 43e0c71639effd023a4f0f548d0614cb3131ce4a Mon Sep 17 00:00:00 2001 From: Abdul Rabbani Date: Wed, 22 Jun 2022 12:36:49 -0400 Subject: [PATCH 07/18] Improve test stop --- internal/shutdown/shutdown.go | 4 +- pkg/beaconclient/capturehead_test.go | 110 ++++++++-------------- pkg/beaconclient/capturehistoric.go | 2 +- pkg/beaconclient/capturehistoric_test.go | 48 +++++----- pkg/beaconclient/queryserver.go | 6 +- pkg/beaconclient/systemvalidation_test.go | 3 +- 6 files changed, 73 insertions(+), 100 deletions(-) diff --git a/internal/shutdown/shutdown.go b/internal/shutdown/shutdown.go index fc978af..47f62d1 100644 --- a/internal/shutdown/shutdown.go +++ b/internal/shutdown/shutdown.go @@ -66,7 +66,7 @@ func ShutdownHistoricProcessing(ctx context.Context, cancel context.CancelFunc, "beaconClient": func(ctx context.Context) error { defer DB.Close() cancel() - err := BC.StopHistoric(ctx) + err := BC.StopHistoricProcess(ctx) if err != nil { loghelper.LogError(err).Error("Unable to stop processing historic") } @@ -89,7 +89,7 @@ func ShutdownFull(ctx context.Context, cancel context.CancelFunc, notifierCh cha "beaconClient": func(ctx context.Context) error { defer DB.Close() cancel() - err := BC.StopHistoric(ctx) + err := BC.StopHistoricProcess(ctx) if err != nil { loghelper.LogError(err).Error("Unable to stop processing historic") } diff --git a/pkg/beaconclient/capturehead_test.go b/pkg/beaconclient/capturehead_test.go index 94f089a..cc75ebd 100644 --- a/pkg/beaconclient/capturehead_test.go +++ b/pkg/beaconclient/capturehead_test.go @@ -269,49 +269,37 @@ var _ = Describe("Capturehead", Label("head"), func() { It("Should turn it into a struct successfully.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, 
BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - startGoRoutines := runtime.NumGoroutine() - ctx, cancel := context.WithCancel(context.Background()) - bc := setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, maxRetry, 1, 0, 0) validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectParentRoot, BeaconNodeTester.TestEvents["100"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["100"].CorrectSignedBeaconBlockMhKey) validateBeaconState(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectBeaconStateMhKey) - testStopHeadTracking(ctx, cancel, bc, startGoRoutines) - }) }) Context("Correctly formatted Altair Block", func() { It("Should turn it into a struct successfully.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - startGoRoutines := runtime.NumGoroutine() - ctx, cancel := context.WithCancel(context.Background()) bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") - BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectParentRoot, BeaconNodeTester.TestEvents["2375703"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["2375703"].CorrectSignedBeaconBlockMhKey) validateBeaconState(bc, 
BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectBeaconStateMhKey) - testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) Context("Correctly formatted Altair Test Blocks", func() { It("Should turn it into a struct successfully.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - startGoRoutines := runtime.NumGoroutine() - ctx, cancel := context.WithCancel(context.Background()) bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") - BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["2375703-dummy"].HeadMessage, 74240, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703-dummy"].HeadMessage, 74240, maxRetry, 1, 0, 0) bc = setUpTest(BeaconNodeTester.TestConfig, "2375702") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["2375703-dummy-2"].HeadMessage, 74240, maxRetry, 1, 0, 0) - - testStopHeadTracking(ctx, cancel, bc, startGoRoutines) + BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703-dummy-2"].HeadMessage, 74240, maxRetry, 1, 0, 0) }) }) @@ -319,18 +307,15 @@ var _ = Describe("Capturehead", Label("head"), func() { It("Should turn it into a struct successfully.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - 
startGoRoutines := runtime.NumGoroutine() - ctx, cancel := context.WithCancel(context.Background()) bc := setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, 3, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, 3, maxRetry, 1, 0, 0) bc = setUpTest(BeaconNodeTester.TestConfig, "99") BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["100-dummy-2"].HeadMessage, 3, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100-dummy-2"].HeadMessage, 3, maxRetry, 1, 0, 0) - testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) @@ -338,28 +323,22 @@ var _ = Describe("Capturehead", Label("head"), func() { It("Should handle both blocks correctly, without any reorgs or known_gaps", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - startGoRoutines := runtime.NumGoroutine() - ctx, cancel := context.WithCancel(context.Background()) bc := setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, maxRetry, 1, 0, 0) - BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["101"].HeadMessage, 3, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, 
3, maxRetry, 1, 0, 0) - testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) Context("Two consecutive blocks with a bad parent", func() { It("Should add the previous block to the knownGaps table.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - startGoRoutines := runtime.NumGoroutine() - ctx, cancel := context.WithCancel(context.Background()) bc := setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, 3, maxRetry, 1, 0, 0) - BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["101"].HeadMessage, 3, maxRetry, 1, 1, 1) + BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, 3, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, 3, maxRetry, 1, 1, 1) - testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) Context("Phase 0: We have a correctly formated SSZ SignedBeaconBlock and BeaconState", func() { @@ -381,11 +360,9 @@ var _ = Describe("Capturehead", Label("head"), func() { It("Should return an error, and add the slot to the knownGaps table.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - startGoRoutines := runtime.NumGoroutine() - ctx, cancel := context.WithCancel(context.Background()) bc := setUpTest(BeaconNodeTester.TestConfig, "101") - BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["102-wrong-ssz-1"].HeadMessage, 3, maxRetry, 0, 1, 0) + BeaconNodeTester.testProcessBlock(bc, 
BeaconNodeTester.TestEvents["102-wrong-ssz-1"].HeadMessage, 3, maxRetry, 0, 1, 0) knownGapCount := countKnownGapsTable(bc.Db) Expect(knownGapCount).To(Equal(1)) @@ -394,7 +371,6 @@ var _ = Describe("Capturehead", Label("head"), func() { Expect(start).To(Equal(102)) Expect(end).To(Equal(102)) - testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) }) @@ -404,26 +380,21 @@ var _ = Describe("Capturehead", Label("head"), func() { It("Should add only a single entry to the knownGaps table.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - startGoRoutines := runtime.NumGoroutine() - ctx, cancel := context.WithCancel(context.Background()) bc := setUpTest(BeaconNodeTester.TestConfig, "10") - BeaconNodeTester.testKnownGapsMessages(ctx, bc, 100, 1, maxRetry, BeaconNodeTester.TestEvents["100"].HeadMessage) + BeaconNodeTester.testKnownGapsMessages(bc, 100, 1, maxRetry, BeaconNodeTester.TestEvents["100"].HeadMessage) start, end := queryKnownGaps(bc.Db, "11", "99") Expect(start).To(Equal(11)) Expect(end).To(Equal(99)) - testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) Context("There is a gap at start up spanning multiple incrementing range.", func() { It("Should add multiple entries to the knownGaps table.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - startGoRoutines := runtime.NumGoroutine() - ctx, cancel := context.WithCancel(context.Background()) bc := setUpTest(BeaconNodeTester.TestConfig, "5") - BeaconNodeTester.testKnownGapsMessages(ctx, bc, 10, 10, maxRetry, BeaconNodeTester.TestEvents["100"].HeadMessage) + 
BeaconNodeTester.testKnownGapsMessages(bc, 10, 10, maxRetry, BeaconNodeTester.TestEvents["100"].HeadMessage) start, end := queryKnownGaps(bc.Db, "6", "16") Expect(start).To(Equal(6)) @@ -432,19 +403,15 @@ var _ = Describe("Capturehead", Label("head"), func() { start, end = queryKnownGaps(bc.Db, "96", "99") Expect(start).To(Equal(96)) Expect(end).To(Equal(99)) - - testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) Context("Gaps between two head messages", func() { It("Should add the slots in-between", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - startGoRoutines := runtime.NumGoroutine() - ctx, cancel := context.WithCancel(context.Background()) bc := setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testKnownGapsMessages(ctx, bc, 1000000, 3, maxRetry, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].HeadMessage) + BeaconNodeTester.testKnownGapsMessages(bc, 1000000, 3, maxRetry, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].HeadMessage) start, end := queryKnownGaps(bc.Db, "101", "1000101") Expect(start).To(Equal(101)) @@ -453,7 +420,6 @@ var _ = Describe("Capturehead", Label("head"), func() { start, end = queryKnownGaps(bc.Db, "2000101", "2375702") Expect(start).To(Equal(2000101)) Expect(end).To(Equal(2375702)) - testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) }) @@ -463,48 +429,36 @@ var _ = Describe("Capturehead", Label("head"), func() { It("The previous block should be marked as 'forked', the new block should be the only one marked as 'proposed'.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, 
BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - startGoRoutines := runtime.NumGoroutine() - ctx, cancel := context.WithCancel(context.Background()) bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") - BeaconNodeTester.testMultipleHead(ctx, bc, TestEvents["2375703"].HeadMessage, TestEvents["2375703-dummy"].HeadMessage, 74240, maxRetry) - testStopHeadTracking(ctx, cancel, bc, startGoRoutines) + BeaconNodeTester.testMultipleHead(bc, TestEvents["2375703"].HeadMessage, TestEvents["2375703-dummy"].HeadMessage, 74240, maxRetry) }) }) Context("Phase0: Multiple head messages for the same slot.", func() { It("The previous block should be marked as 'forked', the new block should be the only one marked as 'proposed'.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - startGoRoutines := runtime.NumGoroutine() - ctx, cancel := context.WithCancel(context.Background()) bc := setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testMultipleHead(ctx, bc, TestEvents["100-dummy"].HeadMessage, TestEvents["100"].HeadMessage, 3, maxRetry) - testStopHeadTracking(ctx, cancel, bc, startGoRoutines) + BeaconNodeTester.testMultipleHead(bc, TestEvents["100-dummy"].HeadMessage, TestEvents["100"].HeadMessage, 3, maxRetry) }) }) Context("Phase 0: Multiple reorgs have occurred on this slot", func() { It("The previous blocks should be marked as 'forked', the new block should be the only one marked as 'proposed'.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - startGoRoutines := runtime.NumGoroutine() - ctx, cancel := 
context.WithCancel(context.Background()) bc := setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testMultipleReorgs(ctx, bc, TestEvents["100-dummy"].HeadMessage, TestEvents["100-dummy-2"].HeadMessage, TestEvents["100"].HeadMessage, 3, maxRetry) - testStopHeadTracking(ctx, cancel, bc, startGoRoutines) + BeaconNodeTester.testMultipleReorgs(bc, TestEvents["100-dummy"].HeadMessage, TestEvents["100-dummy-2"].HeadMessage, TestEvents["100"].HeadMessage, 3, maxRetry) }) }) Context("Altair: Multiple reorgs have occurred on this slot", func() { It("The previous blocks should be marked as 'forked', the new block should be the only one marked as 'proposed'.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - startGoRoutines := runtime.NumGoroutine() - ctx, cancel := context.WithCancel(context.Background()) bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") - BeaconNodeTester.testMultipleReorgs(ctx, bc, TestEvents["2375703-dummy"].HeadMessage, TestEvents["2375703-dummy-2"].HeadMessage, TestEvents["2375703"].HeadMessage, 74240, maxRetry) - testStopHeadTracking(ctx, cancel, bc, startGoRoutines) + BeaconNodeTester.testMultipleReorgs(bc, TestEvents["2375703-dummy"].HeadMessage, TestEvents["2375703-dummy-2"].HeadMessage, TestEvents["2375703"].HeadMessage, 74240, maxRetry) }) }) }) @@ -896,7 +850,9 @@ func (tbc TestBeaconNode) provideSsz(slotIdentifier string, sszIdentifier string // Helper function to test three reorg messages. There are going to be many functions like this, // Because we need to test the same logic for multiple phases. 
-func (tbc TestBeaconNode) testMultipleReorgs(ctx context.Context, bc *beaconclient.BeaconClient, firstHead beaconclient.Head, secondHead beaconclient.Head, thirdHead beaconclient.Head, epoch int, maxRetry int) { +func (tbc TestBeaconNode) testMultipleReorgs(bc *beaconclient.BeaconClient, firstHead beaconclient.Head, secondHead beaconclient.Head, thirdHead beaconclient.Head, epoch int, maxRetry int) { + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) go bc.CaptureHead(ctx, 2, true) time.Sleep(1 * time.Second) @@ -955,10 +911,14 @@ func (tbc TestBeaconNode) testMultipleReorgs(ctx context.Context, bc *beaconclie validateSlot(bc, secondHead, epoch, "proposed") validateSlot(bc, thirdHead, epoch, "forked") + cancel() + testStopHeadTracking(ctx, bc, startGoRoutines) } // A test to validate a single block was processed correctly -func (tbc TestBeaconNode) testProcessBlock(ctx context.Context, bc *beaconclient.BeaconClient, head beaconclient.Head, epoch int, maxRetry int, expectedSuccessInsert uint64, expectedKnownGaps uint64, expectedReorgs uint64) { +func (tbc TestBeaconNode) testProcessBlock(bc *beaconclient.BeaconClient, head beaconclient.Head, epoch int, maxRetry int, expectedSuccessInsert uint64, expectedKnownGaps uint64, expectedReorgs uint64) { + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) go bc.CaptureHead(ctx, 2, true) time.Sleep(1 * time.Second) sendHeadMessage(bc, head, maxRetry, expectedSuccessInsert) @@ -984,11 +944,15 @@ func (tbc TestBeaconNode) testProcessBlock(ctx context.Context, bc *beaconclient if expectedSuccessInsert > 0 { validateSlot(bc, head, epoch, "proposed") } + cancel() + testStopHeadTracking(ctx, bc, startGoRoutines) } // A test that ensures that if two HeadMessages occur for a single slot they are marked // as proposed and forked correctly. 
-func (tbc TestBeaconNode) testMultipleHead(ctx context.Context, bc *beaconclient.BeaconClient, firstHead beaconclient.Head, secondHead beaconclient.Head, epoch int, maxRetry int) { +func (tbc TestBeaconNode) testMultipleHead(bc *beaconclient.BeaconClient, firstHead beaconclient.Head, secondHead beaconclient.Head, epoch int, maxRetry int) { + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) go bc.CaptureHead(ctx, 2, true) time.Sleep(1 * time.Second) @@ -1011,12 +975,17 @@ func (tbc TestBeaconNode) testMultipleHead(ctx context.Context, bc *beaconclient log.Info("Checking Altair to make sure the fork was marked properly.") validateSlot(bc, firstHead, epoch, "forked") validateSlot(bc, secondHead, epoch, "proposed") + cancel() + testStopHeadTracking(ctx, bc, startGoRoutines) } // A test that ensures that if two HeadMessages occur for a single slot they are marked // as proposed and forked correctly. -func (tbc TestBeaconNode) testKnownGapsMessages(ctx context.Context, bc *beaconclient.BeaconClient, tableIncrement int, expectedEntries uint64, maxRetry int, msg ...beaconclient.Head) { +func (tbc TestBeaconNode) testKnownGapsMessages(bc *beaconclient.BeaconClient, tableIncrement int, expectedEntries uint64, maxRetry int, msg ...beaconclient.Head) { bc.KnownGapTableIncrement = tableIncrement + + startGoRoutines := runtime.NumGoroutine() + ctx, cancel := context.WithCancel(context.Background()) go bc.CaptureHead(ctx, 2, true) time.Sleep(1 * time.Second) @@ -1040,6 +1009,8 @@ func (tbc TestBeaconNode) testKnownGapsMessages(ctx context.Context, bc *beaconc if atomic.LoadUint64(&bc.Metrics.ReorgInserts) != 0 { Fail("We found reorgs when we didn't expect it") } + cancel() + testStopHeadTracking(ctx, bc, startGoRoutines) } // This function will make sure we are properly able to get the SszRoot of the SignedBeaconBlock and the BeaconState. 
@@ -1058,9 +1029,8 @@ func testSszRoot(msg Message) { } // A make shift function to stop head tracking and insure we dont have any goroutine leaks -func testStopHeadTracking(ctx context.Context, cancel context.CancelFunc, bc *beaconclient.BeaconClient, startGoRoutines int) { +func testStopHeadTracking(ctx context.Context, bc *beaconclient.BeaconClient, startGoRoutines int) { bc.Db.Close() - cancel() bc.StopHeadTracking(ctx, true) time.Sleep(3 * time.Second) diff --git a/pkg/beaconclient/capturehistoric.go b/pkg/beaconclient/capturehistoric.go index a7a56a2..defcb9e 100644 --- a/pkg/beaconclient/capturehistoric.go +++ b/pkg/beaconclient/capturehistoric.go @@ -37,7 +37,7 @@ func (bc *BeaconClient) CaptureHistoric(ctx context.Context, maxWorkers int) []e } // This function will perform all the necessary clean up tasks for stopping historical processing. -func (bc *BeaconClient) StopHistoric(ctx context.Context) error { +func (bc *BeaconClient) StopHistoricProcess(ctx context.Context) error { select { case <-ctx.Done(): log.Info("We are stopping the historical processing service.") diff --git a/pkg/beaconclient/capturehistoric_test.go b/pkg/beaconclient/capturehistoric_test.go index 565f97f..54e1a3c 100644 --- a/pkg/beaconclient/capturehistoric_test.go +++ b/pkg/beaconclient/capturehistoric_test.go @@ -27,7 +27,6 @@ var _ = Describe("Capturehistoric", func() { It("Successfully Process the Blocks", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - startNum := runtime.NumGoroutine() bc := setUpTest(BeaconNodeTester.TestConfig, "99") BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 101, 10) @@ -36,13 +35,8 @@ var _ = Describe("Capturehistoric", func() { BeaconNodeTester.writeEventToHistoricProcess(bc, 2375703, 2375703, 10) 
BeaconNodeTester.runHistoricalProcess(bc, 2, 3, 0, 0, 0) - time.Sleep(2 * time.Second) validatePopularBatchBlocks(bc) - time.Sleep(3 * time.Second) - bc.Db.Close() - endNum := runtime.NumGoroutine() - Expect(startNum).To(Equal(endNum)) }) }) Context("When the start block is greater than the endBlock", func() { @@ -125,12 +119,10 @@ var _ = Describe("Capturehistoric", func() { It("Should process them all successfully.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - startGoRoutines := runtime.NumGoroutine() - ctx, cancel := context.WithCancel(context.Background()) bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") // Head - BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) // Historical BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 100, 10) @@ -143,16 +135,12 @@ var _ = Describe("Capturehistoric", func() { time.Sleep(2 * time.Second) validatePopularBatchBlocks(bc) - testStopHeadTracking(ctx, cancel, bc, startGoRoutines) - }) }) Context("When it recieves a historic, head and known Gaps message (in order)", func() { It("Should process them all successfully.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - startGoRoutines := runtime.NumGoroutine() - ctx, cancel := context.WithCancel(context.Background()) bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") // Historical @@ -160,7 +148,7 @@ var _ = Describe("Capturehistoric", func() { 
BeaconNodeTester.runHistoricalProcess(bc, 2, 1, 0, 0, 0) // Head - BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) // Known Gaps BeaconNodeTester.writeEventToKnownGaps(bc, 101, 101) @@ -168,15 +156,12 @@ var _ = Describe("Capturehistoric", func() { time.Sleep(2 * time.Second) validatePopularBatchBlocks(bc) - testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) Context("When it recieves a known Gaps, historic and head message (in order)", func() { It("Should process them all successfully.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - startGoRoutines := runtime.NumGoroutine() - ctx, cancel := context.WithCancel(context.Background()) bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") // Known Gaps @@ -188,11 +173,10 @@ var _ = Describe("Capturehistoric", func() { BeaconNodeTester.runHistoricalProcess(bc, 2, 2, 0, 0, 0) // Head - BeaconNodeTester.testProcessBlock(ctx, bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) time.Sleep(2 * time.Second) validatePopularBatchBlocks(bc) - testStopHeadTracking(ctx, cancel, bc, startGoRoutines) }) }) }) @@ -232,7 +216,8 @@ func (tbc TestBeaconNode) runHistoricalProcess(bc *beaconclient.BeaconClient, ma ctx, cancel := context.WithCancel(context.Background()) go bc.CaptureHistoric(ctx, maxWorkers) validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError) - testStopHistoricTracking(ctx, cancel, bc, startGoRoutines) + 
cancel() + testStopHistoricProcessing(ctx, bc, startGoRoutines) } // Wrapper function that processes knownGaps @@ -241,7 +226,8 @@ func (tbc TestBeaconNode) runKnownGapsProcess(bc *beaconclient.BeaconClient, max ctx, cancel := context.WithCancel(context.Background()) go bc.ProcessKnownGaps(ctx, maxWorkers) validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError) - testStopHistoricTracking(ctx, cancel, bc, startGoRoutines) + cancel() + testStopKnownGapProcessing(ctx, bc, startGoRoutines) } func validateMetrics(bc *beaconclient.BeaconClient, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) { @@ -313,9 +299,25 @@ func validateAllRowsCheckedOut(db sql.Database, checkStmt string) { } // A make shift function to stop head tracking and insure we dont have any goroutine leaks -func testStopHistoricTracking(ctx context.Context, cancel context.CancelFunc, bc *beaconclient.BeaconClient, startGoRoutines int) { +func testStopHistoricProcessing(ctx context.Context, bc *beaconclient.BeaconClient, startGoRoutines int) { log.Debug("Calling the stop function for historical processing..") - cancel() + err := bc.StopHistoricProcess(ctx) + Expect(err).ToNot(HaveOccurred()) + time.Sleep(5 * time.Second) + validateAllRowsCheckedOut(bc.Db, hpCheckCheckedOutStmt) + err = bc.Db.Close() + Expect(err).ToNot(HaveOccurred()) + + time.Sleep(3 * time.Second) + endNum := runtime.NumGoroutine() + + //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) + Expect(startGoRoutines).To(Equal(endNum)) +} + +// A make shift function to stop head tracking and insure we dont have any goroutine leaks +func testStopKnownGapProcessing(ctx context.Context, bc *beaconclient.BeaconClient, startGoRoutines int) { + log.Debug("Calling the stop function for knownGaps processing..") err := bc.StopKnownGapsProcessing(ctx) Expect(err).ToNot(HaveOccurred()) time.Sleep(5 * time.Second) diff --git a/pkg/beaconclient/queryserver.go 
b/pkg/beaconclient/queryserver.go index b21cacf..76bb14e 100644 --- a/pkg/beaconclient/queryserver.go +++ b/pkg/beaconclient/queryserver.go @@ -45,11 +45,11 @@ func querySsz(endpoint string, slot string) (*[]byte, int, error) { defer response.Body.Close() rc := response.StatusCode - var body []byte + //var body []byte //io.Copy(body, response.Body) //bytes.buffer... - _, err = response.Body.Read(body) - //body, err := ioutil.ReadAll(response.Body) + //_, err = response.Body.Read(body) + body, err := ioutil.ReadAll(response.Body) if err != nil { loghelper.LogSlotError(slot, err).Error("Unable to turn response into a []bytes array!") return nil, rc, fmt.Errorf("Unable to turn response into a []bytes array!: %s", err.Error()) diff --git a/pkg/beaconclient/systemvalidation_test.go b/pkg/beaconclient/systemvalidation_test.go index 5b14d23..3c5cb17 100644 --- a/pkg/beaconclient/systemvalidation_test.go +++ b/pkg/beaconclient/systemvalidation_test.go @@ -71,5 +71,6 @@ func processProdHeadBlocks(bc *beaconclient.BeaconClient, expectedInserts, expec time.Sleep(1 * time.Second) validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError) - testStopHeadTracking(ctx, cancel, bc, startGoRoutines) + cancel() + testStopHeadTracking(ctx, bc, startGoRoutines) } -- 2.45.2 From 6cecc69520121a4a019bfb0a8aba13b2a6855c6a Mon Sep 17 00:00:00 2001 From: Abdul Rabbani Date: Wed, 22 Jun 2022 14:09:39 -0400 Subject: [PATCH 08/18] Don't close DB --- pkg/beaconclient/capturehead_test.go | 19 ++++++++++++------- pkg/beaconclient/capturehistoric_test.go | 10 ++++------ 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/pkg/beaconclient/capturehead_test.go b/pkg/beaconclient/capturehead_test.go index cc75ebd..2932da8 100644 --- a/pkg/beaconclient/capturehead_test.go +++ b/pkg/beaconclient/capturehead_test.go @@ -24,6 +24,7 @@ import ( "os" "path/filepath" "runtime" + "runtime/pprof" "strconv" "sync/atomic" "time" @@ -265,8 +266,8 @@ type 
MimicConfig struct { var _ = Describe("Capturehead", Label("head"), func() { Describe("Receiving New Head SSE messages", Label("unit", "behavioral"), func() { - Context("Correctly formatted Phase0 Block", Label("leak-head"), func() { - It("Should turn it into a struct successfully.", func() { + Context("Correctly formatted Phase0 Block", func() { + It("Should process it successfully.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "99") @@ -277,8 +278,9 @@ var _ = Describe("Capturehead", Label("head"), func() { }) }) - Context("Correctly formatted Altair Block", func() { - It("Should turn it into a struct successfully.", func() { + Context("Correctly formatted Altair Block", Label("leak-head"), func() { + It("Should process it successfully.", func() { + log.SetLevel(log.DebugLevel) BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() @@ -917,6 +919,7 @@ func (tbc TestBeaconNode) testMultipleReorgs(bc *beaconclient.BeaconClient, firs // A test to validate a single block was processed correctly func (tbc TestBeaconNode) testProcessBlock(bc *beaconclient.BeaconClient, head beaconclient.Head, epoch int, maxRetry int, expectedSuccessInsert uint64, expectedKnownGaps uint64, expectedReorgs uint64) { + pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) startGoRoutines := runtime.NumGoroutine() ctx, cancel := context.WithCancel(context.Background()) go bc.CaptureHead(ctx, 2, true) @@ -1030,11 +1033,13 @@ func testSszRoot(msg Message) { // A make shift function to stop head tracking and insure we dont have any goroutine 
leaks func testStopHeadTracking(ctx context.Context, bc *beaconclient.BeaconClient, startGoRoutines int) { - bc.Db.Close() bc.StopHeadTracking(ctx, true) time.Sleep(3 * time.Second) endNum := runtime.NumGoroutine() - //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) - Expect(startGoRoutines).To(Equal(endNum)) + pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) + log.WithField("startNum", startGoRoutines).Info("Start Go routine number") + log.WithField("endNum", endNum).Info("End Go routine number") + //Expect(endNum <= startGoRoutines).To(BeTrue()) + Expect(endNum).To(Equal(startGoRoutines)) } diff --git a/pkg/beaconclient/capturehistoric_test.go b/pkg/beaconclient/capturehistoric_test.go index 54e1a3c..faef10b 100644 --- a/pkg/beaconclient/capturehistoric_test.go +++ b/pkg/beaconclient/capturehistoric_test.go @@ -305,14 +305,13 @@ func testStopHistoricProcessing(ctx context.Context, bc *beaconclient.BeaconClie Expect(err).ToNot(HaveOccurred()) time.Sleep(5 * time.Second) validateAllRowsCheckedOut(bc.Db, hpCheckCheckedOutStmt) - err = bc.Db.Close() - Expect(err).ToNot(HaveOccurred()) time.Sleep(3 * time.Second) endNum := runtime.NumGoroutine() //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) - Expect(startGoRoutines).To(Equal(endNum)) + //Expect(endNum <= startGoRoutines).To(BeTrue()) + Expect(endNum).To(Equal(startGoRoutines)) } // A make shift function to stop head tracking and insure we dont have any goroutine leaks @@ -322,12 +321,11 @@ func testStopKnownGapProcessing(ctx context.Context, bc *beaconclient.BeaconClie Expect(err).ToNot(HaveOccurred()) time.Sleep(5 * time.Second) validateAllRowsCheckedOut(bc.Db, kgCheckCheckedOutStmt) - err = bc.Db.Close() - Expect(err).ToNot(HaveOccurred()) time.Sleep(3 * time.Second) endNum := runtime.NumGoroutine() //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) - Expect(startGoRoutines).To(Equal(endNum)) + //Expect(endNum <= startGoRoutines).To(BeTrue()) + Expect(endNum).To(Equal(startGoRoutines)) } -- 2.45.2 From 
2299e541975feba7037cf207b18fc55635656421 Mon Sep 17 00:00:00 2001 From: Abdul Rabbani Date: Wed, 22 Jun 2022 14:44:26 -0400 Subject: [PATCH 09/18] Use context to end goroutine --- pkg/beaconclient/capturehead_test.go | 8 ++--- pkg/beaconclient/capturehistoric_test.go | 5 +++- pkg/beaconclient/processhistoric.go | 36 +++++++++++++---------- pkg/beaconclient/systemvalidation_test.go | 2 +- 4 files changed, 29 insertions(+), 22 deletions(-) diff --git a/pkg/beaconclient/capturehead_test.go b/pkg/beaconclient/capturehead_test.go index 2932da8..9c350e2 100644 --- a/pkg/beaconclient/capturehead_test.go +++ b/pkg/beaconclient/capturehead_test.go @@ -24,7 +24,6 @@ import ( "os" "path/filepath" "runtime" - "runtime/pprof" "strconv" "sync/atomic" "time" @@ -407,7 +406,7 @@ var _ = Describe("Capturehead", Label("head"), func() { Expect(end).To(Equal(99)) }) }) - Context("Gaps between two head messages", func() { + Context("Gaps between two head messages", Label("gap-head"), func() { It("Should add the slots in-between", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() @@ -919,7 +918,7 @@ func (tbc TestBeaconNode) testMultipleReorgs(bc *beaconclient.BeaconClient, firs // A test to validate a single block was processed correctly func (tbc TestBeaconNode) testProcessBlock(bc *beaconclient.BeaconClient, head beaconclient.Head, epoch int, maxRetry int, expectedSuccessInsert uint64, expectedKnownGaps uint64, expectedReorgs uint64) { - pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) + //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) startGoRoutines := runtime.NumGoroutine() ctx, cancel := context.WithCancel(context.Background()) go bc.CaptureHead(ctx, 2, true) @@ -987,6 +986,7 @@ func (tbc TestBeaconNode) testMultipleHead(bc *beaconclient.BeaconClient, firstH func (tbc 
TestBeaconNode) testKnownGapsMessages(bc *beaconclient.BeaconClient, tableIncrement int, expectedEntries uint64, maxRetry int, msg ...beaconclient.Head) { bc.KnownGapTableIncrement = tableIncrement + //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) startGoRoutines := runtime.NumGoroutine() ctx, cancel := context.WithCancel(context.Background()) go bc.CaptureHead(ctx, 2, true) @@ -1037,7 +1037,7 @@ func testStopHeadTracking(ctx context.Context, bc *beaconclient.BeaconClient, st time.Sleep(3 * time.Second) endNum := runtime.NumGoroutine() - pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) + //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) log.WithField("startNum", startGoRoutines).Info("Start Go routine number") log.WithField("endNum", endNum).Info("End Go routine number") //Expect(endNum <= startGoRoutines).To(BeTrue()) diff --git a/pkg/beaconclient/capturehistoric_test.go b/pkg/beaconclient/capturehistoric_test.go index faef10b..fe5ec83 100644 --- a/pkg/beaconclient/capturehistoric_test.go +++ b/pkg/beaconclient/capturehistoric_test.go @@ -212,6 +212,7 @@ func (tbc TestBeaconNode) writeEventToHistoricProcess(bc *beaconclient.BeaconCli // Start the CaptureHistoric function, and check for the correct inserted slots. 
func (tbc TestBeaconNode) runHistoricalProcess(bc *beaconclient.BeaconClient, maxWorkers int, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) { + //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) startGoRoutines := runtime.NumGoroutine() ctx, cancel := context.WithCancel(context.Background()) go bc.CaptureHistoric(ctx, maxWorkers) @@ -306,11 +307,13 @@ func testStopHistoricProcessing(ctx context.Context, bc *beaconclient.BeaconClie time.Sleep(5 * time.Second) validateAllRowsCheckedOut(bc.Db, hpCheckCheckedOutStmt) - time.Sleep(3 * time.Second) + time.Sleep(5 * time.Second) endNum := runtime.NumGoroutine() //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) //Expect(endNum <= startGoRoutines).To(BeTrue()) + log.WithField("startNum", startGoRoutines).Info("Start Go routine number") + log.WithField("endNum", endNum).Info("End Go routine number") Expect(endNum).To(Equal(startGoRoutines)) } diff --git a/pkg/beaconclient/processhistoric.go b/pkg/beaconclient/processhistoric.go index c520e41..9b2392b 100644 --- a/pkg/beaconclient/processhistoric.go +++ b/pkg/beaconclient/processhistoric.go @@ -241,23 +241,27 @@ func removeRowPostProcess(ctx context.Context, db sql.Database, processCh <-chan "endSlot": slots.endSlot, }).Debug("Starting to check to see if the following slots have been processed") for { - isStartProcess, err := isSlotProcessed(db, checkProcessedStmt, strconv.Itoa(slots.startSlot)) - if err != nil { - errCh <- err + select { + case <-ctx.Done(): + return + default: + isStartProcess, err := isSlotProcessed(db, checkProcessedStmt, strconv.Itoa(slots.startSlot)) + if err != nil { + errCh <- err + } + isEndProcess, err := isSlotProcessed(db, checkProcessedStmt, strconv.Itoa(slots.endSlot)) + if err != nil { + errCh <- err + } + if isStartProcess && isEndProcess { + _, err := db.Exec(context.Background(), removeStmt, strconv.Itoa(slots.startSlot), strconv.Itoa(slots.endSlot)) + if err != nil { + errCh <- err + } + return + 
} + time.Sleep(3 * time.Second) } - isEndProcess, err := isSlotProcessed(db, checkProcessedStmt, strconv.Itoa(slots.endSlot)) - if err != nil { - errCh <- err - } - if isStartProcess && isEndProcess { - break - } - time.Sleep(3 * time.Second) - } - - _, err := db.Exec(context.Background(), removeStmt, strconv.Itoa(slots.startSlot), strconv.Itoa(slots.endSlot)) - if err != nil { - errCh <- err } }() diff --git a/pkg/beaconclient/systemvalidation_test.go b/pkg/beaconclient/systemvalidation_test.go index 3c5cb17..b204404 100644 --- a/pkg/beaconclient/systemvalidation_test.go +++ b/pkg/beaconclient/systemvalidation_test.go @@ -30,7 +30,7 @@ var ( ) var _ = Describe("Systemvalidation", Label("system"), func() { Describe("Run the application against a running lighthouse node", func() { - Context("When we receive head messages", func() { + Context("When we receive head messages", Label("system-head"), func() { It("We should process the messages successfully", func() { bc := setUpTest(prodConfig, "10000000000") processProdHeadBlocks(bc, 3, 0, 0, 0) -- 2.45.2 From 041f862c11689102fbc79a73131ea075d0a08a45 Mon Sep 17 00:00:00 2001 From: Abdul Rabbani Date: Thu, 23 Jun 2022 07:50:36 -0400 Subject: [PATCH 10/18] Make channels buffered --- pkg/beaconclient/beaconclient.go | 10 ++++----- pkg/beaconclient/capturehead.go | 1 + pkg/beaconclient/capturehead_test.go | 15 +++++++------- pkg/beaconclient/capturehistoric.go | 8 +++---- pkg/beaconclient/incomingsse.go | 31 ++++------------------------ pkg/beaconclient/processevents.go | 5 +++-- pkg/beaconclient/processhistoric.go | 3 +-- pkg/beaconclient/queryserver.go | 1 + 8 files changed, 27 insertions(+), 47 deletions(-) diff --git a/pkg/beaconclient/beaconclient.go b/pkg/beaconclient/beaconclient.go index 4f959e3..1efcaa7 100644 --- a/pkg/beaconclient/beaconclient.go +++ b/pkg/beaconclient/beaconclient.go @@ -76,8 +76,8 @@ type BeaconClient struct { type SseEvents[P ProcessedEvents] struct { Endpoint string // The endpoint for the 
subscription. Primarily used for logging MessagesCh chan *sse.Event // Contains all the messages from the SSE Channel - ErrorCh chan *SseError // Contains any errors while SSE streaming occurred - ProcessCh chan *P // Used to capture processed data in its proper struct. + ErrorCh chan SseError // Contains any errors while SSE streaming occurred + ProcessCh chan P // Used to capture processed data in its proper struct. SseClient *sse.Client // sse.Client object that is used to interact with the SSE stream } @@ -119,9 +119,9 @@ func createSseEvent[P ProcessedEvents](baseEndpoint string, path string) *SseEve endpoint := baseEndpoint + path sseEvents := &SseEvents[P]{ Endpoint: endpoint, - MessagesCh: make(chan *sse.Event, 1), - ErrorCh: make(chan *SseError), - ProcessCh: make(chan *P), + MessagesCh: make(chan *sse.Event, 10), + ErrorCh: make(chan SseError, 10), + ProcessCh: make(chan P, 10), SseClient: func(endpoint string) *sse.Client { log.WithFields(log.Fields{"endpoint": endpoint}).Info("Creating SSE client") return sse.NewClient(endpoint) diff --git a/pkg/beaconclient/capturehead.go b/pkg/beaconclient/capturehead.go index 9392d45..51cecae 100644 --- a/pkg/beaconclient/capturehead.go +++ b/pkg/beaconclient/capturehead.go @@ -38,6 +38,7 @@ func (bc *BeaconClient) StopHeadTracking(ctx context.Context, skipSee bool) { if !skipSee { bc.HeadTracking.SseClient.Unsubscribe(bc.HeadTracking.MessagesCh) bc.ReOrgTracking.SseClient.Unsubscribe(bc.ReOrgTracking.MessagesCh) + log.Info("Successfully unsubscribed to SSE client") } log.Info("Successfully stopped the head tracking service.") default: diff --git a/pkg/beaconclient/capturehead_test.go b/pkg/beaconclient/capturehead_test.go index 9c350e2..40587e3 100644 --- a/pkg/beaconclient/capturehead_test.go +++ b/pkg/beaconclient/capturehead_test.go @@ -24,6 +24,7 @@ import ( "os" "path/filepath" "runtime" + "runtime/pprof" "strconv" "sync/atomic" "time" @@ -913,7 +914,7 @@ func (tbc TestBeaconNode) testMultipleReorgs(bc 
*beaconclient.BeaconClient, firs validateSlot(bc, thirdHead, epoch, "forked") cancel() - testStopHeadTracking(ctx, bc, startGoRoutines) + testStopHeadTracking(ctx, bc, startGoRoutines, true) } // A test to validate a single block was processed correctly @@ -947,7 +948,7 @@ func (tbc TestBeaconNode) testProcessBlock(bc *beaconclient.BeaconClient, head b validateSlot(bc, head, epoch, "proposed") } cancel() - testStopHeadTracking(ctx, bc, startGoRoutines) + testStopHeadTracking(ctx, bc, startGoRoutines, true) } // A test that ensures that if two HeadMessages occur for a single slot they are marked @@ -978,7 +979,7 @@ func (tbc TestBeaconNode) testMultipleHead(bc *beaconclient.BeaconClient, firstH validateSlot(bc, firstHead, epoch, "forked") validateSlot(bc, secondHead, epoch, "proposed") cancel() - testStopHeadTracking(ctx, bc, startGoRoutines) + testStopHeadTracking(ctx, bc, startGoRoutines, true) } // A test that ensures that if two HeadMessages occur for a single slot they are marked @@ -1013,7 +1014,7 @@ func (tbc TestBeaconNode) testKnownGapsMessages(bc *beaconclient.BeaconClient, t Fail("We found reorgs when we didn't expect it") } cancel() - testStopHeadTracking(ctx, bc, startGoRoutines) + testStopHeadTracking(ctx, bc, startGoRoutines, true) } // This function will make sure we are properly able to get the SszRoot of the SignedBeaconBlock and the BeaconState. 
@@ -1032,12 +1033,12 @@ func testSszRoot(msg Message) { } // A make shift function to stop head tracking and insure we dont have any goroutine leaks -func testStopHeadTracking(ctx context.Context, bc *beaconclient.BeaconClient, startGoRoutines int) { - bc.StopHeadTracking(ctx, true) +func testStopHeadTracking(ctx context.Context, bc *beaconclient.BeaconClient, startGoRoutines int, skipSse bool) { + bc.StopHeadTracking(ctx, skipSse) time.Sleep(3 * time.Second) endNum := runtime.NumGoroutine() - //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) + pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) log.WithField("startNum", startGoRoutines).Info("Start Go routine number") log.WithField("endNum", endNum).Info("End Go routine number") //Expect(endNum <= startGoRoutines).To(BeTrue()) diff --git a/pkg/beaconclient/capturehistoric.go b/pkg/beaconclient/capturehistoric.go index defcb9e..a4f4400 100644 --- a/pkg/beaconclient/capturehistoric.go +++ b/pkg/beaconclient/capturehistoric.go @@ -96,10 +96,10 @@ type batchHistoricError struct { // // 5. Handle any errors. func handleBatchProcess(ctx context.Context, maxWorkers int, bp BatchProcessing, db sql.Database, serverEndpoint string, metrics *BeaconClientMetrics, checkDb bool, incrementTracker func(uint64)) []error { - slotsCh := make(chan slotsToProcess) - workCh := make(chan int) - processedCh := make(chan slotsToProcess) - errCh := make(chan batchHistoricError) + slotsCh := make(chan slotsToProcess, 5) + workCh := make(chan int, 5) + processedCh := make(chan slotsToProcess, 5) + errCh := make(chan batchHistoricError, 5) finalErrCh := make(chan []error, 1) // Checkout Rows with same node Identifier. diff --git a/pkg/beaconclient/incomingsse.go b/pkg/beaconclient/incomingsse.go index ab94c07..6859764 100644 --- a/pkg/beaconclient/incomingsse.go +++ b/pkg/beaconclient/incomingsse.go @@ -30,30 +30,6 @@ import ( // When new messages come in, it will ensure that they are decoded into JSON. 
// If any errors occur, it log the error information. func handleIncomingSseEvent[P ProcessedEvents](ctx context.Context, eventHandler *SseEvents[P], errMetricInc func(uint64), skipSse bool) { - //go func() { - // subCh := make(chan error, 1) - // go func() { - // err := eventHandler.SseClient.SubscribeChanRawWithContext(ctx, eventHandler.MessagesCh) - // if err != nil { - // subCh <- err - // } - // subCh <- nil - // }() - // select { - // case err := <-subCh: - // if err != nil { - // log.WithFields(log.Fields{ - // "err": err, - // "endpoint": eventHandler.Endpoint, - // }).Error("Unable to subscribe to the SSE endpoint.") - // return - // } else { - // loghelper.LogEndpoint(eventHandler.Endpoint).Info("Successfully subscribed to the event stream.") - // } - // case <-ctx.Done(): - // return - // } - //}() if !skipSse { for { err := eventHandler.SseClient.SubscribeChanRawWithContext(ctx, eventHandler.MessagesCh) @@ -94,18 +70,19 @@ func handleIncomingSseEvent[P ProcessedEvents](ctx context.Context, eventHandler } // Turn the data object into a Struct. -func processMsg[P ProcessedEvents](msg []byte, processCh chan<- *P, errorCh chan<- *SseError) { +func processMsg[P ProcessedEvents](msg []byte, processCh chan<- P, errorCh chan<- SseError) { var msgMarshaled P err := json.Unmarshal(msg, &msgMarshaled) if err != nil { loghelper.LogError(err).Error("Unable to parse message") - errorCh <- &SseError{ + errorCh <- SseError{ err: err, msg: msg, } return } - processCh <- &msgMarshaled + processCh <- msgMarshaled + log.Info("Done sending") } // Capture all of the event topics. 
diff --git a/pkg/beaconclient/processevents.go b/pkg/beaconclient/processevents.go index 9aa604e..ed8da5c 100644 --- a/pkg/beaconclient/processevents.go +++ b/pkg/beaconclient/processevents.go @@ -46,7 +46,7 @@ func (bc *BeaconClient) handleReorg(ctx context.Context) { func (bc *BeaconClient) handleHead(ctx context.Context, maxWorkers int) { log.Info("Starting to process head.") - workCh := make(chan workParams) + workCh := make(chan workParams, 5) log.WithField("workerNumber", maxWorkers).Info("Creating Workers") for i := 1; i < maxWorkers; i++ { go bc.headBlockProcessor(ctx, workCh) @@ -56,13 +56,14 @@ func (bc *BeaconClient) handleHead(ctx context.Context, maxWorkers int) { select { case <-ctx.Done(): close(bc.HeadTracking.ProcessCh) + close(workCh) return case head := <-bc.HeadTracking.ProcessCh: // Process all the work here. slot, err := strconv.Atoi(head.Slot) if err != nil { - bc.HeadTracking.ErrorCh <- &SseError{ + bc.HeadTracking.ErrorCh <- SseError{ err: fmt.Errorf("Unable to turn the slot from string to int: %s", head.Slot), } errorSlots = errorSlots + 1 diff --git a/pkg/beaconclient/processhistoric.go b/pkg/beaconclient/processhistoric.go index 9b2392b..49b0fd1 100644 --- a/pkg/beaconclient/processhistoric.go +++ b/pkg/beaconclient/processhistoric.go @@ -228,7 +228,7 @@ func getBatchProcessRow(ctx context.Context, db sql.Database, getStartEndSlotStm // After a row has been processed it should be removed from its appropriate table. 
func removeRowPostProcess(ctx context.Context, db sql.Database, processCh <-chan slotsToProcess, checkProcessedStmt, removeStmt string) error { - errCh := make(chan error) + errCh := make(chan error, 1) for { select { case <-ctx.Done(): @@ -263,7 +263,6 @@ func removeRowPostProcess(ctx context.Context, db sql.Database, processCh <-chan time.Sleep(3 * time.Second) } } - }() if len(errCh) != 0 { return <-errCh diff --git a/pkg/beaconclient/queryserver.go b/pkg/beaconclient/queryserver.go index 76bb14e..ee2ac4d 100644 --- a/pkg/beaconclient/queryserver.go +++ b/pkg/beaconclient/queryserver.go @@ -54,6 +54,7 @@ func querySsz(endpoint string, slot string) (*[]byte, int, error) { loghelper.LogSlotError(slot, err).Error("Unable to turn response into a []bytes array!") return nil, rc, fmt.Errorf("Unable to turn response into a []bytes array!: %s", err.Error()) } + //log.WithField("body", unsafe.Sizeof(body)).Debug("Size of the raw SSZ object") return &body, rc, nil } -- 2.45.2 From ac1e3075e24f9e1480daf7747c3b2c6c44fad5d7 Mon Sep 17 00:00:00 2001 From: Abdul Rabbani Date: Thu, 23 Jun 2022 08:07:19 -0400 Subject: [PATCH 11/18] Update system testing --- pkg/beaconclient/incomingsse.go | 2 +- pkg/beaconclient/processslot.go | 14 ++++++++------ pkg/beaconclient/systemvalidation_test.go | 19 +++++++++++++++---- 3 files changed, 24 insertions(+), 11 deletions(-) diff --git a/pkg/beaconclient/incomingsse.go b/pkg/beaconclient/incomingsse.go index 6859764..5831407 100644 --- a/pkg/beaconclient/incomingsse.go +++ b/pkg/beaconclient/incomingsse.go @@ -82,7 +82,7 @@ func processMsg[P ProcessedEvents](msg []byte, processCh chan<- P, errorCh chan< return } processCh <- msgMarshaled - log.Info("Done sending") + log.Debug("Done sending") } // Capture all of the event topics. 
diff --git a/pkg/beaconclient/processslot.go b/pkg/beaconclient/processslot.go index 4b1f55e..9e09c3a 100644 --- a/pkg/beaconclient/processslot.go +++ b/pkg/beaconclient/processslot.go @@ -156,10 +156,6 @@ func processFullSlot(ctx context.Context, db sql.Database, serverAddress string, if err := g.Wait(); err != nil { // Make sure channel is empty. - select { - case <-vUnmarshalerCh: - default: - } return err, "processSlot" } @@ -296,14 +292,20 @@ func (ps *ProcessSlot) getSignedBeaconBlock(serverAddress string, vmCh <-chan *d // Update the SszBeaconState and FullBeaconState object with their respective values. func (ps *ProcessSlot) getBeaconState(serverEndpoint string, vmCh chan<- *dt.VersionedUnmarshaler) error { - var stateIdentifier string // Used to query the state + var ( + stateIdentifier string // Used to query the state + err error + ) if ps.StateRoot != "" { stateIdentifier = ps.StateRoot } else { stateIdentifier = strconv.Itoa(ps.Slot) } stateEndpoint := serverEndpoint + BcStateQueryEndpoint + stateIdentifier - ps.SszBeaconState, _, _ = querySsz(stateEndpoint, strconv.Itoa(ps.Slot)) + ps.SszBeaconState, _, err = querySsz(stateEndpoint, strconv.Itoa(ps.Slot)) + if err != nil { + return fmt.Errorf("Unable to querrySSZ") + } versionedUnmarshaler, err := dt.FromState(*ps.SszBeaconState) if err != nil { diff --git a/pkg/beaconclient/systemvalidation_test.go b/pkg/beaconclient/systemvalidation_test.go index b204404..3efbd6b 100644 --- a/pkg/beaconclient/systemvalidation_test.go +++ b/pkg/beaconclient/systemvalidation_test.go @@ -3,11 +3,11 @@ package beaconclient_test import ( "context" "os" - "runtime" "strconv" "time" . "github.com/onsi/ginkgo/v2" + //. "github.com/onsi/gomega" "github.com/vulcanize/ipld-eth-beacon-indexer/pkg/beaconclient" ) @@ -65,12 +65,23 @@ func getEnvInt(envVar string) int { // Start head tracking and wait for the expected results. 
func processProdHeadBlocks(bc *beaconclient.BeaconClient, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) { - startGoRoutines := runtime.NumGoroutine() + //startGoRoutines := runtime.NumGoroutine() + //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) ctx, cancel := context.WithCancel(context.Background()) go bc.CaptureHead(ctx, 2, false) - time.Sleep(1 * time.Second) + time.Sleep(5 * time.Second) validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError) cancel() - testStopHeadTracking(ctx, bc, startGoRoutines) + time.Sleep(4) + testStopSystemHeadTracking(ctx, bc) +} + +// Custom stop for system testing +func testStopSystemHeadTracking(ctx context.Context, bc *beaconclient.BeaconClient) { + bc.StopHeadTracking(ctx, false) + + time.Sleep(3 * time.Second) + //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) + //Expect(endNum <= startGoRoutines).To(BeTrue()) } -- 2.45.2 From 9664783eef50fd034cfa44b4a82001e782ee6322 Mon Sep 17 00:00:00 2001 From: Abdul Rabbani Date: Thu, 23 Jun 2022 08:26:23 -0400 Subject: [PATCH 12/18] Remove pprof --- pkg/beaconclient/capturehead_test.go | 3 +-- pkg/beaconclient/systemvalidation_test.go | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/pkg/beaconclient/capturehead_test.go b/pkg/beaconclient/capturehead_test.go index 40587e3..8362bb3 100644 --- a/pkg/beaconclient/capturehead_test.go +++ b/pkg/beaconclient/capturehead_test.go @@ -24,7 +24,6 @@ import ( "os" "path/filepath" "runtime" - "runtime/pprof" "strconv" "sync/atomic" "time" @@ -1038,7 +1037,7 @@ func testStopHeadTracking(ctx context.Context, bc *beaconclient.BeaconClient, st time.Sleep(3 * time.Second) endNum := runtime.NumGoroutine() - pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) + //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) log.WithField("startNum", startGoRoutines).Info("Start Go routine number") log.WithField("endNum", endNum).Info("End Go routine number") 
//Expect(endNum <= startGoRoutines).To(BeTrue()) diff --git a/pkg/beaconclient/systemvalidation_test.go b/pkg/beaconclient/systemvalidation_test.go index 3efbd6b..807ea59 100644 --- a/pkg/beaconclient/systemvalidation_test.go +++ b/pkg/beaconclient/systemvalidation_test.go @@ -73,7 +73,7 @@ func processProdHeadBlocks(bc *beaconclient.BeaconClient, expectedInserts, expec validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError) cancel() - time.Sleep(4) + time.Sleep(4 * time.Second) testStopSystemHeadTracking(ctx, bc) } -- 2.45.2 From 32ec784e1e21db2c9517d97cc8be1f56f7190449 Mon Sep 17 00:00:00 2001 From: Abdul Rabbani Date: Thu, 23 Jun 2022 08:50:09 -0400 Subject: [PATCH 13/18] Close channels in the correct location --- pkg/beaconclient/capturehead.go | 2 ++ pkg/beaconclient/incomingsse.go | 3 +-- pkg/beaconclient/processevents.go | 2 -- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/pkg/beaconclient/capturehead.go b/pkg/beaconclient/capturehead.go index 51cecae..5ab00e2 100644 --- a/pkg/beaconclient/capturehead.go +++ b/pkg/beaconclient/capturehead.go @@ -39,6 +39,8 @@ func (bc *BeaconClient) StopHeadTracking(ctx context.Context, skipSee bool) { bc.HeadTracking.SseClient.Unsubscribe(bc.HeadTracking.MessagesCh) bc.ReOrgTracking.SseClient.Unsubscribe(bc.ReOrgTracking.MessagesCh) log.Info("Successfully unsubscribed to SSE client") + close(bc.ReOrgTracking.MessagesCh) + close(bc.HeadTracking.MessagesCh) } log.Info("Successfully stopped the head tracking service.") default: diff --git a/pkg/beaconclient/incomingsse.go b/pkg/beaconclient/incomingsse.go index 5831407..0ebd1d5 100644 --- a/pkg/beaconclient/incomingsse.go +++ b/pkg/beaconclient/incomingsse.go @@ -47,8 +47,7 @@ func handleIncomingSseEvent[P ProcessedEvents](ctx context.Context, eventHandler for { select { case <-ctx.Done(): - close(eventHandler.MessagesCh) - close(eventHandler.ErrorCh) + close(eventHandler.ProcessCh) return case message := 
<-eventHandler.MessagesCh: // Message can be nil if its a keep-alive message diff --git a/pkg/beaconclient/processevents.go b/pkg/beaconclient/processevents.go index ed8da5c..f95cb31 100644 --- a/pkg/beaconclient/processevents.go +++ b/pkg/beaconclient/processevents.go @@ -33,7 +33,6 @@ func (bc *BeaconClient) handleReorg(ctx context.Context) { for { select { case <-ctx.Done(): - close(bc.ReOrgTracking.ProcessCh) return case reorg := <-bc.ReOrgTracking.ProcessCh: log.WithFields(log.Fields{"reorg": reorg}).Debug("Received a new reorg message.") @@ -55,7 +54,6 @@ func (bc *BeaconClient) handleHead(ctx context.Context, maxWorkers int) { for { select { case <-ctx.Done(): - close(bc.HeadTracking.ProcessCh) close(workCh) return case head := <-bc.HeadTracking.ProcessCh: -- 2.45.2 From 16d034c844ce2dce0ebded282c2c2b06a0d3f659 Mon Sep 17 00:00:00 2001 From: Abdul Rabbani Date: Thu, 23 Jun 2022 09:17:46 -0400 Subject: [PATCH 14/18] Close idle connections and chan's properly. --- pkg/beaconclient/beaconclient.go | 4 ++-- pkg/beaconclient/capturehistoric.go | 4 +++- pkg/beaconclient/capturehistoric_test.go | 4 +++- pkg/beaconclient/processhistoric.go | 1 + pkg/beaconclient/queryserver.go | 2 ++ 5 files changed, 11 insertions(+), 4 deletions(-) diff --git a/pkg/beaconclient/beaconclient.go b/pkg/beaconclient/beaconclient.go index 1efcaa7..066c455 100644 --- a/pkg/beaconclient/beaconclient.go +++ b/pkg/beaconclient/beaconclient.go @@ -119,8 +119,8 @@ func createSseEvent[P ProcessedEvents](baseEndpoint string, path string) *SseEve endpoint := baseEndpoint + path sseEvents := &SseEvents[P]{ Endpoint: endpoint, - MessagesCh: make(chan *sse.Event, 10), - ErrorCh: make(chan SseError, 10), + MessagesCh: make(chan *sse.Event), + ErrorCh: make(chan SseError), ProcessCh: make(chan P, 10), SseClient: func(endpoint string) *sse.Client { log.WithFields(log.Fields{"endpoint": endpoint}).Info("Creating SSE client") diff --git a/pkg/beaconclient/capturehistoric.go 
b/pkg/beaconclient/capturehistoric.go index a4f4400..9627965 100644 --- a/pkg/beaconclient/capturehistoric.go +++ b/pkg/beaconclient/capturehistoric.go @@ -96,7 +96,7 @@ type batchHistoricError struct { // // 5. Handle any errors. func handleBatchProcess(ctx context.Context, maxWorkers int, bp BatchProcessing, db sql.Database, serverEndpoint string, metrics *BeaconClientMetrics, checkDb bool, incrementTracker func(uint64)) []error { - slotsCh := make(chan slotsToProcess, 5) + slotsCh := make(chan slotsToProcess) workCh := make(chan int, 5) processedCh := make(chan slotsToProcess, 5) errCh := make(chan batchHistoricError, 5) @@ -120,6 +120,8 @@ func handleBatchProcess(ctx context.Context, maxWorkers int, bp BatchProcessing, for { select { case <-ctx.Done(): + close(workCh) + close(processedCh) return case slots := <-slotsCh: if slots.startSlot > slots.endSlot { diff --git a/pkg/beaconclient/capturehistoric_test.go b/pkg/beaconclient/capturehistoric_test.go index fe5ec83..b97827d 100644 --- a/pkg/beaconclient/capturehistoric_test.go +++ b/pkg/beaconclient/capturehistoric_test.go @@ -3,7 +3,9 @@ package beaconclient_test import ( "context" "fmt" + "os" "runtime" + "runtime/pprof" "sync/atomic" "time" @@ -328,7 +330,7 @@ func testStopKnownGapProcessing(ctx context.Context, bc *beaconclient.BeaconClie time.Sleep(3 * time.Second) endNum := runtime.NumGoroutine() - //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) + pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) //Expect(endNum <= startGoRoutines).To(BeTrue()) Expect(endNum).To(Equal(startGoRoutines)) } diff --git a/pkg/beaconclient/processhistoric.go b/pkg/beaconclient/processhistoric.go index 49b0fd1..8b52fc9 100644 --- a/pkg/beaconclient/processhistoric.go +++ b/pkg/beaconclient/processhistoric.go @@ -132,6 +132,7 @@ func getBatchProcessRow(ctx context.Context, db sql.Database, getStartEndSlotStm for len(errCount) < 5 { select { case <-ctx.Done(): + close(slotCh) return errCount default: if len(errCount) != 
prevErrCount { diff --git a/pkg/beaconclient/queryserver.go b/pkg/beaconclient/queryserver.go index ee2ac4d..e9bb9e2 100644 --- a/pkg/beaconclient/queryserver.go +++ b/pkg/beaconclient/queryserver.go @@ -43,6 +43,8 @@ func querySsz(endpoint string, slot string) (*[]byte, int, error) { return nil, 0, fmt.Errorf("Unable to query Beacon Node: %s", err.Error()) } defer response.Body.Close() + // Needed for testing.... But might be interesting to test with... + defer client.CloseIdleConnections() rc := response.StatusCode //var body []byte -- 2.45.2 From 0954dc615935df6648f62b8e9f7dc76c74d14093 Mon Sep 17 00:00:00 2001 From: Abdul Rabbani Date: Thu, 23 Jun 2022 11:35:18 -0400 Subject: [PATCH 15/18] Update head testing --- Makefile | 3 +- pkg/beaconclient/capturehead_test.go | 78 ++++++++++++------------ pkg/beaconclient/capturehistoric_test.go | 10 ++- 3 files changed, 46 insertions(+), 45 deletions(-) diff --git a/Makefile b/Makefile index 7f9f925..db6b2c2 100644 --- a/Makefile +++ b/Makefile @@ -25,6 +25,7 @@ integration-test-ci: go fmt ./... $(GINKGO) -r --label-filter integration \ --procs=4 --compilers=4 \ + --flake-attempts=3 \ --randomize-all --randomize-suites \ --fail-on-pending --keep-going \ --cover --coverprofile=cover.profile \ @@ -76,7 +77,7 @@ unit-test-ci: go vet ./... go fmt ./... 
$(GINKGO) -r --label-filter unit \ - --randomize-all --randomize-suites + --randomize-all --randomize-suites \ --flake-attempts=3 \ --fail-on-pending --keep-going \ --cover --coverprofile=cover.profile \ diff --git a/pkg/beaconclient/capturehead_test.go b/pkg/beaconclient/capturehead_test.go index 8362bb3..05aac4f 100644 --- a/pkg/beaconclient/capturehead_test.go +++ b/pkg/beaconclient/capturehead_test.go @@ -271,7 +271,7 @@ var _ = Describe("Capturehead", Label("head"), func() { defer httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, 3, maxRetry, 1, 0, 0, BeaconNodeTester.TestEvents["100"].HeadMessage) validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectParentRoot, BeaconNodeTester.TestEvents["100"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["100"].CorrectSignedBeaconBlockMhKey) validateBeaconState(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectBeaconStateMhKey) @@ -284,7 +284,7 @@ var _ = Describe("Capturehead", Label("head"), func() { defer httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, 74240, maxRetry, 1, 0, 0, BeaconNodeTester.TestEvents["2375703"].HeadMessage) validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectParentRoot, BeaconNodeTester.TestEvents["2375703"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["2375703"].CorrectSignedBeaconBlockMhKey) validateBeaconState(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectBeaconStateMhKey) }) @@ 
-295,12 +295,12 @@ var _ = Describe("Capturehead", Label("head"), func() { defer httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703-dummy"].HeadMessage, 74240, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, 74240, maxRetry, 2, 0, 0, BeaconNodeTester.TestEvents["2375703-dummy"].HeadMessage, BeaconNodeTester.TestEvents["2375703-dummy-2"].HeadMessage) - bc = setUpTest(BeaconNodeTester.TestConfig, "2375702") - BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) - defer httpmock.DeactivateAndReset() - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703-dummy-2"].HeadMessage, 74240, maxRetry, 1, 0, 0) + //bc = setUpTest(BeaconNodeTester.TestConfig, "2375702") + //BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) + //defer httpmock.DeactivateAndReset() + //BeaconNodeTester.testProcessBlock(bc, , 74240, maxRetry, 1, 0, 0) }) }) @@ -310,24 +310,23 @@ var _ = Describe("Capturehead", Label("head"), func() { defer httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, 3, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, 3, maxRetry, 2, 0, 0, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, BeaconNodeTester.TestEvents["100-dummy-2"].HeadMessage) - bc = setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, 
BeaconNodeTester.TestConfig.dummyParentRoot) - defer httpmock.DeactivateAndReset() - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100-dummy-2"].HeadMessage, 3, maxRetry, 1, 0, 0) + //bc = setUpTest(BeaconNodeTester.TestConfig, "99") + //BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) + //defer httpmock.DeactivateAndReset() + //BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100-dummy-2"].HeadMessage, 3, maxRetry, 1, 0, 0) }) }) - Context("Two consecutive correct blocks", func() { + Context("Two consecutive correct blocks", Label("bug"), func() { It("Should handle both blocks correctly, without any reorgs or known_gaps", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, 3, maxRetry, 1, 0, 0) - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, 3, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, 3, maxRetry, 2, 0, 0, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["101"].HeadMessage) }) }) @@ -337,8 +336,7 @@ var _ = Describe("Capturehead", Label("head"), func() { defer httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, 3, maxRetry, 1, 0, 0) - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["101"].HeadMessage, 3, maxRetry, 1, 1, 1) + BeaconNodeTester.testProcessBlock(bc, 3, maxRetry, 2, 0, 0, 
BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, BeaconNodeTester.TestEvents["101"].HeadMessage) }) }) @@ -363,7 +361,7 @@ var _ = Describe("Capturehead", Label("head"), func() { defer httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "101") - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["102-wrong-ssz-1"].HeadMessage, 3, maxRetry, 0, 1, 0) + BeaconNodeTester.testProcessBlock(bc, 3, maxRetry, 0, 1, 0, BeaconNodeTester.TestEvents["102-wrong-ssz-1"].HeadMessage) knownGapCount := countKnownGapsTable(bc.Db) Expect(knownGapCount).To(Equal(1)) @@ -543,18 +541,25 @@ func validateBeaconState(bc *beaconclient.BeaconClient, headMessage beaconclient } // Wrapper function to send a head message to the beaconclient -func sendHeadMessage(bc *beaconclient.BeaconClient, head beaconclient.Head, maxRetry int, expectedSuccessfulInserts uint64) { - - data, err := json.Marshal(head) - Expect(err).ToNot(HaveOccurred()) +func sendHeadMessage(bc *beaconclient.BeaconClient, maxRetry int, expectedSuccessfulInserts uint64, head ...beaconclient.Head) { + var ( + data []byte + err error + ) startInserts := atomic.LoadUint64(&bc.Metrics.SlotInserts) - bc.HeadTracking.MessagesCh <- &sse.Event{ - ID: []byte{}, - Data: data, - Event: []byte{}, - Retry: []byte{}, + for _, ms := range head { + data, err = json.Marshal(ms) + Expect(err).ToNot(HaveOccurred()) + time.Sleep(1 * time.Second) + bc.HeadTracking.MessagesCh <- &sse.Event{ + ID: []byte{}, + Data: data, + Event: []byte{}, + Retry: []byte{}, + } } + curRetry := 0 for atomic.LoadUint64(&bc.Metrics.SlotInserts) != startInserts+expectedSuccessfulInserts { time.Sleep(1 * time.Second) @@ -858,9 +863,7 @@ func (tbc TestBeaconNode) testMultipleReorgs(bc *beaconclient.BeaconClient, firs time.Sleep(1 * time.Second) log.Info("Sending Messages to BeaconClient") - sendHeadMessage(bc, firstHead, maxRetry, 1) - sendHeadMessage(bc, secondHead, maxRetry, 1) - sendHeadMessage(bc, thirdHead, maxRetry, 1) + 
sendHeadMessage(bc, maxRetry, 3, firstHead, secondHead, thirdHead) curRetry := 0 for atomic.LoadUint64(&bc.Metrics.ReorgInserts) != 2 { @@ -917,13 +920,13 @@ func (tbc TestBeaconNode) testMultipleReorgs(bc *beaconclient.BeaconClient, firs } // A test to validate a single block was processed correctly -func (tbc TestBeaconNode) testProcessBlock(bc *beaconclient.BeaconClient, head beaconclient.Head, epoch int, maxRetry int, expectedSuccessInsert uint64, expectedKnownGaps uint64, expectedReorgs uint64) { +func (tbc TestBeaconNode) testProcessBlock(bc *beaconclient.BeaconClient, epoch int, maxRetry int, expectedSuccessInsert uint64, expectedKnownGaps uint64, expectedReorgs uint64, head ...beaconclient.Head) { //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) startGoRoutines := runtime.NumGoroutine() ctx, cancel := context.WithCancel(context.Background()) go bc.CaptureHead(ctx, 2, true) time.Sleep(1 * time.Second) - sendHeadMessage(bc, head, maxRetry, expectedSuccessInsert) + sendHeadMessage(bc, maxRetry, expectedSuccessInsert, head...) 
curRetry := 0 for atomic.LoadUint64(&bc.Metrics.KnownGapsInserts) != expectedKnownGaps { @@ -944,7 +947,9 @@ func (tbc TestBeaconNode) testProcessBlock(bc *beaconclient.BeaconClient, head b } if expectedSuccessInsert > 0 { - validateSlot(bc, head, epoch, "proposed") + for _, msg := range head { + validateSlot(bc, msg, epoch, "proposed") + } } cancel() testStopHeadTracking(ctx, bc, startGoRoutines, true) @@ -958,8 +963,7 @@ func (tbc TestBeaconNode) testMultipleHead(bc *beaconclient.BeaconClient, firstH go bc.CaptureHead(ctx, 2, true) time.Sleep(1 * time.Second) - sendHeadMessage(bc, firstHead, maxRetry, 1) - sendHeadMessage(bc, secondHead, maxRetry, 1) + sendHeadMessage(bc, maxRetry, 2, firstHead, secondHead) curRetry := 0 for atomic.LoadUint64(&bc.Metrics.ReorgInserts) != 1 { @@ -992,9 +996,7 @@ func (tbc TestBeaconNode) testKnownGapsMessages(bc *beaconclient.BeaconClient, t go bc.CaptureHead(ctx, 2, true) time.Sleep(1 * time.Second) - for _, headMsg := range msg { - sendHeadMessage(bc, headMsg, maxRetry, 1) - } + sendHeadMessage(bc, maxRetry, 1, msg...) 
curRetry := 0 for atomic.LoadUint64(&bc.Metrics.KnownGapsInserts) != expectedEntries { diff --git a/pkg/beaconclient/capturehistoric_test.go b/pkg/beaconclient/capturehistoric_test.go index b97827d..92f2a71 100644 --- a/pkg/beaconclient/capturehistoric_test.go +++ b/pkg/beaconclient/capturehistoric_test.go @@ -3,9 +3,7 @@ package beaconclient_test import ( "context" "fmt" - "os" "runtime" - "runtime/pprof" "sync/atomic" "time" @@ -124,7 +122,7 @@ var _ = Describe("Capturehistoric", func() { bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") // Head - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, 74240, maxRetry, 1, 0, 0, BeaconNodeTester.TestEvents["2375703"].HeadMessage) // Historical BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 100, 10) @@ -150,7 +148,7 @@ var _ = Describe("Capturehistoric", func() { BeaconNodeTester.runHistoricalProcess(bc, 2, 1, 0, 0, 0) // Head - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, 74240, maxRetry, 1, 0, 0, BeaconNodeTester.TestEvents["2375703"].HeadMessage) // Known Gaps BeaconNodeTester.writeEventToKnownGaps(bc, 101, 101) @@ -175,7 +173,7 @@ var _ = Describe("Capturehistoric", func() { BeaconNodeTester.runHistoricalProcess(bc, 2, 2, 0, 0, 0) // Head - BeaconNodeTester.testProcessBlock(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, 74240, maxRetry, 1, 0, 0) + BeaconNodeTester.testProcessBlock(bc, 74240, maxRetry, 1, 0, 0, BeaconNodeTester.TestEvents["2375703"].HeadMessage) time.Sleep(2 * time.Second) validatePopularBatchBlocks(bc) @@ -330,7 +328,7 @@ func testStopKnownGapProcessing(ctx context.Context, bc *beaconclient.BeaconClie time.Sleep(3 * time.Second) endNum := runtime.NumGoroutine() - pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) + //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) 
//Expect(endNum <= startGoRoutines).To(BeTrue()) Expect(endNum).To(Equal(startGoRoutines)) } -- 2.45.2 From 4825a54e4e745d958daa70bd646f4070af3a6ec0 Mon Sep 17 00:00:00 2001 From: Abdul Rabbani Date: Thu, 23 Jun 2022 13:47:26 -0400 Subject: [PATCH 16/18] Fix test conditions --- Makefile | 1 + pkg/beaconclient/capturehead_test.go | 21 +++++++++++---------- pkg/beaconclient/capturehistoric_test.go | 4 ---- 3 files changed, 12 insertions(+), 14 deletions(-) diff --git a/Makefile b/Makefile index db6b2c2..ca856ca 100644 --- a/Makefile +++ b/Makefile @@ -89,6 +89,7 @@ system-test-ci: go fmt ./... $(GINKGO) -r --label-filter system \ --randomize-all --randomize-suites \ + --flake-attempts=3 \ --fail-on-pending --keep-going \ --cover --coverprofile=cover.profile \ --trace --json-report=report.json diff --git a/pkg/beaconclient/capturehead_test.go b/pkg/beaconclient/capturehead_test.go index 05aac4f..197befc 100644 --- a/pkg/beaconclient/capturehead_test.go +++ b/pkg/beaconclient/capturehead_test.go @@ -289,28 +289,29 @@ var _ = Describe("Capturehead", Label("head"), func() { validateBeaconState(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectBeaconStateMhKey) }) }) - Context("Correctly formatted Altair Test Blocks", func() { + Context("Correctly formatted Altair Test Blocks", Label("correct-test-altairs"), func() { It("Should turn it into a struct successfully.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") - BeaconNodeTester.testProcessBlock(bc, 74240, maxRetry, 2, 0, 0, BeaconNodeTester.TestEvents["2375703-dummy"].HeadMessage, BeaconNodeTester.TestEvents["2375703-dummy-2"].HeadMessage) + BeaconNodeTester.testProcessBlock(bc, 74240, maxRetry, 1, 
0, 0, BeaconNodeTester.TestEvents["2375703-dummy"].HeadMessage) - //bc = setUpTest(BeaconNodeTester.TestConfig, "2375702") - //BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) - //defer httpmock.DeactivateAndReset() - //BeaconNodeTester.testProcessBlock(bc, , 74240, maxRetry, 1, 0, 0) + bc = setUpTest(BeaconNodeTester.TestConfig, "2375702") + BeaconNodeTester.testProcessBlock(bc, 74240, maxRetry, 1, 0, 0, BeaconNodeTester.TestEvents["2375703-dummy-2"].HeadMessage) }) }) - Context("Correctly formatted Phase0 Test Blocks", func() { + Context("Correctly formatted Phase0 Test Blocks", Label("correct-test-phase0"), func() { It("Should turn it into a struct successfully.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testProcessBlock(bc, 3, maxRetry, 2, 0, 0, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, BeaconNodeTester.TestEvents["100-dummy-2"].HeadMessage) + BeaconNodeTester.testProcessBlock(bc, 3, maxRetry, 1, 0, 0, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage) + + bc = setUpTest(BeaconNodeTester.TestConfig, "99") + BeaconNodeTester.testProcessBlock(bc, 3, maxRetry, 1, 0, 0, BeaconNodeTester.TestEvents["100-dummy-2"].HeadMessage) //bc = setUpTest(BeaconNodeTester.TestConfig, "99") //BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) @@ -336,7 +337,7 @@ var _ = Describe("Capturehead", Label("head"), func() { defer 
httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testProcessBlock(bc, 3, maxRetry, 2, 0, 0, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, BeaconNodeTester.TestEvents["101"].HeadMessage) + BeaconNodeTester.testProcessBlock(bc, 3, maxRetry, 2, 1, 0, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, BeaconNodeTester.TestEvents["101"].HeadMessage) }) }) @@ -942,7 +943,7 @@ func (tbc TestBeaconNode) testProcessBlock(bc *beaconclient.BeaconClient, epoch time.Sleep(1 * time.Second) curRetry = curRetry + 1 if curRetry == maxRetry { - Fail(fmt.Sprintf("Wrong reorg metrics, got: %d, wanted %d", bc.Metrics.KnownGapsInserts, expectedKnownGaps)) + Fail(fmt.Sprintf("Wrong reorg metrics, got: %d, wanted %d", bc.Metrics.ReorgInserts, expectedReorgs)) } } diff --git a/pkg/beaconclient/capturehistoric_test.go b/pkg/beaconclient/capturehistoric_test.go index 92f2a71..735dc63 100644 --- a/pkg/beaconclient/capturehistoric_test.go +++ b/pkg/beaconclient/capturehistoric_test.go @@ -76,7 +76,6 @@ var _ = Describe("Capturehistoric", func() { It("Successfully Process the Blocks", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() - startNum := runtime.NumGoroutine() bc := setUpTest(BeaconNodeTester.TestConfig, "99") BeaconNodeTester.writeEventToKnownGaps(bc, 100, 101) @@ -88,9 +87,6 @@ var _ = Describe("Capturehistoric", func() { time.Sleep(2 * time.Second) validatePopularBatchBlocks(bc) - bc.Db.Close() - endNum := runtime.NumGoroutine() - Expect(startNum).To(Equal(endNum)) }) }) Context("When the start block is greater than the endBlock", func() { -- 2.45.2 From 58d210439223da1e1c79e6c27f541fa70a0c194c Mon Sep 17 00:00:00 2001 From: Abdul Rabbani Date: Thu, 23 Jun 2022 14:10:14 -0400 Subject: [PATCH 17/18] Correct 
condition --- pkg/beaconclient/capturehead_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/beaconclient/capturehead_test.go b/pkg/beaconclient/capturehead_test.go index 197befc..7d7cb1a 100644 --- a/pkg/beaconclient/capturehead_test.go +++ b/pkg/beaconclient/capturehead_test.go @@ -337,7 +337,7 @@ var _ = Describe("Capturehead", Label("head"), func() { defer httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testProcessBlock(bc, 3, maxRetry, 2, 1, 0, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, BeaconNodeTester.TestEvents["101"].HeadMessage) + BeaconNodeTester.testProcessBlock(bc, 3, maxRetry, 2, 1, 1, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, BeaconNodeTester.TestEvents["101"].HeadMessage) }) }) -- 2.45.2 From 1ea88d05ba42b6c9d2885a18b37f5204a40c7013 Mon Sep 17 00:00:00 2001 From: Abdul Rabbani Date: Thu, 23 Jun 2022 14:44:32 -0400 Subject: [PATCH 18/18] Allow custom epoch and status for multiple messages --- .github/workflows/generic-testing.yml | 2 +- pkg/beaconclient/capturehead_test.go | 82 ++++++++++++++++++++---- pkg/beaconclient/capturehistoric_test.go | 18 +++++- 3 files changed, 85 insertions(+), 17 deletions(-) diff --git a/.github/workflows/generic-testing.yml b/.github/workflows/generic-testing.yml index e75f171..3a9b32d 100644 --- a/.github/workflows/generic-testing.yml +++ b/.github/workflows/generic-testing.yml @@ -66,7 +66,7 @@ jobs: run: | until $(docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-indexer.yml" --env-file ./config.sh cp ipld-eth-beacon-indexer:/root/HEALTH ./HEALTH) ; do sleep 10; done cat ./HEALTH - if [[ "$(cat ./HEALTH)" -eq "0" ]]; then echo "Application boot successful" && (exit 0); else docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-indexer.yml" cp ipld-eth-beacon-indexer:/root/ipld-eth-beacon-indexer.log . 
&& cat ipld-eth-beacon-indexer.log && (exit 1); fi + if [[ "$(cat ./HEALTH)" -eq "0" ]]; then echo "Application boot successful" && (exit 0); else docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-beacon-indexer.yml" --env-file ./config.sh cp ipld-eth-beacon-indexer:/root/ipld-eth-beacon-indexer.log . && cat ipld-eth-beacon-indexer.log && (exit 1); fi unit-test: name: Run Unit Tests diff --git a/pkg/beaconclient/capturehead_test.go b/pkg/beaconclient/capturehead_test.go index 7d7cb1a..dd5b1e0 100644 --- a/pkg/beaconclient/capturehead_test.go +++ b/pkg/beaconclient/capturehead_test.go @@ -271,7 +271,11 @@ var _ = Describe("Capturehead", Label("head"), func() { defer httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testProcessBlock(bc, 3, maxRetry, 1, 0, 0, BeaconNodeTester.TestEvents["100"].HeadMessage) + BeaconNodeTester.testProcessBlock(bc, maxRetry, 1, 0, 0, headBlocksSent{ + head: BeaconNodeTester.TestEvents["100"].HeadMessage, + expectedEpoch: 3, + expectStatus: "proposed", + }) validateSignedBeaconBlock(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectParentRoot, BeaconNodeTester.TestEvents["100"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["100"].CorrectSignedBeaconBlockMhKey) validateBeaconState(bc, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["100"].CorrectBeaconStateMhKey) @@ -284,7 +288,11 @@ var _ = Describe("Capturehead", Label("head"), func() { defer httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") - BeaconNodeTester.testProcessBlock(bc, 74240, maxRetry, 1, 0, 0, BeaconNodeTester.TestEvents["2375703"].HeadMessage) + BeaconNodeTester.testProcessBlock(bc, maxRetry, 1, 0, 0, headBlocksSent{ + head: BeaconNodeTester.TestEvents["2375703"].HeadMessage, + expectedEpoch: 74240, + expectStatus: "proposed", + }) validateSignedBeaconBlock(bc, 
BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectParentRoot, BeaconNodeTester.TestEvents["2375703"].CorrectEth1BlockHash, BeaconNodeTester.TestEvents["2375703"].CorrectSignedBeaconBlockMhKey) validateBeaconState(bc, BeaconNodeTester.TestEvents["2375703"].HeadMessage, BeaconNodeTester.TestEvents["2375703"].CorrectBeaconStateMhKey) }) @@ -295,10 +303,18 @@ var _ = Describe("Capturehead", Label("head"), func() { defer httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") - BeaconNodeTester.testProcessBlock(bc, 74240, maxRetry, 1, 0, 0, BeaconNodeTester.TestEvents["2375703-dummy"].HeadMessage) + BeaconNodeTester.testProcessBlock(bc, maxRetry, 1, 0, 0, headBlocksSent{ + head: BeaconNodeTester.TestEvents["2375703-dummy"].HeadMessage, + expectedEpoch: 74240, + expectStatus: "proposed", + }) bc = setUpTest(BeaconNodeTester.TestConfig, "2375702") - BeaconNodeTester.testProcessBlock(bc, 74240, maxRetry, 1, 0, 0, BeaconNodeTester.TestEvents["2375703-dummy-2"].HeadMessage) + BeaconNodeTester.testProcessBlock(bc, maxRetry, 1, 0, 0, headBlocksSent{ + head: BeaconNodeTester.TestEvents["2375703-dummy-2"].HeadMessage, + expectedEpoch: 74240, + expectStatus: "proposed", + }) }) }) @@ -308,10 +324,18 @@ var _ = Describe("Capturehead", Label("head"), func() { defer httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testProcessBlock(bc, 3, maxRetry, 1, 0, 0, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage) + BeaconNodeTester.testProcessBlock(bc, maxRetry, 1, 0, 0, headBlocksSent{ + head: BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, + expectedEpoch: 3, + expectStatus: "proposed", + }) bc = setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testProcessBlock(bc, 3, maxRetry, 1, 0, 0, BeaconNodeTester.TestEvents["100-dummy-2"].HeadMessage) + BeaconNodeTester.testProcessBlock(bc, maxRetry, 1, 0, 0, headBlocksSent{ + head: 
BeaconNodeTester.TestEvents["100-dummy-2"].HeadMessage, + expectedEpoch: 3, + expectStatus: "proposed", + }) //bc = setUpTest(BeaconNodeTester.TestConfig, "99") //BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) @@ -327,17 +351,35 @@ var _ = Describe("Capturehead", Label("head"), func() { defer httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testProcessBlock(bc, 3, maxRetry, 2, 0, 0, BeaconNodeTester.TestEvents["100"].HeadMessage, BeaconNodeTester.TestEvents["101"].HeadMessage) + BeaconNodeTester.testProcessBlock(bc, maxRetry, 2, 0, 0, headBlocksSent{ + head: BeaconNodeTester.TestEvents["100"].HeadMessage, + expectedEpoch: 3, + expectStatus: "proposed", + }, headBlocksSent{ + head: BeaconNodeTester.TestEvents["101"].HeadMessage, + expectedEpoch: 3, + expectStatus: "proposed", + }) }) }) - Context("Two consecutive blocks with a bad parent", func() { + Context("Two consecutive blocks with a bad parent", Label("bad-parent"), func() { It("Should add the previous block to the knownGaps table.", func() { BeaconNodeTester.SetupBeaconNodeMock(BeaconNodeTester.TestEvents, BeaconNodeTester.TestConfig.protocol, BeaconNodeTester.TestConfig.address, BeaconNodeTester.TestConfig.port, BeaconNodeTester.TestConfig.dummyParentRoot) defer httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "99") - BeaconNodeTester.testProcessBlock(bc, 3, maxRetry, 2, 1, 1, BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, BeaconNodeTester.TestEvents["101"].HeadMessage) + BeaconNodeTester.testProcessBlock(bc, maxRetry, 2, 1, 1, + headBlocksSent{ + head: BeaconNodeTester.TestEvents["100-dummy"].HeadMessage, + expectedEpoch: 3, + expectStatus: "forked", + }, + headBlocksSent{ + head: BeaconNodeTester.TestEvents["101"].HeadMessage, + expectedEpoch: 3, + 
expectStatus: "proposed", + }) }) }) @@ -362,7 +404,11 @@ var _ = Describe("Capturehead", Label("head"), func() { defer httpmock.DeactivateAndReset() bc := setUpTest(BeaconNodeTester.TestConfig, "101") - BeaconNodeTester.testProcessBlock(bc, 3, maxRetry, 0, 1, 0, BeaconNodeTester.TestEvents["102-wrong-ssz-1"].HeadMessage) + BeaconNodeTester.testProcessBlock(bc, maxRetry, 0, 1, 0, headBlocksSent{ + head: BeaconNodeTester.TestEvents["102-wrong-ssz-1"].HeadMessage, + expectedEpoch: 3, + expectStatus: "proposed", + }) knownGapCount := countKnownGapsTable(bc.Db) Expect(knownGapCount).To(Equal(1)) @@ -921,13 +967,17 @@ func (tbc TestBeaconNode) testMultipleReorgs(bc *beaconclient.BeaconClient, firs } // A test to validate a single block was processed correctly -func (tbc TestBeaconNode) testProcessBlock(bc *beaconclient.BeaconClient, epoch int, maxRetry int, expectedSuccessInsert uint64, expectedKnownGaps uint64, expectedReorgs uint64, head ...beaconclient.Head) { +func (tbc TestBeaconNode) testProcessBlock(bc *beaconclient.BeaconClient, maxRetry int, expectedSuccessInsert uint64, expectedKnownGaps uint64, expectedReorgs uint64, head ...headBlocksSent) { //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) startGoRoutines := runtime.NumGoroutine() ctx, cancel := context.WithCancel(context.Background()) go bc.CaptureHead(ctx, 2, true) time.Sleep(1 * time.Second) - sendHeadMessage(bc, maxRetry, expectedSuccessInsert, head...) + heads := make([]beaconclient.Head, 0) + for _, msgs := range head { + heads = append(heads, msgs.head) + } + sendHeadMessage(bc, maxRetry, expectedSuccessInsert, heads...) 
curRetry := 0 for atomic.LoadUint64(&bc.Metrics.KnownGapsInserts) != expectedKnownGaps { @@ -949,7 +999,7 @@ func (tbc TestBeaconNode) testProcessBlock(bc *beaconclient.BeaconClient, epoch if expectedSuccessInsert > 0 { for _, msg := range head { - validateSlot(bc, msg, epoch, "proposed") + validateSlot(bc, msg.head, msg.expectedEpoch, msg.expectStatus) } } cancel() @@ -1046,3 +1096,9 @@ func testStopHeadTracking(ctx context.Context, bc *beaconclient.BeaconClient, st //Expect(endNum <= startGoRoutines).To(BeTrue()) Expect(endNum).To(Equal(startGoRoutines)) } + +type headBlocksSent struct { + head beaconclient.Head + expectedEpoch int + expectStatus string +} diff --git a/pkg/beaconclient/capturehistoric_test.go b/pkg/beaconclient/capturehistoric_test.go index 735dc63..c644129 100644 --- a/pkg/beaconclient/capturehistoric_test.go +++ b/pkg/beaconclient/capturehistoric_test.go @@ -118,7 +118,11 @@ var _ = Describe("Capturehistoric", func() { bc := setUpTest(BeaconNodeTester.TestConfig, "2375702") // Head - BeaconNodeTester.testProcessBlock(bc, 74240, maxRetry, 1, 0, 0, BeaconNodeTester.TestEvents["2375703"].HeadMessage) + BeaconNodeTester.testProcessBlock(bc, maxRetry, 1, 0, 0, headBlocksSent{ + head: BeaconNodeTester.TestEvents["2375703"].HeadMessage, + expectedEpoch: 74240, + expectStatus: "proposed", + }) // Historical BeaconNodeTester.writeEventToHistoricProcess(bc, 100, 100, 10) @@ -144,7 +148,11 @@ var _ = Describe("Capturehistoric", func() { BeaconNodeTester.runHistoricalProcess(bc, 2, 1, 0, 0, 0) // Head - BeaconNodeTester.testProcessBlock(bc, 74240, maxRetry, 1, 0, 0, BeaconNodeTester.TestEvents["2375703"].HeadMessage) + BeaconNodeTester.testProcessBlock(bc, maxRetry, 1, 0, 0, headBlocksSent{ + head: BeaconNodeTester.TestEvents["2375703"].HeadMessage, + expectedEpoch: 74240, + expectStatus: "proposed", + }) // Known Gaps BeaconNodeTester.writeEventToKnownGaps(bc, 101, 101) @@ -169,7 +177,11 @@ var _ = Describe("Capturehistoric", func() { 
BeaconNodeTester.runHistoricalProcess(bc, 2, 2, 0, 0, 0) // Head - BeaconNodeTester.testProcessBlock(bc, 74240, maxRetry, 1, 0, 0, BeaconNodeTester.TestEvents["2375703"].HeadMessage) + BeaconNodeTester.testProcessBlock(bc, maxRetry, 1, 0, 0, headBlocksSent{ + head: BeaconNodeTester.TestEvents["2375703"].HeadMessage, + expectedEpoch: 74240, + expectStatus: "proposed", + }) time.Sleep(2 * time.Second) validatePopularBatchBlocks(bc) -- 2.45.2