Set minimum slot for historic and known-gaps processing.
parent 6034679ec1
commit 8af61d2a2a
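This change makes the lowest slot that the historic and known-gaps services will pick up configurable. Two new config keys, bc.minimumSlot and kg.minimumSlot, are read with viper.GetUint64 and threaded through CaptureHistoric and ProcessKnownGaps into handleBatchProcess, getSlotRange, and getBatchProcessRow, where the value is bound as $1 so that rows in eth_beacon.historic_process and eth_beacon.known_gaps whose end_slot falls below the minimum are never selected or checked out. An illustrative sketch of the resulting row-selection query follows the diff.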
@@ -81,7 +81,7 @@ func startFullProcessing() {
 
 	errG, _ := errgroup.WithContext(context.Background())
 	errG.Go(func() error {
-		errs := Bc.CaptureHistoric(hpContext, viper.GetInt("bc.maxHistoricProcessWorker"))
+		errs := Bc.CaptureHistoric(hpContext, viper.GetInt("bc.maxHistoricProcessWorker"), viper.GetUint64("bc.minimumSlot"))
 		if len(errs) != 0 {
 			if len(errs) != 0 {
 				log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing historic events")
@@ -95,7 +95,7 @@ func startFullProcessing() {
 	go func() {
 		errG := new(errgroup.Group)
 		errG.Go(func() error {
-			errs := Bc.ProcessKnownGaps(kgCtx, viper.GetInt("kg.maxKnownGapsWorker"))
+			errs := Bc.ProcessKnownGaps(kgCtx, viper.GetInt("kg.maxKnownGapsWorker"), viper.GetUint64("kg.minimumSlot"))
 			if len(errs) != 0 {
 				log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps")
 				return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps")
@@ -69,7 +69,7 @@ func startHeadTracking() {
 	go func() {
 		errG := new(errgroup.Group)
 		errG.Go(func() error {
-			errs := Bc.ProcessKnownGaps(kgCtx, viper.GetInt("kg.maxKnownGapsWorker"))
+			errs := Bc.ProcessKnownGaps(kgCtx, viper.GetInt("kg.maxKnownGapsWorker"), viper.GetUint64("kg.minimumSlot"))
 			if len(errs) != 0 {
 				log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps")
 				return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps")
@@ -65,7 +65,7 @@ func startHistoricProcessing() {
 
 	errG, _ := errgroup.WithContext(context.Background())
 	errG.Go(func() error {
-		errs := Bc.CaptureHistoric(hpContext, viper.GetInt("bc.maxHistoricProcessWorker"))
+		errs := Bc.CaptureHistoric(hpContext, viper.GetInt("bc.maxHistoricProcessWorker"), viper.GetUint64("bc.minimumSlot"))
 		if len(errs) != 0 {
 			if len(errs) != 0 {
 				log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing historic events")
@@ -80,7 +80,7 @@ func startHistoricProcessing() {
 	go func() {
 		errG := new(errgroup.Group)
 		errG.Go(func() error {
-			errs := Bc.ProcessKnownGaps(kgContext, viper.GetInt("kg.maxKnownGapsWorker"))
+			errs := Bc.ProcessKnownGaps(kgContext, viper.GetInt("kg.maxKnownGapsWorker"), viper.GetUint64("kg.minimumSlot"))
 			if len(errs) != 0 {
 				log.WithFields(log.Fields{"errs": errs}).Error("All errors when processing knownGaps")
 				return fmt.Errorf("Application ended because there were too many error when attempting to process knownGaps")
@@ -1,6 +1,6 @@
 {
   "db": {
-    "address": "vulcanize_db",
+    "address": "localhost",
     "password": "password",
     "port": 5432,
     "username": "vdbm",
@@ -8,7 +8,7 @@
     "driver": "PGX"
   },
   "bc": {
-    "address": "host.docker.internal",
+    "address": "localhost",
     "port": 5052,
     "type": "lighthouse",
     "bootRetryInterval": 30,
@@ -18,7 +18,8 @@
     "uniqueNodeIdentifier": 100,
     "checkDb": true,
     "performBeaconStateProcessing": false,
-    "performBeaconBlockProcessing": true
+    "performBeaconBlockProcessing": true,
+    "minimumSlot": 4700013
   },
   "t": {
     "skipSync": true
@@ -31,8 +32,9 @@
   },
   "kg": {
     "increment": 10000,
-    "processKnownGaps": false,
-    "maxKnownGapsWorker": 2
+    "processKnownGaps": true,
+    "maxKnownGapsWorker": 2,
+    "minimumSlot": 4700013
   },
   "pm": {
     "address": "localhost",
@@ -27,10 +27,10 @@ import (
 )
 
 // This function will perform all the heavy lifting for tracking the head of the chain.
-func (bc *BeaconClient) CaptureHistoric(ctx context.Context, maxWorkers int) []error {
+func (bc *BeaconClient) CaptureHistoric(ctx context.Context, maxWorkers int, minimumSlot uint64) []error {
 	log.Info("We are starting the historical processing service.")
 	bc.HistoricalProcess = HistoricProcessing{db: bc.Db, metrics: bc.Metrics, uniqueNodeIdentifier: bc.UniqueNodeIdentifier}
-	errs := handleBatchProcess(ctx, maxWorkers, bc.HistoricalProcess, bc.SlotProcessingDetails(), bc.Metrics.IncrementHistoricSlotProcessed)
+	errs := handleBatchProcess(ctx, maxWorkers, bc.HistoricalProcess, bc.SlotProcessingDetails(), bc.Metrics.IncrementHistoricSlotProcessed, minimumSlot)
 	log.Debug("Exiting Historical")
 	return errs
 }
@@ -52,10 +52,10 @@ func (bc *BeaconClient) StopHistoric(cancel context.CancelFunc) error {
 //
 // 2. Known Gaps Processing
 type BatchProcessing interface {
-	getSlotRange(context.Context, chan<- slotsToProcess) []error // Write the slots to process in a channel, return an error if you cant get the next slots to write.
+	getSlotRange(context.Context, chan<- slotsToProcess, uint64) []error // Write the slots to process in a channel, return an error if you cant get the next slots to write.
 	handleProcessingErrors(context.Context, <-chan batchHistoricError) // Custom logic to handle errors.
 	removeTableEntry(context.Context, <-chan slotsToProcess) error // With the provided start and end slot, remove the entry from the database.
 	releaseDbLocks() error // Update the checked_out column to false for whatever table is being updated.
 }
 
 /// ^^^
@@ -90,7 +90,7 @@ type batchHistoricError struct {
 // 4. Remove the slot entry from the DB.
 //
 // 5. Handle any errors.
-func handleBatchProcess(ctx context.Context, maxWorkers int, bp BatchProcessing, spd SlotProcessingDetails, incrementTracker func(uint64)) []error {
+func handleBatchProcess(ctx context.Context, maxWorkers int, bp BatchProcessing, spd SlotProcessingDetails, incrementTracker func(uint64), minimumSlot uint64) []error {
 	slotsCh := make(chan slotsToProcess)
 	workCh := make(chan uint64)
 	processedCh := make(chan slotsToProcess)
@@ -160,7 +160,7 @@ func handleBatchProcess(ctx context.Context, maxWorkers int, bp BatchProcessing,
 
 	// Get slots from the DB.
 	go func() {
-		errs := bp.getSlotRange(ctx, slotsCh) // Periodically adds new entries....
+		errs := bp.getSlotRange(ctx, slotsCh, minimumSlot) // Periodically adds new entries....
 		if errs != nil {
 			finalErrCh <- errs
 		}
@@ -205,7 +205,7 @@ func (tbc TestBeaconNode) writeEventToHistoricProcess(bc *beaconclient.BeaconCli
 // Start the CaptureHistoric function, and check for the correct inserted slots.
 func (tbc TestBeaconNode) runHistoricalProcess(bc *beaconclient.BeaconClient, maxWorkers int, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) {
 	ctx, cancel := context.WithCancel(context.Background())
-	go bc.CaptureHistoric(ctx, maxWorkers)
+	go bc.CaptureHistoric(ctx, maxWorkers, 0)
 	validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError)
 	log.Debug("Calling the stop function for historical processing..")
 	err := bc.StopHistoric(cancel)
@@ -217,7 +217,7 @@ func (tbc TestBeaconNode) runHistoricalProcess(bc *beaconclient.BeaconClient, ma
 // Wrapper function that processes knownGaps
 func (tbc TestBeaconNode) runKnownGapsProcess(bc *beaconclient.BeaconClient, maxWorkers int, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError uint64) {
 	ctx, cancel := context.WithCancel(context.Background())
-	go bc.ProcessKnownGaps(ctx, maxWorkers)
+	go bc.ProcessKnownGaps(ctx, maxWorkers, 0)
 	validateMetrics(bc, expectedInserts, expectedReorgs, expectedKnownGaps, expectedKnownGapsReprocessError)
 	err := bc.StopKnownGapsProcessing(cancel)
 	time.Sleep(5 * time.Second)
@@ -33,11 +33,11 @@ import (
 var (
 	// Get a single highest priority and non-checked out row row from eth_beacon.historical_process
 	getHpEntryStmt string = `SELECT start_slot, end_slot FROM eth_beacon.historic_process
-	WHERE checked_out=false
+	WHERE checked_out=false AND end_slot >= $1
 	ORDER BY priority ASC
 	LIMIT 1;`
 	// Used to periodically check to see if there is a new entry in the eth_beacon.historic_process table.
-	checkHpEntryStmt string = `SELECT * FROM eth_beacon.historic_process WHERE checked_out=false;`
+	checkHpEntryStmt string = `SELECT * FROM eth_beacon.historic_process WHERE checked_out=false AND end_slot >= $1;`
 	// Used to checkout a row from the eth_beacon.historic_process table
 	lockHpEntryStmt string = `UPDATE eth_beacon.historic_process
 	SET checked_out=true, checked_out_by=$3
@@ -58,8 +58,8 @@ type HistoricProcessing struct {
 }
 
 // Get a single row of historical slots from the table.
-func (hp HistoricProcessing) getSlotRange(ctx context.Context, slotCh chan<- slotsToProcess) []error {
-	return getBatchProcessRow(ctx, hp.db, getHpEntryStmt, checkHpEntryStmt, lockHpEntryStmt, slotCh, strconv.Itoa(hp.uniqueNodeIdentifier))
+func (hp HistoricProcessing) getSlotRange(ctx context.Context, slotCh chan<- slotsToProcess, minimumSlot uint64) []error {
+	return getBatchProcessRow(ctx, hp.db, getHpEntryStmt, checkHpEntryStmt, lockHpEntryStmt, slotCh, strconv.Itoa(hp.uniqueNodeIdentifier), minimumSlot)
 }
 
 // Remove the table entry.
@@ -123,7 +123,7 @@ func processSlotRangeWorker(ctx context.Context, workCh <-chan uint64, errCh cha
 // It also locks the row by updating the checked_out column.
 // The statement for getting the start_slot and end_slot must be provided.
 // The statement for "locking" the row must also be provided.
-func getBatchProcessRow(ctx context.Context, db sql.Database, getStartEndSlotStmt string, checkNewRowsStmt string, checkOutRowStmt string, slotCh chan<- slotsToProcess, uniqueNodeIdentifier string) []error {
+func getBatchProcessRow(ctx context.Context, db sql.Database, getStartEndSlotStmt string, checkNewRowsStmt string, checkOutRowStmt string, slotCh chan<- slotsToProcess, uniqueNodeIdentifier string, minimumSlot uint64) []error {
 	errCount := make([]error, 0)
 
 	// 5 is an arbitrary number. It allows us to retry a few times before
@@ -139,7 +139,7 @@ func getBatchProcessRow(ctx context.Context, db sql.Database, getStartEndSlotStm
 				"errCount": errCount,
 			}).Error("New error entry added")
 		}
-		processRow, err := db.Exec(context.Background(), checkNewRowsStmt)
+		processRow, err := db.Exec(context.Background(), checkNewRowsStmt, minimumSlot)
 		if err != nil {
 			errCount = append(errCount, err)
 		}
@@ -172,7 +172,7 @@ func getBatchProcessRow(ctx context.Context, db sql.Database, getStartEndSlotStm
 
 		// Query the DB for slots.
 		sp := slotsToProcess{}
-		err = tx.QueryRow(dbCtx, getStartEndSlotStmt).Scan(&sp.startSlot, &sp.endSlot)
+		err = tx.QueryRow(dbCtx, getStartEndSlotStmt, minimumSlot).Scan(&sp.startSlot, &sp.endSlot)
 		if err != nil {
 			if err == pgx.ErrNoRows {
 				time.Sleep(1 * time.Second)
@@ -30,11 +30,11 @@ import (
 var (
 	// Get a single non-checked out row row from eth_beacon.known_gaps.
 	getKgEntryStmt string = `SELECT start_slot, end_slot FROM eth_beacon.known_gaps
-	WHERE checked_out=false
+	WHERE checked_out=false AND end_slot >= $1
 	ORDER BY priority ASC
 	LIMIT 1;`
 	// Used to periodically check to see if there is a new entry in the eth_beacon.known_gaps table.
-	checkKgEntryStmt string = `SELECT * FROM eth_beacon.known_gaps WHERE checked_out=false;`
+	checkKgEntryStmt string = `SELECT * FROM eth_beacon.known_gaps WHERE checked_out=false AND end_slot >= $1;`
 	// Used to checkout a row from the eth_beacon.known_gaps table
 	lockKgEntryStmt string = `UPDATE eth_beacon.known_gaps
 	SET checked_out=true, checked_out_by=$3
@@ -58,10 +58,10 @@ type KnownGapsProcessing struct {
 }
 
 // This function will perform all the heavy lifting for tracking the head of the chain.
-func (bc *BeaconClient) ProcessKnownGaps(ctx context.Context, maxWorkers int) []error {
+func (bc *BeaconClient) ProcessKnownGaps(ctx context.Context, maxWorkers int, minimumSlot uint64) []error {
 	log.Info("We are starting the known gaps processing service.")
 	bc.KnownGapsProcess = KnownGapsProcessing{db: bc.Db, uniqueNodeIdentifier: bc.UniqueNodeIdentifier, metrics: bc.Metrics}
-	errs := handleBatchProcess(ctx, maxWorkers, bc.KnownGapsProcess, bc.SlotProcessingDetails(), bc.Metrics.IncrementKnownGapsProcessed)
+	errs := handleBatchProcess(ctx, maxWorkers, bc.KnownGapsProcess, bc.SlotProcessingDetails(), bc.Metrics.IncrementKnownGapsProcessed, minimumSlot)
 	log.Debug("Exiting known gaps processing service")
 	return errs
 }
@@ -78,8 +78,8 @@ func (bc *BeaconClient) StopKnownGapsProcessing(cancel context.CancelFunc) error
 }
 
 // Get a single row of historical slots from the table.
-func (kgp KnownGapsProcessing) getSlotRange(ctx context.Context, slotCh chan<- slotsToProcess) []error {
-	return getBatchProcessRow(ctx, kgp.db, getKgEntryStmt, checkKgEntryStmt, lockKgEntryStmt, slotCh, strconv.Itoa(kgp.uniqueNodeIdentifier))
+func (kgp KnownGapsProcessing) getSlotRange(ctx context.Context, slotCh chan<- slotsToProcess, minimumSlot uint64) []error {
+	return getBatchProcessRow(ctx, kgp.db, getKgEntryStmt, checkKgEntryStmt, lockKgEntryStmt, slotCh, strconv.Itoa(kgp.uniqueNodeIdentifier), minimumSlot)
 }
 
 // Remove the table entry.
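For illustration only (not part of the commit): a standalone sketch of how the updated historic_process selection behaves once the minimum slot is bound as $1. The statement text and the 4700013 slot value are taken from the diff above; the connection string, database name, and the standalone program around the query are assumptions for the sake of a runnable example.

package main

import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v4"
)

// Same filter the commit adds to getHpEntryStmt: skip any row whose end_slot
// is below the configured minimum slot.
const getHpEntryStmt = `SELECT start_slot, end_slot FROM eth_beacon.historic_process
	WHERE checked_out=false AND end_slot >= $1
	ORDER BY priority ASC
	LIMIT 1;`

func main() {
	ctx := context.Background()
	// Connection string is illustrative; it only mirrors the example config (vdbm/password, port 5432).
	conn, err := pgx.Connect(ctx, "postgres://vdbm:password@localhost:5432/vulcanize_db")
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)

	minimumSlot := uint64(4700013) // value used for bc.minimumSlot / kg.minimumSlot in the example config
	var startSlot, endSlot uint64
	err = conn.QueryRow(ctx, getHpEntryStmt, minimumSlot).Scan(&startSlot, &endSlot)
	if err == pgx.ErrNoRows {
		fmt.Println("no eligible historic_process rows at or above the minimum slot")
		return
	} else if err != nil {
		panic(err)
	}
	fmt.Printf("next range to process: %d-%d\n", startSlot, endSlot)
}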