Capture the unique identifier everywhere it's needed. #51
@@ -49,7 +49,7 @@ func bootApp() {
 		viper.GetString("bc.address"), viper.GetInt("bc.port"), viper.GetString("bc.connectionProtocol"), viper.GetString("bc.type"), viper.GetInt("bc.bootRetryInterval"), viper.GetInt("bc.bootMaxRetry"),
 		viper.GetInt("kg.increment"), "boot", viper.GetBool("t.skipSync"), viper.GetInt("bc.uniqueNodeIdentifier"))
 	if err != nil {
-		loghelper.LogError(err).Error("Unable to Start application")
+		StopApplicationPreBoot(err, Db)
 	}
 
 	log.Info("Boot complete, we are going to shutdown.")
@@ -125,6 +125,8 @@ func init() {
 	exitErr(err)
 	err = viper.BindPFlag("db.name", captureCmd.PersistentFlags().Lookup("db.name"))
 	exitErr(err)
+	err = viper.BindPFlag("db.driver", captureCmd.PersistentFlags().Lookup("db.driver"))
+	exitErr(err)
 
 	//// Testing Specific
 	err = viper.BindPFlag("t.skipSync", captureCmd.PersistentFlags().Lookup("t.skipSync"))
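Side note on the new `db.driver` binding: it follows the usual cobra/viper pattern — define a persistent flag on the command, bind it with `viper.BindPFlag`, then read it via `viper.GetString` anywhere in the app. A minimal, self-contained sketch of that pattern (the default value and command wiring here are assumptions for illustration, not taken from this repo):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

func main() {
	captureCmd := &cobra.Command{
		Use: "capture",
		Run: func(cmd *cobra.Command, args []string) {
			// Once bound, the CLI value is readable through viper anywhere in the app.
			fmt.Println("db.driver =", viper.GetString("db.driver"))
		},
	}
	// Hypothetical flag definition; the real command registers its flags elsewhere.
	captureCmd.PersistentFlags().String("db.driver", "pgx", "database driver to use")
	if err := viper.BindPFlag("db.driver", captureCmd.PersistentFlags().Lookup("db.driver")); err != nil {
		panic(err) // stands in for the exitErr(err) pattern above
	}
	if err := captureCmd.Execute(); err != nil {
		panic(err)
	}
}
```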
@@ -94,6 +94,7 @@ func init() {
 	captureCmd.AddCommand(headCmd)
 }
 
+// Start prometheus server
 func serveProm(addr string) {
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())
@@ -49,6 +49,12 @@ func ShutdownHeadTracking(ctx context.Context, notifierCh chan os.Signal, waitTi
 			if err != nil {
 				loghelper.LogError(err).Error("Unable to trigger shutdown of head tracking")
 			}
+			if BC.KnownGapsProcess != (beaconclient.KnownGapsProcessing{}) {
+				err = BC.StopKnownGapsProcessing()
+				if err != nil {
+					loghelper.LogError(err).Error("Unable to stop processing known gaps")
+				}
+			}
 			return err
 		},
 	})
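The guard added here only stops known-gaps processing when `BC.KnownGapsProcess` differs from its zero value, i.e. when the service was actually started. That comparison is legal in Go because every field of `KnownGapsProcessing` (interface, pointer, int, channel) is a comparable type. A minimal sketch of the pattern, using a hypothetical stand-in struct:

```go
package main

import "fmt"

// gapsProcessing is a hypothetical stand-in for beaconclient.KnownGapsProcessing;
// all of its fields are comparable, so == / != against the empty literal is allowed.
type gapsProcessing struct {
	uniqueNodeIdentifier int
	finishProcessing     chan int
}

func main() {
	var p gapsProcessing
	fmt.Println(p != (gapsProcessing{})) // false: never started, skip the stop call
	p = gapsProcessing{uniqueNodeIdentifier: 42, finishProcessing: make(chan int)}
	fmt.Println(p != (gapsProcessing{})) // true: started, safe to stop it
}
```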
@@ -64,10 +70,12 @@ func ShutdownHistoricProcessing(ctx context.Context, notifierCh chan os.Signal,
 			if err != nil {
 				loghelper.LogError(err).Error("Unable to stop processing historic")
 			}
+			if BC.KnownGapsProcess != (beaconclient.KnownGapsProcessing{}) {
 				err = BC.StopKnownGapsProcessing()
 				if err != nil {
 					loghelper.LogError(err).Error("Unable to stop processing known gaps")
 				}
+			}
 			return err
 		},
 	})
@@ -50,6 +50,7 @@ type BeaconClient struct {
 	Metrics *BeaconClientMetrics // An object used to keep track of certain BeaconClient Metrics.
 	KnownGapTableIncrement int // The max number of slots within a single known_gaps table entry.
 	UniqueNodeIdentifier int // The unique identifier within the cluster of this individual node.
+	KnownGapsProcess KnownGapsProcessing // object keeping track of knowngaps processing
 
 	// Used for Head Tracking
 
@@ -68,8 +69,7 @@ type BeaconClient struct {
 	// This value is lazily updated. Therefore at times it will be outdated.
 	LatestSlotInBeaconServer int64
 	PerformHistoricalProcessing bool // Should we perform historical processing?
-	HistoricalProcess historicProcessing
-	KnownGapsProcess knownGapsProcessing
+	HistoricalProcess historicProcessing // object keeping track of historical processing
 }
 
 // A struct to keep track of relevant the head event topic.
@@ -50,7 +50,7 @@ var (
 		WHERE checked_out_by=$1`
 )
 
-type knownGapsProcessing struct {
+type KnownGapsProcessing struct {
 	db sql.Database //db connection
 	metrics *BeaconClientMetrics // metrics for beaconclient
 	uniqueNodeIdentifier int // node unique identifier.
@@ -60,7 +60,7 @@ type knownGapsProcessing struct {
 // This function will perform all the heavy lifting for tracking the head of the chain.
 func (bc *BeaconClient) ProcessKnownGaps(maxWorkers int) []error {
 	log.Info("We are starting the known gaps processing service.")
-	bc.KnownGapsProcess = knownGapsProcessing{db: bc.Db, uniqueNodeIdentifier: bc.UniqueNodeIdentifier, metrics: bc.Metrics, finishProcessing: make(chan int)}
+	bc.KnownGapsProcess = KnownGapsProcessing{db: bc.Db, uniqueNodeIdentifier: bc.UniqueNodeIdentifier, metrics: bc.Metrics, finishProcessing: make(chan int)}
 	errs := handleBatchProcess(maxWorkers, bc.KnownGapsProcess, bc.KnownGapsProcess.finishProcessing, bc.KnownGapsProcess.db, bc.ServerEndpoint, bc.Metrics)
 	log.Debug("Exiting known gaps processing service")
 	return errs
@@ -77,17 +77,17 @@ func (bc *BeaconClient) StopKnownGapsProcessing() error {
 }
 
 // Get a single row of historical slots from the table.
-func (kgp knownGapsProcessing) getSlotRange(slotCh chan<- slotsToProcess) []error {
+func (kgp KnownGapsProcessing) getSlotRange(slotCh chan<- slotsToProcess) []error {
 	return getBatchProcessRow(kgp.db, getKgEntryStmt, checkKgEntryStmt, lockKgEntryStmt, slotCh, strconv.Itoa(kgp.uniqueNodeIdentifier))
 }
 
 // Remove the table entry.
-func (kgp knownGapsProcessing) removeTableEntry(processCh <-chan slotsToProcess) error {
+func (kgp KnownGapsProcessing) removeTableEntry(processCh <-chan slotsToProcess) error {
 	return removeRowPostProcess(kgp.db, processCh, QueryBySlotStmt, deleteKgEntryStmt)
 }
 
 // Remove the table entry.
-func (kgp knownGapsProcessing) handleProcessingErrors(errMessages <-chan batchHistoricError) {
+func (kgp KnownGapsProcessing) handleProcessingErrors(errMessages <-chan batchHistoricError) {
 	for {
 		errMs := <-errMessages
 
@@ -117,7 +117,7 @@ func (kgp knownGapsProcessing) handleProcessingErrors(errMessages <-chan batchHi
 }
 
 // Updated checked_out column for the uniqueNodeIdentifier.
-func (kgp knownGapsProcessing) releaseDbLocks() error {
+func (kgp KnownGapsProcessing) releaseDbLocks() error {
 	go func() { kgp.finishProcessing <- 1 }()
 	res, err := kgp.db.Exec(context.Background(), releaseKgLockStmt, kgp.uniqueNodeIdentifier)
 	if err != nil {
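For context, `releaseDbLocks` clears the `checked_out` marker on every row this node had claimed, keyed by its unique identifier. A rough, self-contained sketch of that idea using the standard `database/sql` package (the table name, SQL text, driver, and connection string are assumptions for illustration; the repo uses its own `sql.Database` wrapper and prepared statements):

```go
package main

import (
	"context"
	"database/sql"
	"fmt"

	_ "github.com/lib/pq" // driver choice is an assumption for this sketch
)

// releaseLocks is an illustrative stand-in for releaseDbLocks: it releases every
// known-gaps row that this node (identified by uniqueNodeIdentifier) had checked out.
func releaseLocks(ctx context.Context, db *sql.DB, uniqueNodeIdentifier int) (int64, error) {
	res, err := db.ExecContext(ctx,
		`UPDATE known_gaps SET checked_out=false WHERE checked_out_by=$1`, // hypothetical statement
		uniqueNodeIdentifier)
	if err != nil {
		return 0, err
	}
	return res.RowsAffected()
}

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost:5432/db?sslmode=disable")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	rows, err := releaseLocks(context.Background(), db, 42)
	fmt.Println("rows released:", rows, "err:", err)
}
```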
@@ -49,7 +49,7 @@ func SetupPostgresDb(dbHostname string, dbPort int, dbName string, dbUsername st
 			"driver_name_provided": driverName,
 		}).Error("Can't resolve driver type")
 	}
-	log.Info("Using Driver:", DbDriver)
+	log.Info("Using Driver: ", DbDriver)
 
 	postgresConfig := Config{
 		Hostname: dbHostname,