Make pprof configurable, remove todos
commit a2772762e1
parent b1440d9673
@@ -123,6 +123,9 @@ An example config file:
 # path to custom chain config file (optional)
 # keep chainID same as that in chain config file
 chainConfig = "./chain.json" # ETH_CHAIN_CONFIG
+
+[debug]
+pprof = false # Enable pprof agent listener on port 6060
 ```

 ### Local Setup
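As an aside on how this new stanza is consumed: a minimal standalone sketch, assuming a viper-based loader like the one wired up in the init() hunks below. The ./config.toml path and the package main scaffolding are illustrative, not repo code.

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	// Load a TOML file shaped like the example above (path is illustrative).
	viper.SetConfigFile("./config.toml")
	if err := viper.ReadInConfig(); err != nil {
		fmt.Println("no config file, relying on env/defaults:", err)
	}

	// Bind the key to its env var; in viper's precedence order,
	// a set DEBUG_PPROF overrides the value from the [debug] stanza.
	viper.BindEnv("debug.pprof", "DEBUG_PPROF")

	fmt.Println("pprof enabled:", viper.GetBool("debug.pprof"))
}
```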
@@ -82,6 +82,8 @@ const (
 	DATABASE_MAX_CONN_LIFETIME = "DATABASE_MAX_CONN_LIFETIME"
 	DATABASE_CONN_TIMEOUT = "DATABSE_CONN_TIMEOUT"
 	DATABASE_MAX_CONN_IDLE_TIME = "DATABASE_MAX_CONN_IDLE_TIME"
+
+	DEBUG_PPROF = "DEBUG_PPROF"
 )

 // Bind env vars for eth node and DB configuration
@@ -136,7 +138,7 @@ func init() {
 	viper.BindEnv("statediff.prerun", STATEDIFF_PRERUN)
 	viper.BindEnv("prerun.only", PRERUN_ONLY)
-	viper.BindEnv("prerun.only", PRERUN_PARALLEL)
+	viper.BindEnv("prerun.parallel", PRERUN_PARALLEL)
 	viper.BindEnv("prerun.start", PRERUN_RANGE_START)
 	viper.BindEnv("prerun.stop", PRERUN_RANGE_STOP)
 	viper.BindEnv("prerun.params.intermediateStateNodes", PRERUN_INTERMEDIATE_STATE_NODES)
@@ -148,4 +150,6 @@ func init() {
 
 	viper.BindEnv("log.level", LOG_LEVEL)
 	viper.BindEnv("log.file", LOG_FILE_PATH)
+
+	viper.BindEnv("debug.pprof", DEBUG_PPROF)
 }
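Note the one-line fix above: the old code bound the prerun.only key to PRERUN_PARALLEL, so the parallel flag could never reach prerun.parallel. A standalone sketch of the corrected behavior (names taken from the diff; the surrounding program is not repo code):

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/viper"
)

func main() {
	os.Setenv("PRERUN_PARALLEL", "true")

	// The corrected bindings from the hunks above.
	viper.BindEnv("prerun.only", "PRERUN_ONLY")
	viper.BindEnv("prerun.parallel", "PRERUN_PARALLEL")
	viper.BindEnv("debug.pprof", "DEBUG_PPROF")

	fmt.Println(viper.GetBool("prerun.parallel")) // true
	fmt.Println(viper.GetBool("prerun.only"))     // false: PRERUN_PARALLEL no longer lands here
}
```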
cmd/serve.go (11 lines changed)

@@ -68,15 +68,18 @@ func serve() {
 		logWithCommand.Fatal(err)
 	}
 
-	// short circuit if we only want to perform prerun
-	if viper.GetBool("prerun.only") {
-		// TODO: make pprof optional
+	// Enable the pprof agent if configured
+	if viper.GetBool("debug.pprof") {
 		// See: https://www.farsightsecurity.com/blog/txt-record/go-remote-profiling-20161028/
-		// Do not use the default http multiplexor elsewhere in this process.
+		// For security reasons: do not use the default http multiplexor elsewhere in this process.
 		go func() {
 			logWithCommand.Info("Starting pprof listener on port 6060")
 			logWithCommand.Fatal(http.ListenAndServe("localhost:6060", nil))
 		}()
+	}
+
+	// short circuit if we only want to perform prerun
+	if viper.GetBool("prerun.only") {
 		parallel := viper.GetBool("prerun.parallel")
 		if err := statediffService.Run(nil, parallel); err != nil {
 			logWithCommand.Fatal("unable to perform prerun: %v", err)
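The committed code passes a nil handler to ListenAndServe, so pprof serves on the default mux that net/http/pprof populates on import, which is exactly why the comment warns against reusing that mux elsewhere in the process. For comparison, a sketch of the dedicated-mux variant using only the exported handlers of net/http/pprof (this is not the repo's code):

```go
package main

import (
	"log"
	"net/http"
	"net/http/pprof"
)

func main() {
	// A private mux so pprof handlers never mix with application routes.
	mux := http.NewServeMux()
	mux.HandleFunc("/debug/pprof/", pprof.Index)
	mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
	mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
	mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
	mux.HandleFunc("/debug/pprof/trace", pprof.Trace)

	// Bind to localhost only, as the diff does, so the agent
	// is not reachable from other hosts.
	log.Fatal(http.ListenAndServe("localhost:6060", mux))
}
```

Either way, a CPU profile can then be pulled with `go tool pprof http://localhost:6060/debug/pprof/profile`.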
@@ -133,7 +133,6 @@ func (sds *Service) Run(rngs []RangeRequest, parallel bool) error {
 	wg := new(sync.WaitGroup)
 	for i := 0; i < int(sds.workers); i++ {
 		blockRange := RangeRequest{
-			// TODO(dboreham): check this math doesn't leave gaps (are start/stop inclusive?)
 			Start: preRun.Start + uint64(i)*chunkSize,
 			Stop: preRun.Start + uint64(i)*chunkSize + chunkSize - 1,
 			Params: preRun.Params,
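The deleted TODO asked whether this arithmetic leaves gaps. Since Start and Stop are both inclusive, chunk i ends at Start + i*chunkSize + chunkSize - 1 and chunk i+1 begins at Start + (i+1)*chunkSize, exactly one block later, so consecutive chunks abut with no gap or overlap. A standalone sketch that checks this for a sample range (names mirror the diff; nothing here is repo code):

```go
package main

import "fmt"

func main() {
	const (
		start     uint64 = 1000
		workers          = 4
		chunkSize uint64 = 250 // e.g. (stop - start + 1) / workers
	)

	var prevStop uint64
	for i := 0; i < workers; i++ {
		chunkStart := start + uint64(i)*chunkSize
		chunkStop := start + uint64(i)*chunkSize + chunkSize - 1 // inclusive, as in the diff
		if i > 0 && chunkStart != prevStop+1 {
			fmt.Println("gap or overlap before chunk", i)
		}
		fmt.Printf("chunk %d: [%d, %d]\n", i, chunkStart, chunkStop)
		prevStop = chunkStop
	}
}
```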
@@ -165,7 +164,7 @@ func (sds *Service) Run(rngs []RangeRequest, parallel bool) error {
 		}
 	}
 	sds.preruns = nil
-	// TODO(dboreham): seems like this code is never called so we have not written the parallel version
+	// At present this code is never called so we have not written the parallel version:
 	for _, rng := range rngs {
 		logrus.Infof("processing requested range (%d, %d)", rng.Start, rng.Stop)
 		for i := rng.Start; i <= rng.Stop; i++ {