plugeth/metrics/metrics.go

// Go port of Coda Hale's Metrics library
//
// <https://github.com/rcrowley/go-metrics>
//
// Coda Hale's original work: <https://github.com/codahale/metrics>
package metrics

import (
	"os"
	"runtime/metrics"
	"runtime/pprof"
	"strconv"
	"strings"
	"syscall"
	"time"

	"github.com/ethereum/go-ethereum/log"
)

// Enabled is checked by the constructor functions for all of the
// standard metrics. If it is false, the metric returned is a no-op stub.
//
// This global kill-switch helps quantify the observer effect and makes
// for less cluttered pprof profiles.
var Enabled = false

// EnabledExpensive is a soft-flag meant for external packages to check if costly
// metrics gathering is allowed or not. The goal is to separate standard metrics
// for health monitoring and debug metrics that might impact runtime performance.
var EnabledExpensive = false

// enablerFlags is the list of CLI flag names used to enable metrics collection.
var enablerFlags = []string{"metrics"}

// enablerEnvVars is the list of environment variable names used to enable metrics collection.
var enablerEnvVars = []string{"GETH_METRICS"}

// expensiveEnablerFlags is the list of CLI flag names used to enable expensive metrics collection.
var expensiveEnablerFlags = []string{"metrics.expensive"}

// expensiveEnablerEnvVars is the list of environment variable names used to enable expensive metrics collection.
var expensiveEnablerEnvVars = []string{"GETH_METRICS_EXPENSIVE"}

// init enables or disables the metrics system. Since we need this to run before
// any other code gets to create meters and timers, we'll actually do an ugly hack
// and peek into the command line args for the metrics flag.
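//
// For example, collection can be enabled either via a CLI flag or an environment
// variable (hypothetical invocations of a geth-style binary):
//
//	GETH_METRICS=true geth
//	geth --metrics --metrics.expensive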
func init() {
	for _, enabler := range enablerEnvVars {
		if val, found := syscall.Getenv(enabler); found && !Enabled {
			if enable, _ := strconv.ParseBool(val); enable { // ignore error, flag parser will choke on it later
				log.Info("Enabling metrics collection")
				Enabled = true
			}
		}
	}
	for _, enabler := range expensiveEnablerEnvVars {
		if val, found := syscall.Getenv(enabler); found && !EnabledExpensive {
			if enable, _ := strconv.ParseBool(val); enable { // ignore error, flag parser will choke on it later
				log.Info("Enabling expensive metrics collection")
				EnabledExpensive = true
			}
		}
	}
	for _, arg := range os.Args {
		flag := strings.TrimLeft(arg, "-")
		for _, enabler := range enablerFlags {
			if !Enabled && flag == enabler {
				log.Info("Enabling metrics collection")
				Enabled = true
			}
		}
		for _, enabler := range expensiveEnablerFlags {
			if !EnabledExpensive && flag == enabler {
				log.Info("Enabling expensive metrics collection")
				EnabledExpensive = true
			}
		}
	}
}
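
// threadCreateProfile is the pprof profile used to report the number of OS
// threads created by the process (see cpuThreads in CollectProcessMetrics).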
var threadCreateProfile = pprof.Lookup("threadcreate")
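
// runtimeStats holds a single snapshot of the Go runtime metrics gathered via
// the runtime/metrics package.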
type runtimeStats struct {
	GCPauses     *metrics.Float64Histogram
	GCAllocBytes uint64
	GCFreedBytes uint64
	MemTotal     uint64
	HeapObjects  uint64
	HeapFree     uint64
	HeapReleased uint64
	HeapUnused   uint64
	Goroutines   uint64
	SchedLatency *metrics.Float64Histogram
}
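
// runtimeSamples lists the runtime/metrics samples read on every collection
// cycle; the histogram-valued ones are marked inline.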
var runtimeSamples = []metrics.Sample{
	{Name: "/gc/pauses:seconds"}, // histogram
	{Name: "/gc/heap/allocs:bytes"},
	{Name: "/gc/heap/frees:bytes"},
	{Name: "/memory/classes/total:bytes"},
	{Name: "/memory/classes/heap/objects:bytes"},
	{Name: "/memory/classes/heap/free:bytes"},
	{Name: "/memory/classes/heap/released:bytes"},
	{Name: "/memory/classes/heap/unused:bytes"},
	{Name: "/sched/goroutines:goroutines"},
	{Name: "/sched/latencies:seconds"}, // histogram
}
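
// ReadRuntimeStats reads the predefined runtime/metrics samples and returns
// them as a single snapshot. A minimal usage sketch (hypothetical caller):
//
//	stats := metrics.ReadRuntimeStats()
//	fmt.Println("goroutines:", stats.Goroutines)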
func ReadRuntimeStats() *runtimeStats {
	r := new(runtimeStats)
	readRuntimeStats(r)
	return r
}
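
// readRuntimeStats reads every sample in runtimeSamples and fills in v,
// skipping any metric that the current Go runtime does not provide.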
func readRuntimeStats(v *runtimeStats) {
	metrics.Read(runtimeSamples)
	for _, s := range runtimeSamples {
		// Skip invalid/unknown metrics. This is needed because some metrics
		// are unavailable in older Go versions, and attempting to read a 'bad'
		// metric panics.
		if s.Value.Kind() == metrics.KindBad {
			continue
		}
		switch s.Name {
		case "/gc/pauses:seconds":
			v.GCPauses = s.Value.Float64Histogram()
		case "/gc/heap/allocs:bytes":
			v.GCAllocBytes = s.Value.Uint64()
		case "/gc/heap/frees:bytes":
			v.GCFreedBytes = s.Value.Uint64()
		case "/memory/classes/total:bytes":
			v.MemTotal = s.Value.Uint64()
		case "/memory/classes/heap/objects:bytes":
			v.HeapObjects = s.Value.Uint64()
		case "/memory/classes/heap/free:bytes":
			v.HeapFree = s.Value.Uint64()
		case "/memory/classes/heap/released:bytes":
			v.HeapReleased = s.Value.Uint64()
		case "/memory/classes/heap/unused:bytes":
			v.HeapUnused = s.Value.Uint64()
		case "/sched/goroutines:goroutines":
			v.Goroutines = s.Value.Uint64()
		case "/sched/latencies:seconds":
			v.SchedLatency = s.Value.Float64Histogram()
		}
	}
}

// CollectProcessMetrics periodically collects various metrics about the running process.
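//
// It loops forever (unless metrics are disabled), so callers typically launch
// it in its own goroutine. A minimal sketch, with an arbitrary example refresh
// interval:
//
//	go metrics.CollectProcessMetrics(3 * time.Second)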
func CollectProcessMetrics(refresh time.Duration) {
	// Short circuit if the metrics system is disabled
	if !Enabled {
		return
	}
	// Create the various data collectors
	var (
		cpustats  = make([]CPUStats, 2)
		diskstats = make([]DiskStats, 2)
		rstats    = make([]runtimeStats, 2)
	)
	// This scale factor is used for the runtime's time metrics. It's useful to convert to
	// ns here because the runtime gives times in float seconds, but runtimeHistogram can
	// only provide integers for the minimum and maximum values.
	const secondsToNs = float64(time.Second)

	// Define the various metrics to collect
	var (
		cpuSysLoad            = GetOrRegisterGauge("system/cpu/sysload", DefaultRegistry)
		cpuSysWait            = GetOrRegisterGauge("system/cpu/syswait", DefaultRegistry)
		cpuProcLoad           = GetOrRegisterGauge("system/cpu/procload", DefaultRegistry)
		cpuSysLoadTotal       = GetOrRegisterCounterFloat64("system/cpu/sysload/total", DefaultRegistry)
		cpuSysWaitTotal       = GetOrRegisterCounterFloat64("system/cpu/syswait/total", DefaultRegistry)
		cpuProcLoadTotal      = GetOrRegisterCounterFloat64("system/cpu/procload/total", DefaultRegistry)
		cpuThreads            = GetOrRegisterGauge("system/cpu/threads", DefaultRegistry)
		cpuGoroutines         = GetOrRegisterGauge("system/cpu/goroutines", DefaultRegistry)
		cpuSchedLatency       = getOrRegisterRuntimeHistogram("system/cpu/schedlatency", secondsToNs, nil)
		memPauses             = getOrRegisterRuntimeHistogram("system/memory/pauses", secondsToNs, nil)
		memAllocs             = GetOrRegisterMeter("system/memory/allocs", DefaultRegistry)
		memFrees              = GetOrRegisterMeter("system/memory/frees", DefaultRegistry)
		memTotal              = GetOrRegisterGauge("system/memory/held", DefaultRegistry)
		heapUsed              = GetOrRegisterGauge("system/memory/used", DefaultRegistry)
		heapObjects           = GetOrRegisterGauge("system/memory/objects", DefaultRegistry)
		diskReads             = GetOrRegisterMeter("system/disk/readcount", DefaultRegistry)
		diskReadBytes         = GetOrRegisterMeter("system/disk/readdata", DefaultRegistry)
		diskReadBytesCounter  = GetOrRegisterCounter("system/disk/readbytes", DefaultRegistry)
		diskWrites            = GetOrRegisterMeter("system/disk/writecount", DefaultRegistry)
		diskWriteBytes        = GetOrRegisterMeter("system/disk/writedata", DefaultRegistry)
		diskWriteBytesCounter = GetOrRegisterCounter("system/disk/writebytes", DefaultRegistry)
	)
	var lastCollectTime time.Time

	// Iterate loading the different stats and updating the meters.
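	//
	// Two sample slots are kept and swapped on every pass, so each iteration can
	// compute deltas (CPU time, allocations, frees, disk I/O) against the
	// previous reading.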
	now, prev := 0, 1
	for ; ; now, prev = prev, now {
		// Gather CPU times.
		ReadCPUStats(&cpustats[now])
		collectTime := time.Now()
		secondsSinceLastCollect := collectTime.Sub(lastCollectTime).Seconds()
		lastCollectTime = collectTime
		if secondsSinceLastCollect > 0 {
			sysLoad := cpustats[now].GlobalTime - cpustats[prev].GlobalTime
			sysWait := cpustats[now].GlobalWait - cpustats[prev].GlobalWait
			procLoad := cpustats[now].LocalTime - cpustats[prev].LocalTime
			// Convert to integer percentage.
			cpuSysLoad.Update(int64(sysLoad / secondsSinceLastCollect * 100))
			cpuSysWait.Update(int64(sysWait / secondsSinceLastCollect * 100))
			cpuProcLoad.Update(int64(procLoad / secondsSinceLastCollect * 100))
			// Accumulate the raw CPU time deltas into the running counters.
			cpuSysLoadTotal.Inc(sysLoad)
			cpuSysWaitTotal.Inc(sysWait)
			cpuProcLoadTotal.Inc(procLoad)
		}
		// Threads
		cpuThreads.Update(int64(threadCreateProfile.Count()))

		// Go runtime metrics
		readRuntimeStats(&rstats[now])
		cpuGoroutines.Update(int64(rstats[now].Goroutines))
		cpuSchedLatency.update(rstats[now].SchedLatency)
		memPauses.update(rstats[now].GCPauses)
		memAllocs.Mark(int64(rstats[now].GCAllocBytes - rstats[prev].GCAllocBytes))
		memFrees.Mark(int64(rstats[now].GCFreedBytes - rstats[prev].GCFreedBytes))
		memTotal.Update(int64(rstats[now].MemTotal))
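		// Memory in active use: total runtime-managed memory minus the heap
		// classes that are unused, free, or released back to the OS.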
		heapUsed.Update(int64(rstats[now].MemTotal - rstats[now].HeapUnused - rstats[now].HeapFree - rstats[now].HeapReleased))
		heapObjects.Update(int64(rstats[now].HeapObjects))

		// Disk
		if ReadDiskStats(&diskstats[now]) == nil {
			diskReads.Mark(diskstats[now].ReadCount - diskstats[prev].ReadCount)
			diskReadBytes.Mark(diskstats[now].ReadBytes - diskstats[prev].ReadBytes)
			diskWrites.Mark(diskstats[now].WriteCount - diskstats[prev].WriteCount)
			diskWriteBytes.Mark(diskstats[now].WriteBytes - diskstats[prev].WriteBytes)
			diskReadBytesCounter.Inc(diskstats[now].ReadBytes - diskstats[prev].ReadBytes)
			diskWriteBytesCounter.Inc(diskstats[now].WriteBytes - diskstats[prev].WriteBytes)
		}
		time.Sleep(refresh)
	}
}