cmd/geth, eth/fetcher: polish metrics reporting, add some more

Péter Szilágyi 2015-06-21 20:23:51 +03:00
parent 6994a3daaa
commit b426301467
2 changed files with 60 additions and 16 deletions

cmd/geth (console metrics binding)

@@ -6,10 +6,9 @@ import (
"fmt"
"math/big"
"strconv"
"strings"
"time"
- "github.com/rcrowley/go-metrics"
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/cmd/utils"
@@ -25,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/xeth"
+ "github.com/rcrowley/go-metrics"
"github.com/robertkrimen/otto"
"gopkg.in/fatih/set.v0"
)
@@ -723,17 +723,56 @@ func (js *jsre) waitForBlocks(call otto.FunctionCall) otto.Value {
}
func (js *jsre) metrics(call otto.FunctionCall) otto.Value {
+ // Create a rate formatter
+ units := []string{"", "K", "M", "G", "T", "E", "P"}
+ round := func(value float64, prec int) string {
+ unit := 0
+ for value >= 1000 {
+ unit, value, prec = unit+1, value/1000, 2
+ }
+ return fmt.Sprintf(fmt.Sprintf("%%.%df%s", prec, units[unit]), value)
+ }
+ format := func(total float64, rate float64) string {
+ return fmt.Sprintf("%s (%s/s)", round(total, 0), round(rate, 2))
+ }
// Iterate over all the metrics, and just dump for now
counters := make(map[string]interface{})
metrics.DefaultRegistry.Each(func(name string, metric interface{}) {
+ // Create or retrieve the counter hierarchy for this metric
+ root, parts := counters, strings.Split(name, "/")
+ for _, part := range parts[:len(parts)-1] {
+ if _, ok := root[part]; !ok {
+ root[part] = make(map[string]interface{})
+ }
+ root = root[part].(map[string]interface{})
+ }
+ name = parts[len(parts)-1]
+ // Fill the counter with the metric details
switch metric := metric.(type) {
case metrics.Meter:
- counters[name+"( 1 min)"] = int(metric.Rate1() * 60)
- counters[name+"( 5 min)"] = int(metric.Rate5() * 300)
- counters[name+"(15 min)"] = int(metric.Rate15() * 900)
+ root[name] = map[string]interface{}{
+ "Avg01Min": format(metric.Rate1()*60, metric.Rate1()),
+ "Avg05Min": format(metric.Rate5()*300, metric.Rate5()),
+ "Avg15Min": format(metric.Rate15()*900, metric.Rate15()),
+ "Overall": format(float64(metric.Count()), metric.RateMean()),
+ }
+ case metrics.Timer:
+ root[name] = map[string]interface{}{
+ "Avg01Min": format(metric.Rate1()*60, metric.Rate1()),
+ "Avg05Min": format(metric.Rate5()*300, metric.Rate5()),
+ "Avg15Min": format(metric.Rate15()*900, metric.Rate15()),
+ "Overall": format(float64(metric.Count()), metric.RateMean()),
+ "Perc01": round(metric.Percentile(1), 2),
+ "Perc05": round(metric.Percentile(5), 2),
+ "Perc25": round(metric.Percentile(25), 2),
+ "Perc90": round(metric.Percentile(90), 2),
+ }
default:
- counters[name] = "Unknown metric type"
+ root[name] = "Unknown metric type"
}
})
// Flatten the counters into some metrics and return
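
The reworked metrics() binding does two things: it scales totals into human-readable units (round(1234567, 0) renders as "1.23M", and format pairs that total with a per-second rate), and it nests every metric under the segments of its slash-separated registry name instead of dumping a flat list. The following standalone sketch is not part of the commit; it only demonstrates the grouping step, reusing the metric names registered by the fetcher below:

package main

import (
    "encoding/json"
    "fmt"
    "strings"
)

func main() {
    // Names as the fetcher registers them; the leaf values stand in for the
    // per-metric rate maps that the real console code builds.
    names := []string{"eth/RemoteAnnounces", "eth/LocalBroadcasts", "eth/DiscardedBlocks"}

    counters := make(map[string]interface{})
    for _, name := range names {
        // Same walk as in the diff: create the intermediate nodes, then attach the leaf.
        root, parts := counters, strings.Split(name, "/")
        for _, part := range parts[:len(parts)-1] {
            if _, ok := root[part]; !ok {
                root[part] = make(map[string]interface{})
            }
            root = root[part].(map[string]interface{})
        }
        root[parts[len(parts)-1]] = "<rates>"
    }
    out, _ := json.MarshalIndent(counters, "", "  ")
    fmt.Println(string(out)) // all three leaves end up grouped under a single "eth" node
}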

eth/fetcher/fetcher.go

@@ -7,12 +7,11 @@ import (
"math/rand"
"time"
- "github.com/rcrowley/go-metrics"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
+ "github.com/rcrowley/go-metrics"
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)
@@ -100,9 +99,11 @@ type Fetcher struct {
importedHook func(*types.Block) // Method to call upon successful block import
// Runtime metrics
- announceStats metrics.Meter
- broadcastStats metrics.Meter
- discardStats metrics.Meter
+ announceMeter metrics.Meter // Counter for metering the inbound announcements
+ announceTimer metrics.Timer // Counter and timer for metering the announce forwarding
+ broadcastMeter metrics.Meter // Counter for metering the inbound propagations
+ broadcastTimer metrics.Timer // Counter and timer for metering the block forwarding
+ discardMeter metrics.Meter // Counter for metering the discarded blocks
}
// New creates a block fetcher to retrieve blocks based on hash announcements.
@@ -125,9 +126,11 @@ func New(getBlock blockRetrievalFn, validateBlock blockValidatorFn, broadcastBlo
chainHeight: chainHeight,
insertChain: insertChain,
dropPeer: dropPeer,
- announceStats: metrics.GetOrRegisterMeter("eth/Announced Blocks", metrics.DefaultRegistry),
- broadcastStats: metrics.GetOrRegisterMeter("eth/Propagated Blocks", metrics.DefaultRegistry),
- discardStats: metrics.GetOrRegisterMeter("eth/Discarded Blocks", metrics.DefaultRegistry),
+ announceMeter: metrics.GetOrRegisterMeter("eth/RemoteAnnounces", metrics.DefaultRegistry),
+ announceTimer: metrics.GetOrRegisterTimer("eth/LocalAnnounces", metrics.DefaultRegistry),
+ broadcastMeter: metrics.GetOrRegisterMeter("eth/RemoteBroadcasts", metrics.DefaultRegistry),
+ broadcastTimer: metrics.GetOrRegisterTimer("eth/LocalBroadcasts", metrics.DefaultRegistry),
+ discardMeter: metrics.GetOrRegisterMeter("eth/DiscardedBlocks", metrics.DefaultRegistry),
}
}
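
For reference, metrics.GetOrRegisterMeter and metrics.GetOrRegisterTimer look up an existing metric of that name in the default registry and create one only on first use, so the same names can be requested repeatedly without duplicate registration. A minimal sketch of feeding the renamed meters (the marks are invented here; in the fetcher each accepted announcement or propagated block marks once):

package main

import (
    "fmt"

    "github.com/rcrowley/go-metrics"
)

func main() {
    // Same names the fetcher registers above.
    announces := metrics.GetOrRegisterMeter("eth/RemoteAnnounces", metrics.DefaultRegistry)
    broadcasts := metrics.GetOrRegisterMeter("eth/RemoteBroadcasts", metrics.DefaultRegistry)

    announces.Mark(1)  // fetcher: a hash announcement arrived from a peer
    broadcasts.Mark(1) // fetcher: a propagated block was queued for import

    // Count() is the running total; Rate1() feeds the console's Avg01Min column.
    fmt.Println(announces.Count(), broadcasts.Count(), announces.Rate1())
}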
@@ -239,7 +242,7 @@ func (f *Fetcher) loop() {
case notification := <-f.notify:
// A block was announced, make sure the peer isn't DOSing us
- f.announceStats.Mark(1)
+ f.announceMeter.Mark(1)
count := f.announces[notification.origin] + 1
if count > hashLimit {
@@ -258,7 +261,7 @@ func (f *Fetcher) loop() {
case op := <-f.inject:
// A direct block insertion was requested, try and fill any pending gaps
- f.broadcastStats.Mark(1)
+ f.broadcastMeter.Mark(1)
f.enqueue(op.origin, op.block)
case hash := <-f.done:
@@ -418,6 +421,7 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
f.dropPeer(peer)
return
}
+ f.broadcastTimer.UpdateSince(block.ReceivedAt)
go f.broadcastBlock(block, true)
// Run the actual import and log any issues
@@ -426,6 +430,7 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
return
}
// If import succeeded, broadcast the block
+ f.announceTimer.UpdateSince(block.ReceivedAt)
go f.broadcastBlock(block, false)
// Invoke the testing hook if needed
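
The two UpdateSince calls turn the new timers into latency measurements: broadcastTimer records the time from when a block was first received (block.ReceivedAt) until it is re-propagated, and announceTimer the time until its hash is re-announced after a successful import. A rough sketch of the mechanics, assuming go-metrics' Timer semantics (UpdateSince stores time.Since(ts) in the timer's histogram); the sleep stands in for validation and import work:

package main

import (
    "fmt"
    "time"

    "github.com/rcrowley/go-metrics"
)

func main() {
    broadcasts := metrics.GetOrRegisterTimer("eth/LocalBroadcasts", metrics.DefaultRegistry)

    receivedAt := time.Now()          // stand-in for block.ReceivedAt
    time.Sleep(10 * time.Millisecond) // stand-in for the work done before forwarding

    broadcasts.UpdateSince(receivedAt) // records roughly 10ms

    fmt.Println(broadcasts.Count(), time.Duration(broadcasts.Max()))
}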