piecereader: Add metrics
parent 6c62e6d8e2
commit 326a0356a4
@@ -58,6 +58,10 @@ var (
	ProtocolID, _ = tag.NewKey("proto")
	Direction, _ = tag.NewKey("direction")
	UseFD, _ = tag.NewKey("use_fd")

	// piecereader
	PRReadType, _ = tag.NewKey("pr_type") // seq / rand
	PRReadSize, _ = tag.NewKey("pr_size") // small / big
)

// Measures
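The two new tag keys let the views defined further down split PieceReader metrics by read pattern (seq vs rand) and by read size (small vs big). As a rough, self-contained sketch of how such a key gets attached to a recorded measurement with OpenCensus (illustrative only; the names are examples, not code from this commit):

package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/tag"
)

var (
	// example key and measure mirroring the ones declared above
	prReadType, _  = tag.NewKey("pr_type")
	bytesRequested = stats.Int64("example/pr_requested_bytes", "requested bytes", stats.UnitBytes)
)

func main() {
	// build a tagged context once, then record measurements against it
	ctx, err := tag.New(context.Background(), tag.Upsert(prReadType, "seq"))
	if err != nil {
		log.Fatal(err)
	}
	stats.Record(ctx, bytesRequested.M(4096))
}

Measurements recorded like this only surface once a view over the measure is registered; the views added further down do that for the real counters.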
@@ -148,8 +152,9 @@ var (
	SchedCycleOpenWindows = stats.Int64("sched/assigner_cycle_open_window", "Number of open windows in scheduling cycles", stats.UnitDimensionless)
	SchedCycleQueueSize = stats.Int64("sched/assigner_cycle_task_queue_entry", "Number of task queue entries in scheduling cycles", stats.UnitDimensionless)

	DagStorePRInitCount = stats.Int64("dagstore/pr_init_count", "PieceReader init count", stats.UnitDimensionless)
	DagStorePRBytesRequested = stats.Int64("dagstore/pr_requested_bytes", "PieceReader requested bytes", stats.UnitBytes)

	DagStorePRBytesDiscarded = stats.Int64("dagstore/pr_discarded_bytes", "PieceReader discarded bytes", stats.UnitBytes)
	DagStorePRDiscardCount = stats.Int64("dagstore/pr_discard_count", "PieceReader discard count", stats.UnitDimensionless)
	DagStorePRSeekBackCount = stats.Int64("dagstore/pr_seek_back_count", "PieceReader seek back count", stats.UnitDimensionless)
@@ -157,6 +162,12 @@ var (
	DagStorePRSeekBackBytes = stats.Int64("dagstore/pr_seek_back_bytes", "PieceReader seek back bytes", stats.UnitBytes)
	DagStorePRSeekForwardBytes = stats.Int64("dagstore/pr_seek_forward_bytes", "PieceReader seek forward bytes", stats.UnitBytes)

	DagStorePRAtHitBytes = stats.Int64("dagstore/pr_at_hit_bytes", "PieceReader ReadAt bytes from cache", stats.UnitBytes)
	DagStorePRAtHitCount = stats.Int64("dagstore/pr_at_hit_count", "PieceReader ReadAt from cache hits", stats.UnitDimensionless)
	DagStorePRAtCacheFillCount = stats.Int64("dagstore/pr_at_cache_fill_count", "PieceReader ReadAt full cache fill count", stats.UnitDimensionless)
	DagStorePRAtReadBytes = stats.Int64("dagstore/pr_at_read_bytes", "PieceReader ReadAt bytes read from source", stats.UnitBytes) // PRReadSize tag
	DagStorePRAtReadCount = stats.Int64("dagstore/pr_at_read_count", "PieceReader ReadAt reads from source", stats.UnitDimensionless) // PRReadSize tag

	// splitstore
	SplitstoreMiss = stats.Int64("splitstore/miss", "Number of misses in hotstore access", stats.UnitDimensionless)
	SplitstoreCompactionTimeSeconds = stats.Float64("splitstore/compaction_time", "Compaction time in seconds", stats.UnitSeconds)
@@ -472,6 +483,7 @@ var (
	DagStorePRBytesRequestedView = &view.View{
		Measure: DagStorePRBytesRequested,
		Aggregation: view.Sum(),
		TagKeys: []tag.Key{PRReadType},
	}
	DagStorePRBytesDiscardedView = &view.View{
		Measure: DagStorePRBytesDiscarded,
@@ -498,6 +510,29 @@ var (
		Aggregation: view.Sum(),
	}

	DagStorePRAtHitBytesView = &view.View{
		Measure: DagStorePRAtHitBytes,
		Aggregation: view.Sum(),
	}
	DagStorePRAtHitCountView = &view.View{
		Measure: DagStorePRAtHitCount,
		Aggregation: view.Count(),
	}
	DagStorePRAtCacheFillCountView = &view.View{
		Measure: DagStorePRAtCacheFillCount,
		Aggregation: view.Count(),
	}
	DagStorePRAtReadBytesView = &view.View{
		Measure: DagStorePRAtReadBytes,
		Aggregation: view.Sum(),
		TagKeys: []tag.Key{PRReadSize},
	}
	DagStorePRAtReadCountView = &view.View{
		Measure: DagStorePRAtReadCount,
		Aggregation: view.Count(),
		TagKeys: []tag.Key{PRReadSize},
	}

	// splitstore
	SplitstoreMissView = &view.View{
		Measure: SplitstoreMiss,
@@ -762,6 +797,11 @@ var MinerNodeViews = append([]*view.View{
	DagStorePRSeekForwardCountView,
	DagStorePRSeekBackBytesView,
	DagStorePRSeekForwardBytesView,
	DagStorePRAtHitBytesView,
	DagStorePRAtHitCountView,
	DagStorePRAtCacheFillCountView,
	DagStorePRAtReadBytesView,
	DagStorePRAtReadCountView,
}, DefaultViews...)
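Appending the new *View definitions to MinerNodeViews is what makes the counters reachable by whatever exporter the node wires up at startup; in OpenCensus a view only starts aggregating after it is passed to view.Register. A minimal, illustrative sketch of that lifecycle using a stand-in view (assumed names, not part of this commit):

package main

import (
	"context"
	"fmt"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

func main() {
	// stand-in key, measure and view mirroring the PieceReader ones above
	readType, err := tag.NewKey("pr_type")
	if err != nil {
		log.Fatal(err)
	}
	requested := stats.Int64("example/pr_requested_bytes", "requested bytes", stats.UnitBytes)

	v := &view.View{
		Name:        "example/pr_requested_bytes",
		Measure:     requested,
		Aggregation: view.Sum(),
		TagKeys:     []tag.Key{readType},
	}
	// nothing is aggregated until the view is registered
	if err := view.Register(v); err != nil {
		log.Fatal(err)
	}

	// record one tagged measurement, then read the aggregated rows back
	ctx, _ := tag.New(context.Background(), tag.Upsert(readType, "rand"))
	stats.Record(ctx, requested.M(1024))

	rows, err := view.RetrieveData(v.Name)
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range rows {
		fmt.Println(row.Tags, row.Data)
	}
}

A real deployment would register an exporter such as Prometheus rather than polling view.RetrieveData; the polling here just keeps the sketch self-contained.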
var GatewayNodeViews = append([]*view.View{
@@ -9,6 +9,7 @@ import (
	lru "github.com/hashicorp/golang-lru/v2"
	"github.com/ipfs/go-cid"
	"go.opencensus.io/stats"
	"go.opencensus.io/tag"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/dagstore/mount"
@@ -33,6 +34,9 @@ type pieceReader struct {
	len abi.UnpaddedPieceSize
	onClose context.CancelFunc

	seqMCtx context.Context
	atMCtx context.Context

	closed bool
	seqAt int64 // next byte to be read by io.Reader
@@ -52,6 +56,9 @@ type pieceReader struct {
func (p *pieceReader) init() (_ *pieceReader, err error) {
	stats.Record(p.ctx, metrics.DagStorePRInitCount.M(1))

	p.seqMCtx, _ = tag.New(p.ctx, tag.Upsert(metrics.PRReadType, "seq"))
	p.atMCtx, _ = tag.New(p.ctx, tag.Upsert(metrics.PRReadType, "rand"))

	p.remReads, err = lru.New[int64, []byte](100)
	if err != nil {
		return nil, err
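p.remReads is a small generic LRU from hashicorp/golang-lru/v2, keyed by offset and holding leftover bytes from earlier reads so later ReadAt calls can be answered from memory (that is what the pr_at_hit_* counters below measure). A self-contained sketch of the cache pattern; the capacity of 100 mirrors the line above, the offset and data are made up:

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// offset -> leftover bytes, evicting the least recently used entry past 100
	remReads, err := lru.New[int64, []byte](100)
	if err != nil {
		panic(err)
	}

	// an earlier read produced more data than the caller asked for;
	// stash the remainder under the offset where it starts
	remReads.Add(int64(4096), []byte("leftover data"))

	// a later ReadAt at that offset can be served from the cache
	if data, ok := remReads.Get(int64(4096)); ok {
		fmt.Printf("cache hit: %d bytes\n", len(data))
		remReads.Remove(int64(4096))
	}
}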
@@ -146,7 +153,7 @@ func (p *pieceReader) readSeqReader(b []byte) (n int, err error) {
		return 0, err
	}

	stats.Record(p.ctx, metrics.DagStorePRBytesRequested.M(int64(len(b))))
	stats.Record(p.seqMCtx, metrics.DagStorePRBytesRequested.M(int64(len(b))))

	// 1. Get the backing reader into the correct position
@@ -164,9 +171,9 @@ func (p *pieceReader) readSeqReader(b []byte) (n int, err error) {
		log.Debugw("pieceReader new stream", "piece", p.pieceCid, "at", p.rAt, "off", off-p.rAt, "n", len(b))

		if off > p.rAt {
			stats.Record(p.ctx, metrics.DagStorePRSeekForwardBytes.M(off-p.rAt), metrics.DagStorePRSeekForwardCount.M(1))
			stats.Record(p.seqMCtx, metrics.DagStorePRSeekForwardBytes.M(off-p.rAt), metrics.DagStorePRSeekForwardCount.M(1))
		} else {
			stats.Record(p.ctx, metrics.DagStorePRSeekBackBytes.M(p.rAt-off), metrics.DagStorePRSeekBackCount.M(1))
			stats.Record(p.seqMCtx, metrics.DagStorePRSeekBackBytes.M(p.rAt-off), metrics.DagStorePRSeekBackCount.M(1))
		}

		p.rAt = off
@@ -179,7 +186,7 @@ func (p *pieceReader) readSeqReader(b []byte) (n int, err error) {

	// 2. Check if we need to burn some bytes
	if off > p.rAt {
		stats.Record(p.ctx, metrics.DagStorePRBytesDiscarded.M(off-p.rAt), metrics.DagStorePRDiscardCount.M(1))
		stats.Record(p.seqMCtx, metrics.DagStorePRBytesDiscarded.M(off-p.rAt), metrics.DagStorePRDiscardCount.M(1))

		n, err := io.CopyN(io.Discard, p.br, off-p.rAt)
		p.rAt += n
@@ -207,6 +214,10 @@ func (p *pieceReader) readSeqReader(b []byte) (n int, err error) {
}

func (p *pieceReader) ReadAt(b []byte, off int64) (n int, err error) {
	log.Errorw("ReadAt called on pieceReader", "piece", p.pieceCid, "off", off, "len", len(b))

	stats.Record(p.atMCtx, metrics.DagStorePRBytesRequested.M(int64(len(b))))

	var filled int64

	// try to get a buf from lru
@@ -223,8 +234,13 @@ func (p *pieceReader) ReadAt(b []byte, off int64) (n int, err error) {
				p.remReads.Remove(off)
			}
		}

		stats.Record(p.atMCtx, metrics.DagStorePRAtHitBytes.M(int64(n)), metrics.DagStorePRAtHitCount.M(1))
		// dagstore/pr_at_hit_bytes, dagstore/pr_at_hit_count
	}
	if filled == int64(len(b)) {
		// dagstore/pr_at_cache_fill_count
		stats.Record(p.atMCtx, metrics.DagStorePRAtCacheFillCount.M(1))
		return n, nil
	}
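To make the new ReadAt counters concrete: a 64 KiB ReadAt that is fully satisfied by a cached buffer at that offset adds 65536 to dagstore/pr_at_hit_bytes, increments dagstore/pr_at_hit_count, and, because filled == len(b), increments dagstore/pr_at_cache_fill_count and returns without touching the underlying reader. A partial hit records only the hit metrics and falls through to the source reads below, which are counted under dagstore/pr_at_read_bytes and dagstore/pr_at_read_count with the pr_size tag (left as an empty value by this commit).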
@@ -241,6 +257,8 @@ func (p *pieceReader) ReadAt(b []byte, off int64) (n int, err error) {
		return int(filled), err
	}

	_ = stats.RecordWithTags(p.atMCtx, []tag.Mutator{tag.Insert(metrics.PRReadSize, "")}, metrics.DagStorePRAtReadBytes.M(int64(bn)), metrics.DagStorePRAtReadCount.M(1))

	// reslice so that the slice is the data
	readBuf = readBuf[:bn]
@@ -260,6 +278,8 @@ func (p *pieceReader) ReadAt(b []byte, off int64) (n int, err error) {
			return int(filled), err
		}
		filled += int64(bn)

		_ = stats.RecordWithTags(p.atMCtx, []tag.Mutator{tag.Insert(metrics.PRReadSize, "")}, metrics.DagStorePRAtReadBytes.M(int64(bn)), metrics.DagStorePRAtReadCount.M(1))
	}

	if filled < int64(len(b)) {