Tool for measuring partition summaries

zenground0 2023-07-10 15:28:14 -06:00
parent d463abae2f
commit cff7716707


@@ -1,14 +1,18 @@
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
)
@@ -17,6 +21,151 @@ var cronWcCmd = &cli.Command{
Description: "cron stats", Description: "cron stats",
Subcommands: []*cli.Command{ Subcommands: []*cli.Command{
minerDeadlineCronCountCmd, minerDeadlineCronCountCmd,
minerDeadlinePartitionMeasurementCmd,
},
}
type DeadlineRef struct {
Addr address.Address
Height abi.ChainEpoch
}
type DeadlineSummary struct {
Partitions []PartitionSummary
}
type PartitionSummary struct {
Live int
Dead int
Diff PartitionDiff
}
type PartitionDiff struct {
Faulted int
Recovered int
Killed int
}
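// Input is read from stdin as a JSON array of DeadlineRef values, e.g.
// [{"Addr":"f01234","Height":3100000}] (address and epoch chosen here only
// for illustration); the matching DeadlineSummary values are written to
// stdout as a JSON array.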
var minerDeadlinePartitionMeasurementCmd = &cli.Command{
Name: "deadline-summary",
Description: "",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "json",
Usage: "read input as json",
Value: true,
},
&cli.StringFlag{
Name: "tipset",
Usage: "specify tipset state to search on (pass comma separated array of cids)",
},
},
Action: func(c *cli.Context) error {
// read in values to process
if !c.Bool("json") {
return xerrors.Errorf("unsupported non json input format")
}
var refStream []DeadlineRef
if err := json.NewDecoder(os.Stdin).Decode(&refStream); err != nil {
return xerrors.Errorf("failed to parse input: %w", err)
}
// go from height and sp addr to deadline partition data
n, acloser, err := lcli.GetFullNodeAPI(c)
if err != nil {
return err
}
defer acloser()
ctx := lcli.ReqContext(c)
dSummaries := make([]DeadlineSummary, 0, len(refStream))
for _, ref := range refStream {
// get miner's deadline
tsBefore, err := n.ChainGetTipSetByHeight(ctx, ref.Height, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("failed to get tipset at epoch: %d: %w", ref.Height, err)
}
tsAfter, err := n.ChainGetTipSetByHeight(ctx, ref.Height+1, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("failed to get tipset at epoch %d: %w", ref.Height, err)
}
dline, err := n.StateMinerProvingDeadline(ctx, ref.Addr, tsBefore.Key())
if err != nil {
return xerrors.Errorf("failed to read proving deadline: %w", err)
}
// iterate through all partitions at epoch of processing
var pSummaries []PartitionSummary
psBefore, err := n.StateMinerPartitions(ctx, ref.Addr, dline.Index, tsBefore.Key())
if err != nil {
return xerrors.Errorf("failed to get partitions: %w", err)
}
psAfter, err := n.StateMinerPartitions(ctx, ref.Addr, dline.Index, tsAfter.Key())
if err != nil {
return xerrors.Errorf("failed to get partitions: %w", err)
}
if len(psBefore) != len(psAfter) {
return xerrors.Errorf("faield")
}
type partitionCount struct {
live int
dead int
faulty int
recovering int
}
countPartition := func(p api.Partition) (partitionCount, error) {
liveSectors, err := p.LiveSectors.All(abi.MaxSectorNumber)
if err != nil {
return partitionCount{}, xerrors.Errorf("failed to count live sectors in partition: %w", err)
}
allSectors, err := p.AllSectors.All(abi.MaxSectorNumber)
if err != nil {
return partitionCount{}, xerrors.Errorf("failed to count all sectors in partition: %w", err)
}
faultySectors, err := p.FaultySectors.All(abi.MaxSectorNumber)
if err != nil {
return partitionCount{}, xerrors.Errorf("failed to count faulty sectors in partition: %w", err)
}
recoveringSectors, err := p.RecoveringSectors.All(abi.MaxSectorNumber)
if err != nil {
return partitionCount{}, xerrors.Errorf("failed to count recovering sectors in partition: %w", err)
}
return partitionCount{
live: len(liveSectors),
dead: len(allSectors) - len(liveSectors),
faulty: len(faultySectors),
recovering: len(recoveringSectors),
}, nil
}
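// Live and Dead describe each partition at ref.Height; Diff records the net
// change from ref.Height to ref.Height+1: Faulted is the increase in faulty
// sectors, Recovered the decrease in recovering sectors, Killed the increase
// in dead sectors.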
for i := 0; i < len(psBefore); i++ {
cntBefore, err := countPartition(psBefore[i])
if err != nil {
return err
}
cntAfter, err := countPartition(psAfter[i])
if err != nil {
return err
}
pSummaries = append(pSummaries, PartitionSummary{
Live: cntBefore.live,
Dead: cntBefore.dead,
Diff: PartitionDiff{
Faulted: cntAfter.faulty - cntBefore.faulty,
Recovered: cntBefore.recovering - cntAfter.recovering,
Killed: cntAfter.dead - cntBefore.dead,
},
})
}
dSummaries = append(dSummaries, DeadlineSummary{Partitions: pSummaries})
}
// output partition info
if err := json.NewEncoder(os.Stdout).Encode(dSummaries); err != nil {
return xerrors.Errorf("failed to encode output: %w", err)
}
return nil
},
}
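A minimal usage sketch, assuming cronWcCmd is exposed as the cron-wc subcommand of lotus-shed (the binary and parent command names are not shown in this diff): pipe a JSON array of DeadlineRef values on stdin and read the DeadlineSummary array from stdout, for example

echo '[{"Addr":"f01234","Height":3100000}]' | lotus-shed cron-wc deadline-summary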