d3594835c4
This patch starts adding support for network upgrades.

* It adds an actors abstraction layer for loading abstract (cross-version) actors (sketched below).
* It starts switching over to a shared deadline type.
* It adds an abstraction for ADTs (HAMT/AMT).
* It removes the callback-based API in the StateManager (difficult to abstract across actor versions).
* It _does not_ actually add support for actors v2. We can do that in a follow-up patch, and it should be relatively easy.

This patch is heavily WIP and does not compile. Feel free to push changes directly to this branch.

Notes:

* State tree access now needs a network version, because the HAMT type will change.
* I haven't figured out a nice way to abstract over changes to the _message_ types. However, many of them will be type-aliased to actors v0 in actors v2, so we can likely continue using the v0 versions (or use the v2 versions everywhere). I've been renaming imports to `v0*` to make it clear that we're importing types from a _specific_ actors version.

TODO:

* Consider merging incremental improvements? We'd have to get this compiling again first, but we could merge in the new abstractions and slowly switch over.
* Finish migrating to the new abstractions.
* Remove all actor state types from the public API. See `miner.State.Info()` for the planned approach here.
* Fix the tests. This is likely going to be a massive pain.
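For orientation, here is a rough sketch of what the cross-version miner abstraction looks like from a caller's point of view. The method names mirror what the file below already uses (`miner.Load`, `ForEachDeadline`, `ForEachPartition`, `LiveSectors`, `Faults`, `Recovering`); the `MinerInfo` fields, the import aliases, and the `loadV0` helper are illustrative assumptions, not the final API:

```go
// Sketch of chain/actors/builtin/miner as seen by callers such as proving.go.
// Import aliases, MinerInfo fields, and loadV0 are illustrative assumptions.
package miner

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-bitfield"
	"github.com/filecoin-project/go-state-types/abi"
	"golang.org/x/xerrors"

	v0builtin "github.com/filecoin-project/specs-actors/actors/builtin"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/types"
)

// MinerInfo is a version-agnostic snapshot of the miner's static info
// (illustrative subset of fields).
type MinerInfo struct {
	Owner, Worker address.Address
	SectorSize    abi.SectorSize
}

// State hides which specs-actors version backs the miner actor.
type State interface {
	Info() (MinerInfo, error)
	ForEachDeadline(cb func(idx uint64, dl Deadline) error) error
}

// Deadline and Partition are the shared deadline types the CLI iterates over.
type Deadline interface {
	ForEachPartition(cb func(idx uint64, part Partition) error) error
}

type Partition interface {
	LiveSectors() (bitfield.BitField, error)
	Faults() (bitfield.BitField, error)
	Recovering() (bitfield.BitField, error)
}

// Load selects the concrete implementation from the actor's code CID, so
// supporting actors v2 later should only mean adding one more case here.
func Load(store adt.Store, act *types.Actor) (State, error) {
	switch act.Code {
	case v0builtin.StorageMinerActorCodeID:
		return loadV0(store, act.Head) // hypothetical v0-backed wrapper (omitted)
	default:
		return nil, xerrors.Errorf("unknown miner actor code %s", act.Code)
	}
}
```

The proving commands in the file below consume exactly this surface, which is what lets them drop the hand-rolled CBOR decoding of a specific miner state version.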
314 lines · 7.6 KiB · Go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
	"time"

	"github.com/fatih/color"
	"github.com/urfave/cli/v2"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api/apibstore"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
	lcli "github.com/filecoin-project/lotus/cli"
)

var provingCmd = &cli.Command{
	Name:  "proving",
	Usage: "View proving information",
	Subcommands: []*cli.Command{
		provingInfoCmd,
		provingDeadlinesCmd,
		provingFaultsCmd,
	},
}

var provingFaultsCmd = &cli.Command{
	Name:  "faults",
	Usage: "View the currently known proving faulty sectors information",
	Action: func(cctx *cli.Context) error {
		color.NoColor = !cctx.Bool("color")

		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()

		api, acloser, err := lcli.GetFullNodeAPI(cctx)
		if err != nil {
			return err
		}
		defer acloser()

		ctx := lcli.ReqContext(cctx)

		stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(api))

		maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor"))
		if err != nil {
			return err
		}

		mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK)
		if err != nil {
			return err
		}

		mas, err := miner.Load(stor, mact)
		if err != nil {
			return err
		}

		fmt.Printf("Miner: %s\n", color.BlueString("%s", maddr))

		tw := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
		_, _ = fmt.Fprintln(tw, "deadline\tpartition\tsectors")
		err = mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error {
			// Propagate errors from the nested partition/sector iterators.
			return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error {
				faults, err := part.Faults()
				if err != nil {
					return err
				}
				return faults.ForEach(func(num uint64) error {
					_, _ = fmt.Fprintf(tw, "%d\t%d\t%d\n", dlIdx, partIdx, num)
					return nil
				})
			})
		})
		if err != nil {
			return err
		}
		return tw.Flush()
	},
}

var provingInfoCmd = &cli.Command{
	Name:  "info",
	Usage: "View current state information",
	Action: func(cctx *cli.Context) error {
		color.NoColor = !cctx.Bool("color")

		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()

		api, acloser, err := lcli.GetFullNodeAPI(cctx)
		if err != nil {
			return err
		}
		defer acloser()

		ctx := lcli.ReqContext(cctx)

		maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor"))
		if err != nil {
			return err
		}

		head, err := api.ChainHead(ctx)
		if err != nil {
			return xerrors.Errorf("getting chain head: %w", err)
		}

		mact, err := api.StateGetActor(ctx, maddr, head.Key())
		if err != nil {
			return err
		}

		stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(api))

		mas, err := miner.Load(stor, mact)
		if err != nil {
			return err
		}

		cd, err := api.StateMinerProvingDeadline(ctx, maddr, head.Key())
		if err != nil {
			return xerrors.Errorf("getting proving deadline info: %w", err)
		}

		fmt.Printf("Miner: %s\n", color.BlueString("%s", maddr))

		// Tally live, faulty, and recovering sectors across every deadline
		// and partition.
		proving := uint64(0)
		faults := uint64(0)
		recovering := uint64(0)
		curDeadlineSectors := uint64(0)

		if err := mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error {
			return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error {
				if bf, err := part.LiveSectors(); err != nil {
					return err
				} else if count, err := bf.Count(); err != nil {
					return err
				} else {
					proving += count
					if dlIdx == cd.Index {
						curDeadlineSectors += count
					}
				}

				if bf, err := part.Faults(); err != nil {
					return err
				} else if count, err := bf.Count(); err != nil {
					return err
				} else {
					faults += count
				}

				if bf, err := part.Recovering(); err != nil {
					return err
				} else if count, err := bf.Count(); err != nil {
					return err
				} else {
					recovering += count
				}

				return nil
			})
		}); err != nil {
			return xerrors.Errorf("walking miner deadlines and partitions: %w", err)
		}

		var faultPerc float64
		if proving > 0 {
			// Integer math keeps two decimal places of precision.
			faultPerc = float64(faults*10000/proving) / 100
		}

		fmt.Printf("Current Epoch: %d\n", cd.CurrentEpoch)

		fmt.Printf("Proving Period Boundary: %d\n", cd.PeriodStart%cd.WPoStProvingPeriod)
		fmt.Printf("Proving Period Start: %s\n", epochTime(cd.CurrentEpoch, cd.PeriodStart))
		fmt.Printf("Next Period Start: %s\n\n", epochTime(cd.CurrentEpoch, cd.PeriodStart+cd.WPoStProvingPeriod))

		fmt.Printf("Faults: %d (%.2f%%)\n", faults, faultPerc)
		fmt.Printf("Recovering: %d\n", recovering)

		fmt.Printf("Deadline Index: %d\n", cd.Index)
		fmt.Printf("Deadline Sectors: %d\n", curDeadlineSectors)
		fmt.Printf("Deadline Open: %s\n", epochTime(cd.CurrentEpoch, cd.Open))
		fmt.Printf("Deadline Close: %s\n", epochTime(cd.CurrentEpoch, cd.Close))
		fmt.Printf("Deadline Challenge: %s\n", epochTime(cd.CurrentEpoch, cd.Challenge))
		fmt.Printf("Deadline FaultCutoff: %s\n", epochTime(cd.CurrentEpoch, cd.FaultCutoff))
		return nil
	},
}

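// epochTime formats an epoch as its number plus a relative wall-clock time
// ("ago", "now", or "in"), assuming a fixed build.BlockDelaySecs seconds per
// epoch.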
func epochTime(curr, e abi.ChainEpoch) string {
	switch {
	case curr > e:
		return fmt.Sprintf("%d (%s ago)", e, time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(curr-e)))
	case curr == e:
		return fmt.Sprintf("%d (now)", e)
	case curr < e:
		return fmt.Sprintf("%d (in %s)", e, time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(e-curr)))
	}

	panic("math broke")
}

var provingDeadlinesCmd = &cli.Command{
	Name:  "deadlines",
	Usage: "View the current proving period deadlines information",
	Action: func(cctx *cli.Context) error {
		color.NoColor = !cctx.Bool("color")

		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()

		api, acloser, err := lcli.GetFullNodeAPI(cctx)
		if err != nil {
			return err
		}
		defer acloser()

		ctx := lcli.ReqContext(cctx)

		maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor"))
		if err != nil {
			return err
		}

		deadlines, err := api.StateMinerDeadlines(ctx, maddr, types.EmptyTSK)
		if err != nil {
			return xerrors.Errorf("getting deadlines: %w", err)
		}

		di, err := api.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
		if err != nil {
			return xerrors.Errorf("getting proving deadline info: %w", err)
		}

		var mas miner.State
		{
			mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK)
			if err != nil {
				return err
			}

			stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(api))

			mas, err = miner.Load(stor, mact)
			if err != nil {
				return err
			}
		}
		// The miner state is loaded through the new abstraction layer as part
		// of the ongoing migration; the table below still reads deadline and
		// partition data through the node API.
		_ = mas

		fmt.Printf("Miner: %s\n", color.BlueString("%s", maddr))

		tw := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
		_, _ = fmt.Fprintln(tw, "deadline\tpartitions\tsectors (faults)\tproven partitions")

		for dlIdx, deadline := range deadlines {
			partitions, err := api.StateMinerPartitions(ctx, maddr, uint64(dlIdx), types.EmptyTSK)
			if err != nil {
				return xerrors.Errorf("getting partitions for deadline %d: %w", dlIdx, err)
			}

			provenPartitions, err := deadline.PostSubmissions.Count()
			if err != nil {
				return err
			}

			sectors := uint64(0)
			faults := uint64(0)

			for _, partition := range partitions {
				sc, err := partition.Sectors.Count()
				if err != nil {
					return err
				}

				sectors += sc

				fc, err := partition.Faults.Count()
				if err != nil {
					return err
				}

				faults += fc
			}

			var cur string
			if di.Index == uint64(dlIdx) {
				cur += "\t(current)"
			}
			_, _ = fmt.Fprintf(tw, "%d\t%d\t%d (%d)\t%d%s\n", dlIdx, len(partitions), sectors, faults, provenPartitions, cur)
		}

		return tw.Flush()
	},
}