feat: miner cli: Separate proving workers command

Łukasz Magiera 2022-03-26 16:16:12 -04:00
parent c50b63c5f6
commit ad684ed0ea
4 changed files with 121 additions and 91 deletions

View File

@@ -50,8 +50,13 @@ var infoAllCmd = &cli.Command{
 			fmt.Println("ERROR: ", err)
 		}
 
-		fmt.Println("\n#: Worker List")
-		if err := sealingWorkersCmd.Action(cctx); err != nil {
+		fmt.Println("\n#: Sealing Worker List")
+		if err := workersCmd(true).Action(cctx); err != nil {
 			fmt.Println("ERROR: ", err)
 		}
+
+		fmt.Println("\n#: Proving Worker List")
+		if err := workersCmd(false).Action(cctx); err != nil {
+			fmt.Println("ERROR: ", err)
+		}

View File

@@ -30,6 +30,7 @@ var provingCmd = &cli.Command{
 		provingDeadlineInfoCmd,
 		provingFaultsCmd,
 		provingCheckProvableCmd,
+		workersCmd(false),
 	},
 }

View File

@@ -16,6 +16,7 @@ import (
 	"github.com/urfave/cli/v2"
 	"golang.org/x/xerrors"
 
+	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 
 	"github.com/filecoin-project/lotus/chain/types"
@@ -27,7 +28,7 @@ var sealingCmd = &cli.Command{
 	Usage: "interact with sealing pipeline",
 	Subcommands: []*cli.Command{
 		sealingJobsCmd,
-		sealingWorkersCmd,
+		workersCmd(true),
 		sealingSchedDiagCmd,
 		sealingAbortCmd,
 	},
@@ -47,107 +48,115 @@ func barString(total, y, g float64) string {
 	return barString
 }
 
-var sealingWorkersCmd = &cli.Command{
-	Name:  "workers",
-	Usage: "list workers",
-	Flags: []cli.Flag{
-		&cli.BoolFlag{
-			Name:        "color",
-			Usage:       "use color in display output",
-			DefaultText: "depends on output being a TTY",
-		},
-	},
-	Action: func(cctx *cli.Context) error {
-		if cctx.IsSet("color") {
-			color.NoColor = !cctx.Bool("color")
-		}
-
-		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
-		if err != nil {
-			return err
-		}
-		defer closer()
-
-		ctx := lcli.ReqContext(cctx)
-
-		stats, err := nodeApi.WorkerStats(ctx)
-		if err != nil {
-			return err
-		}
-
-		type sortableStat struct {
-			id uuid.UUID
-			storiface.WorkerStats
-		}
-
-		st := make([]sortableStat, 0, len(stats))
-		for id, stat := range stats {
-			st = append(st, sortableStat{id, stat})
-		}
-
-		sort.Slice(st, func(i, j int) bool {
-			return st[i].id.String() < st[j].id.String()
-		})
-
-		for _, stat := range st {
-			gpuUse := "not "
-			gpuCol := color.FgBlue
-			if stat.GpuUsed > 0 {
-				gpuCol = color.FgGreen
-				gpuUse = ""
-			}
-
-			var disabled string
-			if !stat.Enabled {
-				disabled = color.RedString(" (disabled)")
-			}
-
-			fmt.Printf("Worker %s, host %s%s\n", stat.id, color.MagentaString(stat.Info.Hostname), disabled)
-
-			fmt.Printf("\tCPU: [%s] %d/%d core(s) in use\n",
-				barString(float64(stat.Info.Resources.CPUs), 0, float64(stat.CpuUse)), stat.CpuUse, stat.Info.Resources.CPUs)
-
-			ramTotal := stat.Info.Resources.MemPhysical
-			ramTasks := stat.MemUsedMin
-			ramUsed := stat.Info.Resources.MemUsed
-			var ramReserved uint64 = 0
-			if ramUsed > ramTasks {
-				ramReserved = ramUsed - ramTasks
-			}
-			ramBar := barString(float64(ramTotal), float64(ramReserved), float64(ramTasks))
-
-			fmt.Printf("\tRAM: [%s] %d%% %s/%s\n", ramBar,
-				(ramTasks+ramReserved)*100/stat.Info.Resources.MemPhysical,
-				types.SizeStr(types.NewInt(ramTasks+ramUsed)),
-				types.SizeStr(types.NewInt(stat.Info.Resources.MemPhysical)))
-
-			vmemTotal := stat.Info.Resources.MemPhysical + stat.Info.Resources.MemSwap
-			vmemTasks := stat.MemUsedMax
-			vmemUsed := stat.Info.Resources.MemUsed + stat.Info.Resources.MemSwapUsed
-			var vmemReserved uint64 = 0
-			if vmemUsed > vmemTasks {
-				vmemReserved = vmemUsed - vmemTasks
-			}
-			vmemBar := barString(float64(vmemTotal), float64(vmemReserved), float64(vmemTasks))
-
-			fmt.Printf("\tVMEM: [%s] %d%% %s/%s\n", vmemBar,
-				(vmemTasks+vmemReserved)*100/vmemTotal,
-				types.SizeStr(types.NewInt(vmemTasks+vmemReserved)),
-				types.SizeStr(types.NewInt(vmemTotal)))
-
-			if len(stat.Info.Resources.GPUs) > 0 {
-				gpuBar := barString(float64(len(stat.Info.Resources.GPUs)), 0, stat.GpuUsed)
-				fmt.Printf("\tGPU: [%s] %.f%% %.2f/%d gpu(s) in use\n", color.GreenString(gpuBar),
-					stat.GpuUsed*100/float64(len(stat.Info.Resources.GPUs)),
-					stat.GpuUsed, len(stat.Info.Resources.GPUs))
-			}
-			for _, gpu := range stat.Info.Resources.GPUs {
-				fmt.Printf("\tGPU: %s\n", color.New(gpuCol).Sprintf("%s, %sused", gpu, gpuUse))
-			}
-		}
-
-		return nil
-	},
+func workersCmd(sealing bool) *cli.Command {
+	return &cli.Command{
+		Name:  "workers",
+		Usage: "list workers",
+		Flags: []cli.Flag{
+			&cli.BoolFlag{
+				Name:        "color",
+				Usage:       "use color in display output",
+				DefaultText: "depends on output being a TTY",
+			},
+		},
+		Action: func(cctx *cli.Context) error {
+			if cctx.IsSet("color") {
+				color.NoColor = !cctx.Bool("color")
+			}
+
+			nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
+			if err != nil {
+				return err
+			}
+			defer closer()
+
+			ctx := lcli.ReqContext(cctx)
+
+			stats, err := nodeApi.WorkerStats(ctx)
+			if err != nil {
+				return err
+			}
+
+			type sortableStat struct {
+				id uuid.UUID
+				storiface.WorkerStats
+			}
+
+			st := make([]sortableStat, 0, len(stats))
+			for id, stat := range stats {
+				if len(stat.Tasks) > 0 {
+					if (stat.Tasks[0].WorkerType() != sealtasks.WorkerSealing) == sealing {
+						continue
+					}
+				}
+
+				st = append(st, sortableStat{id, stat})
+			}
+
+			sort.Slice(st, func(i, j int) bool {
+				return st[i].id.String() < st[j].id.String()
+			})
+
+			for _, stat := range st {
+				gpuUse := "not "
+				gpuCol := color.FgBlue
+				if stat.GpuUsed > 0 {
+					gpuCol = color.FgGreen
+					gpuUse = ""
+				}
+
+				var disabled string
+				if !stat.Enabled {
+					disabled = color.RedString(" (disabled)")
+				}
+
+				fmt.Printf("Worker %s, host %s%s\n", stat.id, color.MagentaString(stat.Info.Hostname), disabled)
+
+				fmt.Printf("\tCPU: [%s] %d/%d core(s) in use\n",
+					barString(float64(stat.Info.Resources.CPUs), 0, float64(stat.CpuUse)), stat.CpuUse, stat.Info.Resources.CPUs)
+
+				ramTotal := stat.Info.Resources.MemPhysical
+				ramTasks := stat.MemUsedMin
+				ramUsed := stat.Info.Resources.MemUsed
+				var ramReserved uint64 = 0
+				if ramUsed > ramTasks {
+					ramReserved = ramUsed - ramTasks
+				}
+				ramBar := barString(float64(ramTotal), float64(ramReserved), float64(ramTasks))
+
+				fmt.Printf("\tRAM: [%s] %d%% %s/%s\n", ramBar,
+					(ramTasks+ramReserved)*100/stat.Info.Resources.MemPhysical,
+					types.SizeStr(types.NewInt(ramTasks+ramUsed)),
+					types.SizeStr(types.NewInt(stat.Info.Resources.MemPhysical)))
+
+				vmemTotal := stat.Info.Resources.MemPhysical + stat.Info.Resources.MemSwap
+				vmemTasks := stat.MemUsedMax
+				vmemUsed := stat.Info.Resources.MemUsed + stat.Info.Resources.MemSwapUsed
+				var vmemReserved uint64 = 0
+				if vmemUsed > vmemTasks {
+					vmemReserved = vmemUsed - vmemTasks
+				}
+				vmemBar := barString(float64(vmemTotal), float64(vmemReserved), float64(vmemTasks))
+
+				fmt.Printf("\tVMEM: [%s] %d%% %s/%s\n", vmemBar,
+					(vmemTasks+vmemReserved)*100/vmemTotal,
+					types.SizeStr(types.NewInt(vmemTasks+vmemReserved)),
+					types.SizeStr(types.NewInt(vmemTotal)))
+
+				if len(stat.Info.Resources.GPUs) > 0 {
+					gpuBar := barString(float64(len(stat.Info.Resources.GPUs)), 0, stat.GpuUsed)
+					fmt.Printf("\tGPU: [%s] %.f%% %.2f/%d gpu(s) in use\n", color.GreenString(gpuBar),
+						stat.GpuUsed*100/float64(len(stat.Info.Resources.GPUs)),
+						stat.GpuUsed, len(stat.Info.Resources.GPUs))
+				}
+				for _, gpu := range stat.Info.Resources.GPUs {
+					fmt.Printf("\tGPU: %s\n", color.New(gpuCol).Sprintf("%s, %sused", gpu, gpuUse))
+				}
+			}
+
+			return nil
+		},
+	}
 }
 
 var sealingJobsCmd = &cli.Command{
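The one behavioural change buried in the re-indented block above is the task-type filter: when a worker declares at least one task, it is skipped unless the type of its first task matches the list being printed. Below is a minimal sketch of that predicate; `WorkerType` and the two constants are local stand-ins for the sealtasks types (only `sealtasks.WorkerSealing` appears in the diff, the non-sealing value here is purely illustrative):

```go
package main

import "fmt"

// Local stand-ins for the sealtasks worker types; only WorkerSealing is
// referenced by the diff above, the proving value is illustrative.
type WorkerType string

const (
	WorkerSealing WorkerType = "sealing"
	WorkerProving WorkerType = "proving"
)

// keep mirrors the filter in workersCmd: the command skips a worker when
// (taskType != WorkerSealing) == sealing, so it keeps the worker when that
// comparison comes out false.
func keep(taskType WorkerType, sealing bool) bool {
	return (taskType != WorkerSealing) != sealing
}

func main() {
	fmt.Println(keep(WorkerSealing, true))  // true:  sealing worker listed by `sealing workers`
	fmt.Println(keep(WorkerProving, true))  // false: non-sealing worker hidden there
	fmt.Println(keep(WorkerProving, false)) // true:  non-sealing worker listed by `proving workers`
	fmt.Println(keep(WorkerSealing, false)) // false: sealing worker hidden from `proving workers`
}
```

Workers that declare no tasks bypass the check entirely, so they show up under both `lotus-miner sealing workers` and the new `lotus-miner proving workers`.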

View File

@@ -2040,6 +2040,7 @@ COMMANDS:
    deadline  View the current proving period deadline information by its index
    faults    View the currently known proving faulty sectors information
    check     Check sectors provable
+   workers   list workers
    help, h   Shows a list of commands or help for one command
 
 OPTIONS:
@@ -2116,6 +2117,20 @@ OPTIONS:
 ```
 
+### lotus-miner proving workers
+```
+NAME:
+   lotus-miner proving workers - list workers
+
+USAGE:
+   lotus-miner proving workers [command options] [arguments...]
+
+OPTIONS:
+   --color     use color in display output (default: depends on output being a TTY)
+   --help, -h  show help (default: false)
+
+```
+
 ## lotus-miner storage
 ```
 NAME: