Merge remote-tracking branch 'origin/master' into feat/storage-redeclare
commit 819314de46
@@ -587,14 +587,26 @@ jobs:
           name: install snapcraft
           command: sudo snap install snapcraft --classic
       - run:
-          name: build snap
+          name: build `lotus-filecoin` snap
           command: snapcraft --use-lxd --debug
       - run:
-          name: publish snap
+          name: publish `lotus-filecoin` snap
           shell: /bin/bash -o pipefail
           command: |
            pwd
            ls *.snap
            snapcraft upload lotus-filecoin_latest_amd64.snap --release << parameters.channel >>
+      - run:
+          name: Rewrite snapcraft.yaml to use `lotus` name
+          command: |
+            cat snap/snapcraft.yaml | sed 's/lotus-filecoin/lotus/' > lotus-snapcraft.yaml
+            mv lotus-snapcraft.yaml snap/snapcraft.yaml
+      - run:
+          name: build `lotus` snap
+          command: snapcraft --use-lxd --debug
+      - run:
+          name: publish `lotus` snap
+          shell: /bin/bash -o pipefail
+          command: |
+            snapcraft upload lotus_latest_amd64.snap --release << parameters.channel >>
  build-and-push-image:
    description: build and push docker images to public AWS ECR registry
@@ -746,26 +758,32 @@ jobs:
           name: docker build
           command: |
             docker build --target lotus -t filecoin/lotus:<< parameters.tag >> -f Dockerfile.lotus .
             docker build --target lotus-gateway -t filecoin/lotus-gateway:<< parameters.tag >> -f Dockerfile.lotus .
+            docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:<< parameters.tag >> -f Dockerfile.lotus .
             if [[ ! -z $CIRCLE_SHA1 ]]; then
               docker build --target lotus -t filecoin/lotus:$CIRCLE_SHA1 -f Dockerfile.lotus .
               docker build --target lotus-gateway -t filecoin/lotus-gateway:$CIRCLE_SHA1 -f Dockerfile.lotus .
+              docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_SHA1 -f Dockerfile.lotus .
             fi
             if [[ ! -z $CIRCLE_TAG ]]; then
               docker build --target lotus -t filecoin/lotus:$CIRCLE_TAG -f Dockerfile.lotus .
               docker build --target lotus-gateway -t filecoin/lotus-gateway:$CIRCLE_TAG -f Dockerfile.lotus .
+              docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_TAG -f Dockerfile.lotus .
             fi
       - run:
           name: docker push
           command: |
             docker push filecoin/lotus:<< parameters.tag >>
             docker push filecoin/lotus-gateway:<< parameters.tag >>
+            docker push filecoin/lotus-all-in-one:<< parameters.tag >>
             if [[ ! -z $CIRCLE_SHA1 ]]; then
               docker push filecoin/lotus:$CIRCLE_SHA1
               docker push filecoin/lotus-gateway:$CIRCLE_SHA1
+              docker push filecoin/lotus-all-in-one:$CIRCLE_SHA1
             fi
             if [[ ! -z $CIRCLE_TAG ]]; then
               docker push filecoin/lotus:$CIRCLE_TAG
               docker push filecoin/lotus-gateway:$CIRCLE_TAG
+              docker push filecoin/lotus-all-in-one:$CIRCLE_TAG
             fi
@@ -129,7 +129,6 @@ release:
   owner: filecoin-project
   name: lotus
   prerelease: auto
   mode: append
   name_template: "Release v{{.Version}}"
Makefile
@@ -174,8 +174,8 @@ lotus-pond-app: lotus-pond-front lotus-pond
 
 lotus-fountain:
 	rm -f lotus-fountain
-	go build $(GOFLAGS) -o lotus-fountain ./cmd/lotus-fountain
-	go run github.com/GeertJohan/go.rice/rice append --exec lotus-fountain -i ./cmd/lotus-fountain -i ./build
+	$(GOCC) build $(GOFLAGS) -o lotus-fountain ./cmd/lotus-fountain
+	$(GOCC) run github.com/GeertJohan/go.rice/rice append --exec lotus-fountain -i ./cmd/lotus-fountain -i ./build
 .PHONY: lotus-fountain
 BINS+=lotus-fountain
@@ -48,6 +48,11 @@ type StorageMiner interface {
 	ActorSectorSize(context.Context, address.Address) (abi.SectorSize, error) //perm:read
 	ActorAddressConfig(ctx context.Context) (AddressConfig, error) //perm:read
 
+	// WithdrawBalance allows to withdraw balance from miner actor to owner address
+	// Specify amount as "0" to withdraw full balance. This method returns a message CID
+	// and does not wait for message execution
+	ActorWithdrawBalance(ctx context.Context, amount abi.TokenAmount) (cid.Cid, error) //perm:admin
+
 	MiningBase(context.Context) (*types.TipSet, error) //perm:read
 
 	ComputeWindowPoSt(ctx context.Context, dlIdx uint64, tsk types.TipSetKey) ([]miner.SubmitWindowedPoStParams, error) //perm:admin
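For context, a minimal sketch of how a caller is expected to drive the new method: request the withdrawal, then wait on the returned message CID via a full node, since the call itself does not wait. The pre-wired `minerApi`/`fullApi` clients and the `withdrawAll` helper are illustrative assumptions, not part of this change:

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v0api"
	"github.com/filecoin-project/lotus/build"
)

// withdrawAll requests a withdrawal of the full available balance, then waits
// for the message to execute; ActorWithdrawBalance returns as soon as the
// message is pushed to the mempool.
func withdrawAll(ctx context.Context, minerApi api.StorageMiner, fullApi v0api.FullNode) error {
	// Amount "0" means: withdraw the full available balance.
	msgCid, err := minerApi.ActorWithdrawBalance(ctx, abi.NewTokenAmount(0))
	if err != nil {
		return err
	}

	// Wait for execution on a full node (v0 API form, as in the CLI change below).
	wait, err := fullApi.StateWaitMsg(ctx, msgCid, build.MessageConfidence)
	if err != nil {
		return err
	}
	if wait.Receipt.ExitCode != 0 {
		return fmt.Errorf("withdrawal failed: exit code %d", wait.Receipt.ExitCode)
	}
	return nil
}
```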
@@ -79,6 +79,10 @@ type Worker interface {
 
 	// Like ProcessSession, but returns an error when worker is disabled
 	Session(context.Context) (uuid.UUID, error) //perm:admin
 
+	// Trigger shutdown
+	Shutdown(context.Context) error //perm:admin
+
 }
 
 var _ storiface.WorkerCalls = *new(Worker)
@@ -656,6 +656,8 @@ type StorageMinerStruct struct {
 
 		ActorSectorSize func(p0 context.Context, p1 address.Address) (abi.SectorSize, error) `perm:"read"`
 
+		ActorWithdrawBalance func(p0 context.Context, p1 abi.TokenAmount) (cid.Cid, error) `perm:"admin"`
+
 		CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`
 
 		ComputeDataCid func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data) (abi.PieceInfo, error) `perm:"admin"`
@@ -971,6 +973,8 @@ type WorkerStruct struct {
 
 		SetEnabled func(p0 context.Context, p1 bool) error `perm:"admin"`
 
+		Shutdown func(p0 context.Context) error `perm:"admin"`
+
 		StorageAddLocal func(p0 context.Context, p1 string) error `perm:"admin"`
 
 		StorageDetachLocal func(p0 context.Context, p1 string) error `perm:"admin"`
@@ -3966,6 +3970,17 @@ func (s *StorageMinerStub) ActorSectorSize(p0 context.Context, p1 address.Addres
 	return *new(abi.SectorSize), ErrNotSupported
 }
 
+func (s *StorageMinerStruct) ActorWithdrawBalance(p0 context.Context, p1 abi.TokenAmount) (cid.Cid, error) {
+	if s.Internal.ActorWithdrawBalance == nil {
+		return *new(cid.Cid), ErrNotSupported
+	}
+	return s.Internal.ActorWithdrawBalance(p0, p1)
+}
+
+func (s *StorageMinerStub) ActorWithdrawBalance(p0 context.Context, p1 abi.TokenAmount) (cid.Cid, error) {
+	return *new(cid.Cid), ErrNotSupported
+}
+
 func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
 	if s.Internal.CheckProvable == nil {
 		return *new(map[abi.SectorNumber]string), ErrNotSupported
@@ -5605,6 +5620,17 @@ func (s *WorkerStub) SetEnabled(p0 context.Context, p1 bool) error {
 	return ErrNotSupported
 }
 
+func (s *WorkerStruct) Shutdown(p0 context.Context) error {
+	if s.Internal.Shutdown == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.Shutdown(p0)
+}
+
+func (s *WorkerStub) Shutdown(p0 context.Context) error {
+	return ErrNotSupported
+}
+
 func (s *WorkerStruct) StorageAddLocal(p0 context.Context, p1 string) error {
 	if s.Internal.StorageAddLocal == nil {
 		return ErrNotSupported
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -185,6 +185,10 @@ func GetDealFees(deal market{{.latestVersion}}.DealProposal, height abi.ChainEpo
 	return ef, big.Sub(tf, ef)
 }
 
+func IsDealActive(state market{{.latestVersion}}.DealState) bool {
+	return state.SectorStartEpoch > -1 && state.SlashEpoch == -1
+}
+
 func labelFromGoString(s string) (market{{.latestVersion}}.DealLabel, error) {
 	if utf8.ValidString(s) {
 		return market{{.latestVersion}}.NewLabelFromString(s)
@@ -240,6 +240,10 @@ func GetDealFees(deal market8.DealProposal, height abi.ChainEpoch) (abi.TokenAmo
 	return ef, big.Sub(tf, ef)
 }
 
+func IsDealActive(state market8.DealState) bool {
+	return state.SectorStartEpoch > -1 && state.SlashEpoch == -1
+}
+
 func labelFromGoString(s string) (market8.DealLabel, error) {
 	if utf8.ValidString(s) {
 		return market8.NewLabelFromString(s)
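As a quick illustration of the predicate: both epoch fields use `-1` as the "not yet set" sentinel, so a deal counts as active once it has been sealed into a sector and has never been slashed. The epoch values below are made up, and `isDealActive` is a local copy of the helper added above:

```go
package example

import (
	market8 "github.com/filecoin-project/go-state-types/builtin/v8/market"
)

// Local copy of the predicate added above, for illustration.
func isDealActive(state market8.DealState) bool {
	return state.SectorStartEpoch > -1 && state.SlashEpoch == -1
}

func exampleStates() []bool {
	pending := market8.DealState{SectorStartEpoch: -1, LastUpdatedEpoch: -1, SlashEpoch: -1}
	active := market8.DealState{SectorStartEpoch: 10, LastUpdatedEpoch: -1, SlashEpoch: -1}
	slashed := market8.DealState{SectorStartEpoch: 10, LastUpdatedEpoch: 20, SlashEpoch: 20}

	return []bool{
		isDealActive(pending), // false: deal not yet activated into a sector
		isDealActive(active),  // true: activated and never slashed
		isDealActive(slashed), // false: deal was slashed
	}
}
```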
@@ -218,6 +218,17 @@ var actorWithdrawCmd = &cli.Command{
 		},
 	},
 	Action: func(cctx *cli.Context) error {
+		amount := abi.NewTokenAmount(0)
+
+		if cctx.Args().Present() {
+			f, err := types.ParseFIL(cctx.Args().First())
+			if err != nil {
+				return xerrors.Errorf("parsing 'amount' argument: %w", err)
+			}
+
+			amount = abi.TokenAmount(f)
+		}
+
 		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
 		if err != nil {
 			return err
@@ -232,67 +243,19 @@ var actorWithdrawCmd = &cli.Command{
 
 		ctx := lcli.ReqContext(cctx)
 
-		maddr, err := nodeApi.ActorAddress(ctx)
+		res, err := nodeApi.ActorWithdrawBalance(ctx, amount)
 		if err != nil {
 			return err
 		}
 
-		mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
-		if err != nil {
-			return err
-		}
-
-		available, err := api.StateMinerAvailableBalance(ctx, maddr, types.EmptyTSK)
-		if err != nil {
-			return err
-		}
-
-		amount := available
-		if cctx.Args().Present() {
-			f, err := types.ParseFIL(cctx.Args().First())
-			if err != nil {
-				return xerrors.Errorf("parsing 'amount' argument: %w", err)
-			}
-
-			amount = abi.TokenAmount(f)
-
-			if amount.GreaterThan(available) {
-				return xerrors.Errorf("can't withdraw more funds than available; requested: %s; available: %s", types.FIL(amount), types.FIL(available))
-			}
-		}
-
-		params, err := actors.SerializeParams(&miner.WithdrawBalanceParams{
-			AmountRequested: amount, // Default to attempting to withdraw all the extra funds in the miner actor
-		})
-		if err != nil {
-			return err
-		}
-
-		smsg, err := api.MpoolPushMessage(ctx, &types.Message{
-			To:     maddr,
-			From:   mi.Owner,
-			Value:  types.NewInt(0),
-			Method: builtin.MethodsMiner.WithdrawBalance,
-			Params: params,
-		}, nil)
-		if err != nil {
-			return err
-		}
-
-		fmt.Printf("Requested rewards withdrawal in message %s\n", smsg.Cid())
-
 		// wait for it to get mined into a block
 		fmt.Printf("waiting for %d epochs for confirmation..\n", uint64(cctx.Int("confidence")))
 
-		wait, err := api.StateWaitMsg(ctx, smsg.Cid(), uint64(cctx.Int("confidence")))
+		wait, err := api.StateWaitMsg(ctx, res, uint64(cctx.Int("confidence")))
 		if err != nil {
-			return err
+			return xerrors.Errorf("Timeout waiting for withdrawal message %s", wait.Message)
 		}
 
 		// check it executed successfully
 		if wait.Receipt.ExitCode != 0 {
-			fmt.Println(cctx.App.Writer, "withdrawal failed!")
-			return err
+			return xerrors.Errorf("Failed to execute withdrawal message %s: %w", wait.Message, wait.Receipt.ExitCode.Error())
 		}
 
 		nv, err := api.StateNetworkVersion(ctx, wait.TipSet)
@@ -1,10 +1,12 @@
 package main
 
 import (
+	"bytes"
 	"encoding/json"
 	"fmt"
 	"os"
+	"strconv"
 	"strings"
 	"text/tabwriter"
 	"time"
 
@@ -13,6 +15,7 @@ import (
 	"golang.org/x/xerrors"
 
 	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-bitfield"
 	"github.com/filecoin-project/go-state-types/abi"
 
 	"github.com/filecoin-project/lotus/blockstore"
@@ -197,6 +200,13 @@ var provingInfoCmd = &cli.Command{
 var provingDeadlinesCmd = &cli.Command{
 	Name:  "deadlines",
 	Usage: "View the current proving period deadlines information",
+	Flags: []cli.Flag{
+		&cli.BoolFlag{
+			Name:    "all",
+			Usage:   "Count all sectors (only live sectors are counted by default)",
+			Aliases: []string{"a"},
+		},
+	},
 	Action: func(cctx *cli.Context) error {
 		api, acloser, err := lcli.GetFullNodeAPI(cctx)
 		if err != nil {
@@ -239,14 +249,29 @@ var provingDeadlinesCmd = &cli.Command{
 
 			sectors := uint64(0)
 			faults := uint64(0)
+			var partitionCount int
 
 			for _, partition := range partitions {
-				sc, err := partition.AllSectors.Count()
-				if err != nil {
-					return err
-				}
+				if !cctx.Bool("all") {
+					sc, err := partition.LiveSectors.Count()
+					if err != nil {
+						return err
+					}
 
-				sectors += sc
+					if sc > 0 {
+						partitionCount++
+					}
+
+					sectors += sc
+				} else {
+					sc, err := partition.AllSectors.Count()
+					if err != nil {
+						return err
+					}
+
+					partitionCount++
+					sectors += sc
+				}
 
 				fc, err := partition.FaultySectors.Count()
 				if err != nil {
@@ -260,7 +285,7 @@ var provingDeadlinesCmd = &cli.Command{
 			if di.Index == uint64(dlIdx) {
 				cur += "\t(current)"
 			}
-			_, _ = fmt.Fprintf(tw, "%d\t%d\t%d (%d)\t%d%s\n", dlIdx, len(partitions), sectors, faults, provenPartitions, cur)
+			_, _ = fmt.Fprintf(tw, "%d\t%d\t%d (%d)\t%d%s\n", dlIdx, partitionCount, sectors, faults, provenPartitions, cur)
 		}
 
 		return tw.Flush()
@@ -276,6 +301,11 @@ var provingDeadlineInfoCmd = &cli.Command{
 			Aliases: []string{"n"},
 			Usage:   "Print sector/fault numbers belonging to this deadline",
 		},
+		&cli.BoolFlag{
+			Name:    "bitfield",
+			Aliases: []string{"b"},
+			Usage:   "Print partition bitfield stats",
+		},
 	},
 	ArgsUsage: "<deadlineIdx>",
 	Action: func(cctx *cli.Context) error {
@@ -328,34 +358,75 @@ var provingDeadlineInfoCmd = &cli.Command{
 		fmt.Printf("Current: %t\n\n", di.Index == dlIdx)
 
 		for pIdx, partition := range partitions {
-			sectorCount, err := partition.AllSectors.Count()
-			if err != nil {
-				return err
-			}
-
-			sectorNumbers, err := partition.AllSectors.All(sectorCount)
-			if err != nil {
-				return err
-			}
-
-			faultsCount, err := partition.FaultySectors.Count()
-			if err != nil {
-				return err
-			}
-
-			fn, err := partition.FaultySectors.All(faultsCount)
-			if err != nil {
-				return err
-			}
-
 			fmt.Printf("Partition Index: %d\n", pIdx)
-			fmt.Printf("\tSectors: %d\n", sectorCount)
-			if cctx.Bool("sector-nums") {
-				fmt.Printf("\tSector Numbers: %v\n", sectorNumbers)
-			}
+
+			printStats := func(bf bitfield.BitField, name string) error {
+				count, err := bf.Count()
+				if err != nil {
+					return err
+				}
+
+				rit, err := bf.RunIterator()
+				if err != nil {
+					return err
+				}
+
+				if cctx.Bool("bitfield") {
+					var ones, zeros, oneRuns, zeroRuns, invalid uint64
+					for rit.HasNext() {
+						r, err := rit.NextRun()
+						if err != nil {
+							return xerrors.Errorf("next run: %w", err)
+						}
+						if !r.Valid() {
+							invalid++
+						}
+						if r.Val {
+							ones += r.Len
+							oneRuns++
+						} else {
+							zeros += r.Len
+							zeroRuns++
+						}
+					}
+
+					var buf bytes.Buffer
+					if err := bf.MarshalCBOR(&buf); err != nil {
+						return err
+					}
+					sz := len(buf.Bytes())
+					szstr := types.SizeStr(types.NewInt(uint64(sz)))
+
+					fmt.Printf("\t%s Sectors:%s%d (bitfield - runs %d+%d=%d - %d 0s %d 1s - %d inv - %s %dB)\n", name, strings.Repeat(" ", 18-len(name)), count, zeroRuns, oneRuns, zeroRuns+oneRuns, zeros, ones, invalid, szstr, sz)
+				} else {
+					fmt.Printf("\t%s Sectors:%s%d\n", name, strings.Repeat(" ", 18-len(name)), count)
+				}
+
+				if cctx.Bool("sector-nums") {
+					nums, err := bf.All(count)
+					if err != nil {
+						return err
+					}
+					fmt.Printf("\t%s Sector Numbers:%s%v\n", name, strings.Repeat(" ", 12-len(name)), nums)
+				}
+
+				return nil
+			}
-			fmt.Printf("\tFaults: %d\n", faultsCount)
-			if cctx.Bool("sector-nums") {
-				fmt.Printf("\tFaulty Sectors: %d\n", fn)
-			}
+
+			if err := printStats(partition.AllSectors, "All"); err != nil {
+				return err
+			}
+			if err := printStats(partition.LiveSectors, "Live"); err != nil {
+				return err
+			}
+			if err := printStats(partition.ActiveSectors, "Active"); err != nil {
+				return err
+			}
+			if err := printStats(partition.FaultySectors, "Faulty"); err != nil {
+				return err
+			}
+			if err := printStats(partition.RecoveringSectors, "Recovering"); err != nil {
+				return err
+			}
 		}
 		return nil
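For context on the run statistics printed above: sector bitfields are RLE+ encoded, and `RunIterator` walks alternating runs of 0s and 1s, which is what the new `--bitfield` output summarizes. A small self-contained sketch with made-up sector numbers:

```go
package example

import (
	"fmt"

	"github.com/filecoin-project/go-bitfield"
)

// runStats counts 0/1 runs the same way the new printStats helper does.
func runStats() error {
	// Sectors 1,2,3 and 10 set: encodes as runs [0x1, 1x3, 0x6, 1x1].
	bf := bitfield.NewFromSet([]uint64{1, 2, 3, 10})

	rit, err := bf.RunIterator()
	if err != nil {
		return err
	}

	var ones, zeros, oneRuns, zeroRuns uint64
	for rit.HasNext() {
		r, err := rit.NextRun()
		if err != nil {
			return err
		}
		if r.Val {
			ones += r.Len
			oneRuns++
		} else {
			zeros += r.Len
			zeroRuns++
		}
	}

	// Prints: runs 2+2=4 - 7 0s 4 1s
	fmt.Printf("runs %d+%d=%d - %d 0s %d 1s\n", zeroRuns, oneRuns, zeroRuns+oneRuns, zeros, ones)
	return nil
}
```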
@@ -19,6 +19,7 @@ import (
 	"github.com/filecoin-project/go-state-types/big"
 
+	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
 	"github.com/filecoin-project/lotus/chain/types"
 	lcli "github.com/filecoin-project/lotus/cli"
 	"github.com/filecoin-project/lotus/lib/backupds"
 	"github.com/filecoin-project/lotus/node/repo"
@@ -32,6 +33,7 @@ var marketCmd = &cli.Command{
 		marketDealFeesCmd,
 		marketExportDatastoreCmd,
 		marketImportDatastoreCmd,
+		marketDealsTotalStorageCmd,
 	},
 }
@@ -283,6 +285,42 @@ var marketImportDatastoreCmd = &cli.Command{
 	},
 }
 
+var marketDealsTotalStorageCmd = &cli.Command{
+	Name:  "get-deals-total-storage",
+	Usage: "View the total storage available in all active market deals",
+	Action: func(cctx *cli.Context) error {
+		api, closer, err := lcli.GetFullNodeAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+
+		ctx := lcli.ReqContext(cctx)
+
+		deals, err := api.StateMarketDeals(ctx, types.EmptyTSK)
+		if err != nil {
+			return err
+		}
+
+		total := big.Zero()
+		count := 0
+
+		for _, deal := range deals {
+			if market.IsDealActive(deal.State) {
+				dealStorage := big.NewIntUnsigned(uint64(deal.Proposal.PieceSize))
+				total = big.Add(total, dealStorage)
+				count++
+			}
+		}
+
+		fmt.Println("Total deals: ", count)
+		fmt.Println("Total storage: ", total)
+
+		return nil
+	},
+}
+
 func openLockedRepo(path string) (repo.LockedRepo, error) {
 	// Open the repo at the repo path
 	rpo, err := repo.NewFS(path)
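`PieceSize` is a padded byte count, so the printed total is raw bytes. If a human-readable figure were wanted, something like the following could be layered on top; this is a sketch using the same `types.SizeStr` helper the proving commands use, and the `humanTotal` name is hypothetical:

```go
package example

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/big"

	"github.com/filecoin-project/lotus/chain/types"
)

// humanTotal renders a byte total like the one accumulated above.
func humanTotal(total big.Int) string {
	return fmt.Sprintf("%s (%s bytes)", types.SizeStr(types.BigInt(total)), total)
}
```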
@@ -55,6 +55,7 @@ func main() {
 
 	local := []*cli.Command{
 		runCmd,
+		stopCmd,
 		infoCmd,
 		storageCmd,
 		setCmd,
@@ -115,6 +116,27 @@ func main() {
 	}
 }
 
+var stopCmd = &cli.Command{
+	Name:  "stop",
+	Usage: "Stop a running lotus worker",
+	Flags: []cli.Flag{},
+	Action: func(cctx *cli.Context) error {
+		api, closer, err := lcli.GetWorkerAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+
+		ctx := lcli.ReqContext(cctx)
+		err = api.Shutdown(ctx)
+		if err != nil {
+			return err
+		}
+
+		return nil
+	},
+}
+
 var runCmd = &cli.Command{
 	Name:  "run",
 	Usage: "Start lotus worker",
@@ -137,6 +159,12 @@ var runCmd = &cli.Command{
 			Usage: "don't use swap",
 			Value: false,
 		},
+		&cli.StringFlag{
+			Name:        "name",
+			Usage:       "custom worker name",
+			EnvVars:     []string{"LOTUS_WORKER_NAME"},
+			DefaultText: "hostname",
+		},
 		&cli.BoolFlag{
 			Name:  "addpiece",
 			Usage: "enable addpiece",
@@ -491,6 +519,7 @@ var runCmd = &cli.Command{
 				NoSwap:                    cctx.Bool("no-swap"),
 				MaxParallelChallengeReads: cctx.Int("post-parallel-reads"),
 				ChallengeReadTimeout:      cctx.Duration("post-read-timeout"),
+				Name:                      cctx.String("name"),
 			}, remote, localStore, nodeApi, nodeApi, wsts),
 			LocalStore: localStore,
 			Storage:    lr,
@@ -623,6 +652,17 @@ var runCmd = &cli.Command{
 			}
 		}()
 
+		go func() {
+			<-workerApi.Done()
+			// Wait 20s to allow the miner to unregister the worker on next heartbeat
+			time.Sleep(20 * time.Second)
+			log.Warn("Shutting down...")
+			if err := srv.Shutdown(context.TODO()); err != nil {
+				log.Errorf("shutting down RPC server failed: %s", err)
+			}
+			log.Warn("Graceful shutdown successful")
+		}()
+
 		return srv.Serve(nl)
 	},
 }
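The shutdown wiring above is a channel hand-off: the RPC `Shutdown` call closes the worker, `Done()` unblocks, and the HTTP server is torn down after a grace period so the miner can notice the worker leaving on its next heartbeat. A stripped-down sketch of the same pattern; all names here are illustrative, not part of the change:

```go
package example

import (
	"context"
	"log"
	"net/http"
	"time"
)

// serveUntilDone runs srv until done is closed, then shuts it down after a
// grace period (the change above waits 20s for the miner's next heartbeat).
func serveUntilDone(srv *http.Server, done <-chan struct{}) error {
	go func() {
		<-done
		time.Sleep(20 * time.Second)
		if err := srv.Shutdown(context.TODO()); err != nil {
			log.Printf("shutting down RPC server failed: %s", err)
		}
	}()
	return srv.ListenAndServe()
}
```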
@@ -184,5 +184,9 @@ func (w *Worker) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error)
 	return build.OpenRPCDiscoverJSON_Worker(), nil
 }
 
+func (w *Worker) Shutdown(ctx context.Context) error {
+	return w.LocalWorker.Close()
+}
+
 var _ storiface.WorkerCalls = &Worker{}
 var _ api.Worker = &Worker{}
@@ -9,6 +9,7 @@
 * [ActorAddress](#ActorAddress)
 * [ActorAddressConfig](#ActorAddressConfig)
 * [ActorSectorSize](#ActorSectorSize)
+* [ActorWithdrawBalance](#ActorWithdrawBalance)
 * [Auth](#Auth)
   * [AuthNew](#AuthNew)
   * [AuthVerify](#AuthVerify)
@@ -295,6 +296,28 @@ Inputs:
 
 Response: `34359738368`
 
+### ActorWithdrawBalance
+WithdrawBalance allows to withdraw balance from miner actor to owner address
+Specify amount as "0" to withdraw full balance. This method returns a message CID
+and does not wait for message execution
+
+Perms: admin
+
+Inputs:
+```json
+[
+  "0"
+]
+```
+
+Response:
+```json
+{
+  "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
 ## Auth
@@ -6,6 +6,7 @@
 * [Paths](#Paths)
 * [Remove](#Remove)
 * [Session](#Session)
+* [Shutdown](#Shutdown)
 * [Version](#Version)
 * [Add](#Add)
   * [AddPiece](#AddPiece)
@@ -1456,6 +1457,16 @@ Inputs: `null`
 
 Response: `"07070707-0707-0707-0707-070707070707"`
 
+### Shutdown
+Trigger shutdown
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `{}`
+
 ### Version
@@ -2055,7 +2055,7 @@ USAGE:
    lotus-miner proving deadlines [command options] [arguments...]
 
 OPTIONS:
-   --help, -h  show help (default: false)
+   --all, -a   Count all sectors (only live sectors are counted by default) (default: false)
 
 ```
 
@@ -2068,6 +2068,7 @@ USAGE:
    lotus-miner proving deadline [command options] <deadlineIdx>
 
 OPTIONS:
+   --bitfield, -b     Print partition bitfield stats (default: false)
    --sector-nums, -n  Print sector/fault numbers belonging to this deadline (default: false)
 
 ```
@@ -11,6 +11,7 @@ VERSION:
 
 COMMANDS:
    run      Start lotus worker
+   stop     Stop a running lotus worker
    info     Print worker info
    storage  manage sector storage
    set      Manage worker settings
@@ -40,6 +41,7 @@ OPTIONS:
    --addpiece           enable addpiece (default: true)
    --commit             enable commit (32G sectors: all cores or GPUs, 128GiB Memory + 64GiB swap) (default: true)
    --listen value       host address and port the worker api will listen on (default: "0.0.0.0:3456")
+   --name value         custom worker name (default: hostname) [$LOTUS_WORKER_NAME]
    --no-default         disable all default compute tasks, use the worker for storage/fetching only (default: false)
    --no-local-storage   don't use storageminer repo for sector storage (default: false)
    --no-swap            don't use swap (default: false)
@@ -58,6 +60,19 @@ OPTIONS:
 
 ```
 
+## lotus-worker stop
+```
+NAME:
+   lotus-worker stop - Stop a running lotus worker
+
+USAGE:
+   lotus-worker stop [command options] [arguments...]
+
+OPTIONS:
+   --help, -h  show help (default: false)
+
+```
+
 ## lotus-worker info
 ```
 NAME:
@@ -622,6 +622,13 @@
       # env var: LOTUS_STORAGE_ALLOWREGENSECTORKEY
       #AllowRegenSectorKey = true
 
+      # LocalWorkerName specifies a custom name for the builtin worker.
+      # If set to an empty string (default) os hostname will be used
+      #
+      # type: string
+      # env var: LOTUS_STORAGE_LOCALWORKERNAME
+      #LocalWorkerName = ""
+
       # Assigner specifies the worker assigner to use when scheduling tasks.
       # "utilization" (default) - assign tasks to workers with lowest utilization.
       # "spread" - assign tasks to as many distinct workers as possible.
@@ -730,6 +730,7 @@ func (n *Ensemble) Start() *Ensemble {
 			LocalWorker: sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{
 				TaskTypes: m.options.workerTasks,
 				NoSwap:    false,
+				Name:      m.options.workerName,
 			}, store, localStore, m.MinerNode, m.MinerNode, wsts),
 			LocalStore: localStore,
 			Storage:    lr,
@@ -47,6 +47,7 @@ type nodeOpts struct {
 
 	workerTasks      []sealtasks.TaskType
 	workerStorageOpt func(paths.Store) paths.Store
+	workerName       string
 }
 
 // DefaultNodeOpts are the default options that will be applied to test nodes.
@@ -219,6 +220,13 @@ func WithTaskTypes(tt []sealtasks.TaskType) NodeOpt {
 	}
 }
 
+func WithWorkerName(n string) NodeOpt {
+	return func(opts *nodeOpts) error {
+		opts.workerName = n
+		return nil
+	}
+}
+
 var WithSealWorkerTasks = WithTaskTypes([]sealtasks.TaskType{sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2, sealtasks.TTUnseal})
 
 func WithWorkerStorage(transform func(paths.Store) paths.Store) NodeOpt {
@@ -401,3 +401,28 @@ func TestWindowPostWorkerManualPoSt(t *testing.T) {
 	require.NoError(t, err)
 	require.Len(t, lastPending, 0)
 }
+
+func TestWorkerName(t *testing.T) {
+	name := "thisstringisprobablynotahostnameihope"
+
+	ctx := context.Background()
+	_, miner, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.WithWorkerName(name))
+
+	ens.InterconnectAll().BeginMining(50 * time.Millisecond)
+
+	e, err := worker.Info(ctx)
+	require.NoError(t, err)
+	require.Equal(t, name, e.Hostname)
+
+	ws, err := miner.WorkerStats(ctx)
+	require.NoError(t, err)
+
+	var found bool
+	for _, stats := range ws {
+		if stats.Info.Hostname == name {
+			found = true
+		}
+	}
+
+	require.True(t, found)
+}
@@ -844,6 +844,13 @@ This parameter is ONLY applicable if the retrieval pricing policy strategy has b
 
 			Comment: ``,
 		},
+		{
+			Name: "LocalWorkerName",
+			Type: "string",
+
+			Comment: `LocalWorkerName specifies a custom name for the builtin worker.
+If set to an empty string (default) os hostname will be used`,
+		},
 		{
 			Name: "Assigner",
 			Type: "string",
@@ -65,6 +65,8 @@ func (c *StorageMiner) StorageManager() sealer.Config {
 		ResourceFiltering:      c.Storage.ResourceFiltering,
 		DisallowRemoteFinalize: c.Storage.DisallowRemoteFinalize,
 
+		LocalWorkerName: c.Storage.LocalWorkerName,
+
 		Assigner: c.Storage.Assigner,
 
 		ParallelCheckLimit: c.Proving.ParallelCheckLimit,
@@ -401,6 +401,10 @@ type SealerConfig struct {
 	AllowProveReplicaUpdate2 bool
 	AllowRegenSectorKey      bool
 
+	// LocalWorkerName specifies a custom name for the builtin worker.
+	// If set to an empty string (default) os hostname will be used
+	LocalWorkerName string
+
 	// Assigner specifies the worker assigner to use when scheduling tasks.
 	// "utilization" (default) - assign tasks to workers with lowest utilization.
 	// "spread" - assign tasks to as many distinct workers as possible.
@@ -32,12 +32,15 @@ import (
 	filmktsstore "github.com/filecoin-project/go-fil-markets/stores"
 	"github.com/filecoin-project/go-jsonrpc/auth"
 	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/big"
+	builtintypes "github.com/filecoin-project/go-state-types/builtin"
+	minertypes "github.com/filecoin-project/go-state-types/builtin/v8/miner"
 	"github.com/filecoin-project/go-state-types/network"
 
 	"github.com/filecoin-project/lotus/api"
 	apitypes "github.com/filecoin-project/lotus/api/types"
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/actors"
 	"github.com/filecoin-project/lotus/chain/actors/builtin"
 	"github.com/filecoin-project/lotus/chain/gen"
 	"github.com/filecoin-project/lotus/chain/types"
@@ -1295,3 +1298,43 @@ func (sm *StorageMinerAPI) ComputeProof(ctx context.Context, ssi []builtin.Exten
 func (sm *StorageMinerAPI) RuntimeSubsystems(context.Context) (res api.MinerSubsystems, err error) {
 	return sm.EnabledSubsystems, nil
 }
+
+func (sm *StorageMinerAPI) ActorWithdrawBalance(ctx context.Context, amount abi.TokenAmount) (cid.Cid, error) {
+	available, err := sm.Full.StateMinerAvailableBalance(ctx, sm.Miner.Address(), types.EmptyTSK)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("Error getting miner balance: %w", err)
+	}
+
+	if amount.GreaterThan(available) {
+		return cid.Undef, xerrors.Errorf("can't withdraw more funds than available; requested: %s; available: %s", types.FIL(amount), types.FIL(available))
+	}
+
+	if amount.Equals(big.Zero()) {
+		amount = available
+	}
+
+	params, err := actors.SerializeParams(&minertypes.WithdrawBalanceParams{
+		AmountRequested: amount,
+	})
+	if err != nil {
+		return cid.Undef, err
+	}
+
+	mi, err := sm.Full.StateMinerInfo(ctx, sm.Miner.Address(), types.EmptyTSK)
+	if err != nil {
+		return cid.Undef, xerrors.Errorf("Error getting miner's owner address: %w", err)
+	}
+
+	smsg, err := sm.Full.MpoolPushMessage(ctx, &types.Message{
+		To:     sm.Miner.Address(),
+		From:   mi.Owner,
+		Value:  types.NewInt(0),
+		Method: builtintypes.MethodsMiner.WithdrawBalance,
+		Params: params,
+	}, nil)
+	if err != nil {
+		return cid.Undef, err
+	}
+
+	return smsg.Cid(), nil
+}
@@ -116,6 +116,8 @@ type Config struct {
 	AllowProveReplicaUpdate2 bool
 	AllowRegenSectorKey      bool
 
+	LocalWorkerName string
+
 	// ResourceFiltering instructs the system which resource filtering strategy
 	// to use when evaluating tasks against this worker. An empty value defaults
 	// to "hardware".
@@ -207,6 +209,7 @@ func New(ctx context.Context, lstor *paths.Local, stor paths.Store, ls paths.Loc
 	wcfg := WorkerConfig{
 		IgnoreResourceFiltering: sc.ResourceFiltering == ResourceFilteringDisabled,
 		TaskTypes:               localTasks,
+		Name:                    sc.LocalWorkerName,
 	}
 	worker := NewLocalWorker(wcfg, stor, lstor, si, m, wss)
 	err = m.AddWorker(ctx, worker)
@@ -34,6 +34,9 @@ type WorkerConfig struct {
 	TaskTypes []sealtasks.TaskType
 	NoSwap    bool
 
+	// os.Hostname if not set
+	Name string
+
 	// IgnoreResourceFiltering enables task distribution to happen on this
 	// worker regardless of its currently available resources. Used in testing
 	// with the local worker.
@@ -56,6 +59,8 @@ type LocalWorker struct {
 	noSwap    bool
 	envLookup EnvFunc
 
+	name string
+
 	// see equivalent field on WorkerConfig.
 	ignoreResources bool
 
@@ -83,6 +88,7 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, envLookup EnvFunc,
 		localStore: local,
 		sindex:     sindex,
 		ret:        ret,
+		name:       wcfg.Name,
 
 		ct: &workerCallTracker{
 			st: cst,
@@ -97,6 +103,14 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, envLookup EnvFunc,
 		closing: make(chan struct{}),
 	}
 
+	if w.name == "" {
+		var err error
+		w.name, err = os.Hostname()
+		if err != nil {
+			panic(err)
+		}
+	}
+
 	if wcfg.MaxParallelChallengeReads > 0 {
 		w.challengeThrottle = make(chan struct{}, wcfg.MaxParallelChallengeReads)
 	}
@@ -113,13 +127,7 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, envLookup EnvFunc,
 
 	go func() {
 		for _, call := range unfinished {
-			hostname, osErr := os.Hostname()
-			if osErr != nil {
-				log.Errorf("get hostname err: %+v", err)
-				hostname = ""
-			}
-
-			err := storiface.Err(storiface.ErrTempWorkerRestart, xerrors.Errorf("worker [Hostname: %s] restarted", hostname))
+			err := storiface.Err(storiface.ErrTempWorkerRestart, xerrors.Errorf("worker [name: %s] restarted", w.name))
 
 			// TODO: Handle restarting PC1 once support is merged
 
@@ -283,12 +291,7 @@ func (l *LocalWorker) asyncCall(ctx context.Context, sector storiface.SectorRef,
 		}
 
 		if err != nil {
-			hostname, osErr := os.Hostname()
-			if osErr != nil {
-				log.Errorf("get hostname err: %+v", err)
-			}
-
-			err = xerrors.Errorf("%w [Hostname: %s]", err, hostname)
+			err = xerrors.Errorf("%w [name: %s]", err, l.name)
 		}
 
 		if doReturn(ctx, rt, ci, l.ret, res, toCallError(err)) {
@@ -774,11 +777,6 @@ func (l *LocalWorker) memInfo() (memPhysical, memUsed, memSwap, memSwapUsed uint
 }
 
 func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) {
-	hostname, err := os.Hostname() // TODO: allow overriding from config
-	if err != nil {
-		panic(err)
-	}
-
 	gpus, err := ffi.GetGPUDevices()
 	if err != nil {
 		log.Errorf("getting gpu devices failed: %+v", err)
@@ -797,7 +795,7 @@ func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) {
 	}
 
 	return storiface.WorkerInfo{
-		Hostname:        hostname,
+		Hostname:        l.name,
 		IgnoreResources: l.ignoreResources,
 		Resources: storiface.WorkerResources{
 			MemPhysical: memPhysical,
@@ -829,6 +827,10 @@ func (l *LocalWorker) Close() error {
 	return nil
 }
 
+func (l *LocalWorker) Done() <-chan struct{} {
+	return l.closing
+}
+
 // WaitQuiet blocks as long as there are tasks running
 func (l *LocalWorker) WaitQuiet() {
 	l.running.Wait()