Merge pull request #7971 from filecoin-project/feat/post-worker

feat: PoSt workers

commit 7401fa234e
@@ -972,6 +972,11 @@ workflows:
       suite: itest-wdpost
       target: "./itests/wdpost_test.go"
 
+  - test:
+      name: test-itest-worker
+      suite: itest-worker
+      target: "./itests/worker_test.go"
+
   - test:
       name: test-unit-cli
       suite: utest-unit-cli
Makefile

@@ -97,7 +97,7 @@ BINS+=lotus-miner
 
 lotus-worker: $(BUILD_DEPS)
 	rm -f lotus-worker
-	$(GOCC) build $(GOFLAGS) -o lotus-worker ./cmd/lotus-seal-worker
+	$(GOCC) build $(GOFLAGS) -o lotus-worker ./cmd/lotus-worker
 .PHONY: lotus-worker
 BINS+=lotus-worker
 
@@ -24,7 +24,6 @@ import (
 
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
-	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
 )
@@ -143,21 +142,21 @@ type StorageMiner interface {
 	SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) //perm:admin
 	SealingAbort(ctx context.Context, call storiface.CallID) error //perm:admin
 
-	//stores.SectorIndex
-	StorageAttach(context.Context, stores.StorageInfo, fsutil.FsStat) error //perm:admin
-	StorageInfo(context.Context, stores.ID) (stores.StorageInfo, error) //perm:admin
-	StorageReportHealth(context.Context, stores.ID, stores.HealthReport) error //perm:admin
-	StorageDeclareSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error //perm:admin
-	StorageDropSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType) error //perm:admin
-	StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]stores.SectorStorageInfo, error) //perm:admin
-	StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]stores.StorageInfo, error) //perm:admin
+	// SectorIndex
+	StorageAttach(context.Context, storiface.StorageInfo, fsutil.FsStat) error //perm:admin
+	StorageInfo(context.Context, storiface.ID) (storiface.StorageInfo, error) //perm:admin
+	StorageReportHealth(context.Context, storiface.ID, storiface.HealthReport) error //perm:admin
+	StorageDeclareSector(ctx context.Context, storageID storiface.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error //perm:admin
+	StorageDropSector(ctx context.Context, storageID storiface.ID, s abi.SectorID, ft storiface.SectorFileType) error //perm:admin
+	StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]storiface.SectorStorageInfo, error) //perm:admin
+	StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]storiface.StorageInfo, error) //perm:admin
 	StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin
 	StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin
-	StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) //perm:admin
+	StorageList(ctx context.Context) (map[storiface.ID][]storiface.Decl, error) //perm:admin
 	StorageGetLocks(ctx context.Context) (storiface.SectorLocks, error) //perm:admin
 
-	StorageLocal(ctx context.Context) (map[stores.ID]string, error) //perm:admin
-	StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) //perm:admin
+	StorageLocal(ctx context.Context) (map[storiface.ID]string, error) //perm:admin
+	StorageStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) //perm:admin
 
 	MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write
 	MarketListDeals(ctx context.Context) ([]MarketDeal, error) //perm:read
@@ -266,13 +265,12 @@ type StorageMiner interface {
 	// the path specified when calling CreateBackup is within the base path
 	CreateBackup(ctx context.Context, fpath string) error //perm:admin
 
-	CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
+	CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
 
 	ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read
 }
 
 var _ storiface.WorkerReturn = *new(StorageMiner)
-var _ stores.SectorIndex = *new(StorageMiner)
 
 type SealRes struct {
 	Err string
@@ -296,19 +294,20 @@ type SectorPiece struct {
 }
 
 type SectorInfo struct {
 	SectorID     abi.SectorNumber
 	State        SectorState
 	CommD        *cid.Cid
 	CommR        *cid.Cid
 	Proof        []byte
 	Deals        []abi.DealID
 	Pieces       []SectorPiece
 	Ticket       SealTicket
 	Seed         SealSeed
 	PreCommitMsg *cid.Cid
 	CommitMsg    *cid.Cid
 	Retries      uint64
 	ToUpgrade    bool
+	ReplicaUpdateMessage *cid.Cid
 
 	LastErr string
 
@@ -7,8 +7,9 @@ import (
 	"github.com/ipfs/go-cid"
 
 	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
 
 	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
-	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 	"github.com/filecoin-project/specs-storage/storage"
 )
@@ -29,7 +30,7 @@ type Worker interface {
 
 	// TaskType -> Weight
 	TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) //perm:admin
-	Paths(context.Context) ([]stores.StoragePath, error) //perm:admin
+	Paths(context.Context) ([]storiface.StoragePath, error) //perm:admin
 	Info(context.Context) (storiface.WorkerInfo, error) //perm:admin
 
 	// storiface.WorkerCalls

@@ -49,6 +50,9 @@ type Worker interface {
 	UnsealPiece(context.Context, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) //perm:admin
 	Fetch(context.Context, storage.SectorRef, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) //perm:admin
 
+	GenerateWinningPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []storiface.PostSectorChallenge, randomness abi.PoStRandomness) ([]proof.PoStProof, error) //perm:admin
+	GenerateWindowPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []storiface.PostSectorChallenge, partitionIdx int, randomness abi.PoStRandomness) (storiface.WindowPoStResult, error) //perm:admin
+
 	TaskDisable(ctx context.Context, tt sealtasks.TaskType) error //perm:admin
 	TaskEnable(ctx context.Context, tt sealtasks.TaskType) error //perm:admin
 
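A minimal sketch of how the miner side can drive the new per-partition window PoSt API (the provePartition wrapper and package are hypothetical, not part of this diff; only the GenerateWindowPoSt call comes from the interface above):

package example // illustrative only; not part of this PR

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

// provePartition forwards one deadline partition to a PoSt worker and
// returns the worker's result, which carries the partition proof plus any
// sectors the worker had to skip. A scheduler can fan partitions out to
// several PoSt workers in parallel, one RPC per partition.
func provePartition(ctx context.Context, w api.Worker, ppt abi.RegisteredPoStProof,
	mid abi.ActorID, challenges []storiface.PostSectorChallenge, partIdx int,
	rand abi.PoStRandomness) (storiface.WindowPoStResult, error) {
	return w.GenerateWindowPoSt(ctx, ppt, mid, challenges, partIdx, rand)
}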
@@ -40,7 +40,6 @@ import (
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
-	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
 	"github.com/filecoin-project/lotus/node/modules/dtypes"

@@ -199,10 +198,10 @@ func init() {
 		},
 	})
 	addExample(api.SectorState(sealing.Proving))
-	addExample(stores.ID("76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8"))
+	addExample(storiface.ID("76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8"))
 	addExample(storiface.FTUnsealed)
 	addExample(storiface.PathSealing)
-	addExample(map[stores.ID][]stores.Decl{
+	addExample(map[storiface.ID][]storiface.Decl{
 		"76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": {
 			{
 				SectorID: abi.SectorID{Miner: 1000, Number: 100},

@@ -210,7 +209,7 @@ func init() {
 			},
 		},
 	})
-	addExample(map[stores.ID]string{
+	addExample(map[storiface.ID]string{
 		"76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": "/data/path",
 	})
 	addExample(map[uuid.UUID][]storiface.WorkerJob{
api/proxy_gen.go

@@ -25,12 +25,12 @@ import (
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
 	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
-	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
 	"github.com/filecoin-project/lotus/journal/alerting"
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
 	"github.com/filecoin-project/lotus/node/repo/imports"
+	"github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
 	"github.com/filecoin-project/specs-storage/storage"
 	"github.com/google/uuid"
 	"github.com/ipfs/go-cid"
@@ -637,7 +637,7 @@ type StorageMinerStruct struct {
 
 	ActorSectorSize func(p0 context.Context, p1 address.Address) (abi.SectorSize, error) `perm:"read"`
 
-	CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`
+	CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`
 
 	ComputeProof func(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) `perm:"read"`
 
@@ -825,29 +825,29 @@ type StorageMinerStruct struct {
 
 	StorageAddLocal func(p0 context.Context, p1 string) error `perm:"admin"`
 
-	StorageAttach func(p0 context.Context, p1 stores.StorageInfo, p2 fsutil.FsStat) error `perm:"admin"`
+	StorageAttach func(p0 context.Context, p1 storiface.StorageInfo, p2 fsutil.FsStat) error `perm:"admin"`
 
-	StorageBestAlloc func(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]stores.StorageInfo, error) `perm:"admin"`
+	StorageBestAlloc func(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]storiface.StorageInfo, error) `perm:"admin"`
 
-	StorageDeclareSector func(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error `perm:"admin"`
+	StorageDeclareSector func(p0 context.Context, p1 storiface.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error `perm:"admin"`
 
-	StorageDropSector func(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error `perm:"admin"`
+	StorageDropSector func(p0 context.Context, p1 storiface.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error `perm:"admin"`
 
-	StorageFindSector func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]stores.SectorStorageInfo, error) `perm:"admin"`
+	StorageFindSector func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]storiface.SectorStorageInfo, error) `perm:"admin"`
 
 	StorageGetLocks func(p0 context.Context) (storiface.SectorLocks, error) `perm:"admin"`
 
-	StorageInfo func(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) `perm:"admin"`
+	StorageInfo func(p0 context.Context, p1 storiface.ID) (storiface.StorageInfo, error) `perm:"admin"`
 
-	StorageList func(p0 context.Context) (map[stores.ID][]stores.Decl, error) `perm:"admin"`
+	StorageList func(p0 context.Context) (map[storiface.ID][]storiface.Decl, error) `perm:"admin"`
 
-	StorageLocal func(p0 context.Context) (map[stores.ID]string, error) `perm:"admin"`
+	StorageLocal func(p0 context.Context) (map[storiface.ID]string, error) `perm:"admin"`
 
 	StorageLock func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) error `perm:"admin"`
 
-	StorageReportHealth func(p0 context.Context, p1 stores.ID, p2 stores.HealthReport) error `perm:"admin"`
+	StorageReportHealth func(p0 context.Context, p1 storiface.ID, p2 storiface.HealthReport) error `perm:"admin"`
 
-	StorageStat func(p0 context.Context, p1 stores.ID) (fsutil.FsStat, error) `perm:"admin"`
+	StorageStat func(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) `perm:"admin"`
 
 	StorageTryLock func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) (bool, error) `perm:"admin"`
 
@@ -900,11 +900,15 @@ type WorkerStruct struct {
 
 	GenerateSectorKeyFromData func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) `perm:"admin"`
 
+	GenerateWindowPoSt func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 abi.ActorID, p3 []storiface.PostSectorChallenge, p4 int, p5 abi.PoStRandomness) (storiface.WindowPoStResult, error) `perm:"admin"`
+
+	GenerateWinningPoSt func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 abi.ActorID, p3 []storiface.PostSectorChallenge, p4 abi.PoStRandomness) ([]proof.PoStProof, error) `perm:"admin"`
+
 	Info func(p0 context.Context) (storiface.WorkerInfo, error) `perm:"admin"`
 
 	MoveStorage func(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"`
 
-	Paths func(p0 context.Context) ([]stores.StoragePath, error) `perm:"admin"`
+	Paths func(p0 context.Context) ([]storiface.StoragePath, error) `perm:"admin"`
 
 	ProcessSession func(p0 context.Context) (uuid.UUID, error) `perm:"admin"`
 
@@ -3831,14 +3835,14 @@ func (s *StorageMinerStub) ActorSectorSize(p0 context.Context, p1 address.Addres
 	return *new(abi.SectorSize), ErrNotSupported
 }
 
-func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) {
+func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
 	if s.Internal.CheckProvable == nil {
 		return *new(map[abi.SectorNumber]string), ErrNotSupported
 	}
-	return s.Internal.CheckProvable(p0, p1, p2, p3, p4)
+	return s.Internal.CheckProvable(p0, p1, p2, p3)
 }
 
-func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) {
+func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
 	return *new(map[abi.SectorNumber]string), ErrNotSupported
 }
 
@@ -4865,59 +4869,59 @@ func (s *StorageMinerStub) StorageAddLocal(p0 context.Context, p1 string) error
 	return ErrNotSupported
 }
 
-func (s *StorageMinerStruct) StorageAttach(p0 context.Context, p1 stores.StorageInfo, p2 fsutil.FsStat) error {
+func (s *StorageMinerStruct) StorageAttach(p0 context.Context, p1 storiface.StorageInfo, p2 fsutil.FsStat) error {
 	if s.Internal.StorageAttach == nil {
 		return ErrNotSupported
 	}
 	return s.Internal.StorageAttach(p0, p1, p2)
 }
 
-func (s *StorageMinerStub) StorageAttach(p0 context.Context, p1 stores.StorageInfo, p2 fsutil.FsStat) error {
+func (s *StorageMinerStub) StorageAttach(p0 context.Context, p1 storiface.StorageInfo, p2 fsutil.FsStat) error {
 	return ErrNotSupported
 }
 
-func (s *StorageMinerStruct) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]stores.StorageInfo, error) {
+func (s *StorageMinerStruct) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]storiface.StorageInfo, error) {
 	if s.Internal.StorageBestAlloc == nil {
-		return *new([]stores.StorageInfo), ErrNotSupported
+		return *new([]storiface.StorageInfo), ErrNotSupported
 	}
 	return s.Internal.StorageBestAlloc(p0, p1, p2, p3)
 }
 
-func (s *StorageMinerStub) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]stores.StorageInfo, error) {
-	return *new([]stores.StorageInfo), ErrNotSupported
+func (s *StorageMinerStub) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]storiface.StorageInfo, error) {
+	return *new([]storiface.StorageInfo), ErrNotSupported
 }
 
-func (s *StorageMinerStruct) StorageDeclareSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error {
+func (s *StorageMinerStruct) StorageDeclareSector(p0 context.Context, p1 storiface.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error {
 	if s.Internal.StorageDeclareSector == nil {
 		return ErrNotSupported
 	}
 	return s.Internal.StorageDeclareSector(p0, p1, p2, p3, p4)
 }
 
-func (s *StorageMinerStub) StorageDeclareSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error {
+func (s *StorageMinerStub) StorageDeclareSector(p0 context.Context, p1 storiface.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error {
 	return ErrNotSupported
 }
 
-func (s *StorageMinerStruct) StorageDropSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error {
+func (s *StorageMinerStruct) StorageDropSector(p0 context.Context, p1 storiface.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error {
 	if s.Internal.StorageDropSector == nil {
 		return ErrNotSupported
 	}
 	return s.Internal.StorageDropSector(p0, p1, p2, p3)
 }
 
-func (s *StorageMinerStub) StorageDropSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error {
+func (s *StorageMinerStub) StorageDropSector(p0 context.Context, p1 storiface.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error {
 	return ErrNotSupported
 }
 
-func (s *StorageMinerStruct) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]stores.SectorStorageInfo, error) {
+func (s *StorageMinerStruct) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]storiface.SectorStorageInfo, error) {
 	if s.Internal.StorageFindSector == nil {
-		return *new([]stores.SectorStorageInfo), ErrNotSupported
+		return *new([]storiface.SectorStorageInfo), ErrNotSupported
 	}
 	return s.Internal.StorageFindSector(p0, p1, p2, p3, p4)
 }
 
-func (s *StorageMinerStub) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]stores.SectorStorageInfo, error) {
-	return *new([]stores.SectorStorageInfo), ErrNotSupported
+func (s *StorageMinerStub) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]storiface.SectorStorageInfo, error) {
+	return *new([]storiface.SectorStorageInfo), ErrNotSupported
 }
 
 func (s *StorageMinerStruct) StorageGetLocks(p0 context.Context) (storiface.SectorLocks, error) {
@@ -4931,37 +4935,37 @@ func (s *StorageMinerStub) StorageGetLocks(p0 context.Context) (storiface.Sector
 	return *new(storiface.SectorLocks), ErrNotSupported
 }
 
-func (s *StorageMinerStruct) StorageInfo(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) {
+func (s *StorageMinerStruct) StorageInfo(p0 context.Context, p1 storiface.ID) (storiface.StorageInfo, error) {
 	if s.Internal.StorageInfo == nil {
-		return *new(stores.StorageInfo), ErrNotSupported
+		return *new(storiface.StorageInfo), ErrNotSupported
 	}
 	return s.Internal.StorageInfo(p0, p1)
 }
 
-func (s *StorageMinerStub) StorageInfo(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) {
-	return *new(stores.StorageInfo), ErrNotSupported
+func (s *StorageMinerStub) StorageInfo(p0 context.Context, p1 storiface.ID) (storiface.StorageInfo, error) {
+	return *new(storiface.StorageInfo), ErrNotSupported
 }
 
-func (s *StorageMinerStruct) StorageList(p0 context.Context) (map[stores.ID][]stores.Decl, error) {
+func (s *StorageMinerStruct) StorageList(p0 context.Context) (map[storiface.ID][]storiface.Decl, error) {
 	if s.Internal.StorageList == nil {
-		return *new(map[stores.ID][]stores.Decl), ErrNotSupported
+		return *new(map[storiface.ID][]storiface.Decl), ErrNotSupported
 	}
 	return s.Internal.StorageList(p0)
 }
 
-func (s *StorageMinerStub) StorageList(p0 context.Context) (map[stores.ID][]stores.Decl, error) {
-	return *new(map[stores.ID][]stores.Decl), ErrNotSupported
+func (s *StorageMinerStub) StorageList(p0 context.Context) (map[storiface.ID][]storiface.Decl, error) {
+	return *new(map[storiface.ID][]storiface.Decl), ErrNotSupported
 }
 
-func (s *StorageMinerStruct) StorageLocal(p0 context.Context) (map[stores.ID]string, error) {
+func (s *StorageMinerStruct) StorageLocal(p0 context.Context) (map[storiface.ID]string, error) {
 	if s.Internal.StorageLocal == nil {
-		return *new(map[stores.ID]string), ErrNotSupported
+		return *new(map[storiface.ID]string), ErrNotSupported
 	}
 	return s.Internal.StorageLocal(p0)
 }
 
-func (s *StorageMinerStub) StorageLocal(p0 context.Context) (map[stores.ID]string, error) {
-	return *new(map[stores.ID]string), ErrNotSupported
+func (s *StorageMinerStub) StorageLocal(p0 context.Context) (map[storiface.ID]string, error) {
+	return *new(map[storiface.ID]string), ErrNotSupported
 }
 
 func (s *StorageMinerStruct) StorageLock(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) error {
@@ -4975,25 +4979,25 @@ func (s *StorageMinerStub) StorageLock(p0 context.Context, p1 abi.SectorID, p2 s
 	return ErrNotSupported
 }
 
-func (s *StorageMinerStruct) StorageReportHealth(p0 context.Context, p1 stores.ID, p2 stores.HealthReport) error {
+func (s *StorageMinerStruct) StorageReportHealth(p0 context.Context, p1 storiface.ID, p2 storiface.HealthReport) error {
 	if s.Internal.StorageReportHealth == nil {
 		return ErrNotSupported
 	}
 	return s.Internal.StorageReportHealth(p0, p1, p2)
 }
 
-func (s *StorageMinerStub) StorageReportHealth(p0 context.Context, p1 stores.ID, p2 stores.HealthReport) error {
+func (s *StorageMinerStub) StorageReportHealth(p0 context.Context, p1 storiface.ID, p2 storiface.HealthReport) error {
 	return ErrNotSupported
 }
 
-func (s *StorageMinerStruct) StorageStat(p0 context.Context, p1 stores.ID) (fsutil.FsStat, error) {
+func (s *StorageMinerStruct) StorageStat(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) {
 	if s.Internal.StorageStat == nil {
 		return *new(fsutil.FsStat), ErrNotSupported
 	}
 	return s.Internal.StorageStat(p0, p1)
 }
 
-func (s *StorageMinerStub) StorageStat(p0 context.Context, p1 stores.ID) (fsutil.FsStat, error) {
+func (s *StorageMinerStub) StorageStat(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) {
 	return *new(fsutil.FsStat), ErrNotSupported
 }
 
@@ -5184,6 +5188,28 @@ func (s *WorkerStub) GenerateSectorKeyFromData(p0 context.Context, p1 storage.Se
 	return *new(storiface.CallID), ErrNotSupported
 }
 
+func (s *WorkerStruct) GenerateWindowPoSt(p0 context.Context, p1 abi.RegisteredPoStProof, p2 abi.ActorID, p3 []storiface.PostSectorChallenge, p4 int, p5 abi.PoStRandomness) (storiface.WindowPoStResult, error) {
+	if s.Internal.GenerateWindowPoSt == nil {
+		return *new(storiface.WindowPoStResult), ErrNotSupported
+	}
+	return s.Internal.GenerateWindowPoSt(p0, p1, p2, p3, p4, p5)
+}
+
+func (s *WorkerStub) GenerateWindowPoSt(p0 context.Context, p1 abi.RegisteredPoStProof, p2 abi.ActorID, p3 []storiface.PostSectorChallenge, p4 int, p5 abi.PoStRandomness) (storiface.WindowPoStResult, error) {
+	return *new(storiface.WindowPoStResult), ErrNotSupported
+}
+
+func (s *WorkerStruct) GenerateWinningPoSt(p0 context.Context, p1 abi.RegisteredPoStProof, p2 abi.ActorID, p3 []storiface.PostSectorChallenge, p4 abi.PoStRandomness) ([]proof.PoStProof, error) {
+	if s.Internal.GenerateWinningPoSt == nil {
+		return *new([]proof.PoStProof), ErrNotSupported
+	}
+	return s.Internal.GenerateWinningPoSt(p0, p1, p2, p3, p4)
+}
+
+func (s *WorkerStub) GenerateWinningPoSt(p0 context.Context, p1 abi.RegisteredPoStProof, p2 abi.ActorID, p3 []storiface.PostSectorChallenge, p4 abi.PoStRandomness) ([]proof.PoStProof, error) {
+	return *new([]proof.PoStProof), ErrNotSupported
+}
+
 func (s *WorkerStruct) Info(p0 context.Context) (storiface.WorkerInfo, error) {
 	if s.Internal.Info == nil {
 		return *new(storiface.WorkerInfo), ErrNotSupported
@@ -5206,15 +5232,15 @@ func (s *WorkerStub) MoveStorage(p0 context.Context, p1 storage.SectorRef, p2 st
 	return *new(storiface.CallID), ErrNotSupported
 }
 
-func (s *WorkerStruct) Paths(p0 context.Context) ([]stores.StoragePath, error) {
+func (s *WorkerStruct) Paths(p0 context.Context) ([]storiface.StoragePath, error) {
 	if s.Internal.Paths == nil {
-		return *new([]stores.StoragePath), ErrNotSupported
+		return *new([]storiface.StoragePath), ErrNotSupported
 	}
 	return s.Internal.Paths(p0)
 }
 
-func (s *WorkerStub) Paths(p0 context.Context) ([]stores.StoragePath, error) {
-	return *new([]stores.StoragePath), ErrNotSupported
+func (s *WorkerStub) Paths(p0 context.Context) ([]storiface.StoragePath, error) {
+	return *new([]storiface.StoragePath), ErrNotSupported
 }
 
 func (s *WorkerStruct) ProcessSession(p0 context.Context) (uuid.UUID, error) {
Binary file not shown.
@@ -35,6 +35,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
 	"github.com/filecoin-project/lotus/chain/types"
 	lcli "github.com/filecoin-project/lotus/cli"
+	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
 	"github.com/filecoin-project/lotus/journal/alerting"
 )
 
@@ -343,6 +344,41 @@ func handleMiningInfo(ctx context.Context, cctx *cli.Context, fullapi v0api.Full
 		}
 	}
 
+	{
+		fmt.Println()
+
+		ws, err := nodeApi.WorkerStats(ctx)
+		if err != nil {
+			return xerrors.Errorf("getting worker stats: %w", err)
+		}
+
+		workersByType := map[string]int{
+			sealtasks.WorkerSealing:     0,
+			sealtasks.WorkerWindowPoSt:  0,
+			sealtasks.WorkerWinningPoSt: 0,
+		}
+
+	wloop:
+		for _, st := range ws {
+			if !st.Enabled {
+				continue
+			}
+
+			for _, task := range st.Tasks {
+				if task.WorkerType() != sealtasks.WorkerSealing {
+					workersByType[task.WorkerType()]++
+					continue wloop
+				}
+			}
+			workersByType[sealtasks.WorkerSealing]++
+		}
+
+		fmt.Printf("Workers: Seal(%d) WdPoSt(%d) WinPoSt(%d)\n",
+			workersByType[sealtasks.WorkerSealing],
+			workersByType[sealtasks.WorkerWindowPoSt],
+			workersByType[sealtasks.WorkerWinningPoSt])
+	}
+
 	if cctx.IsSet("blocks") {
 		fmt.Println("Produced newest blocks:")
 		err = producedBlocks(ctx, cctx.Int("blocks"), maddr, fullapi)
@@ -350,9 +386,6 @@ func handleMiningInfo(ctx context.Context, cctx *cli.Context, fullapi v0api.Full
 			return err
 		}
 	}
-	// TODO: grab actr state / info
-	//  * Sealed sectors (count / bytes)
-	//  * Power
 
 	return nil
 }
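The counting loop added above classifies each enabled worker by the tasks it advertises: the first task that is not a sealing task (window or winning PoSt) decides the bucket, and everything else counts as a sealing worker. The same rule as a standalone helper (the workerKind name is hypothetical; the sealtasks names come from this diff):

// workerKind restates the wloop classification used by `lotus-miner info`:
// a single non-sealing task decides the worker's bucket; otherwise it is a
// sealing worker.
func workerKind(tasks []sealtasks.TaskType) string {
	for _, t := range tasks {
		if t.WorkerType() != sealtasks.WorkerSealing {
			return t.WorkerType() // WorkerWindowPoSt or WorkerWinningPoSt
		}
	}
	return sealtasks.WorkerSealing
}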
@@ -34,6 +34,7 @@ import (
 	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
 	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
 	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 
 	market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
 	miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"

@@ -231,7 +232,7 @@ var initCmd = &cli.Command{
 
 		if !cctx.Bool("no-local-storage") {
 			b, err := json.MarshalIndent(&stores.LocalStorageMeta{
-				ID:       stores.ID(uuid.New().String()),
+				ID:       storiface.ID(uuid.New().String()),
 				Weight:   10,
 				CanSeal:  true,
 				CanStore: true,
@@ -17,7 +17,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/store"
 	"github.com/filecoin-project/lotus/chain/types"
 	lcli "github.com/filecoin-project/lotus/cli"
-	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 	"github.com/filecoin-project/specs-storage/storage"
 )
 

@@ -424,7 +424,7 @@ var provingCheckProvableCmd = &cli.Command{
 		if err != nil {
 			return err
 		}
-		decls := sl[stores.ID(cctx.String("storage-id"))]
+		decls := sl[storiface.ID(cctx.String("storage-id"))]
 
 		filter = map[abi.SectorID]struct{}{}
 		for _, decl := range decls {

@@ -473,7 +473,6 @@ var provingCheckProvableCmd = &cli.Command{
 		}
 
 		var tocheck []storage.SectorRef
-		var update []bool
 		for _, info := range sectorInfos {
 			si := abi.SectorID{
 				Miner: abi.ActorID(mid),

@@ -491,10 +490,9 @@ var provingCheckProvableCmd = &cli.Command{
 				ProofType: info.SealProof,
 				ID:        si,
 			})
-			update = append(update, info.SectorKeyCID != nil)
 		}
 
-		bad, err := sapi.CheckProvable(ctx, info.WindowPoStProofType, tocheck, update, cctx.Bool("slow"))
+		bad, err := sapi.CheckProvable(ctx, info.WindowPoStProofType, tocheck, cctx.Bool("slow"))
 		if err != nil {
 			return err
 		}
@@ -147,7 +147,7 @@ over time
 		}
 
 		cfg := &stores.LocalStorageMeta{
-			ID:       stores.ID(uuid.New().String()),
+			ID:       storiface.ID(uuid.New().String()),
 			Weight:   cctx.Uint64("weight"),
 			CanSeal:  cctx.Bool("seal"),
 			CanStore: cctx.Bool("store"),

@@ -210,8 +210,8 @@ var storageListCmd = &cli.Command{
 		}
 
 		type fsInfo struct {
-			stores.ID
-			sectors []stores.Decl
+			storiface.ID
+			sectors []storiface.Decl
 			stat    fsutil.FsStat
 		}
 

@@ -365,8 +365,8 @@ var storageListCmd = &cli.Command{
 		}
 
 		type storedSector struct {
-			id    stores.ID
-			store stores.SectorStorageInfo
+			id    storiface.ID
+			store storiface.SectorStorageInfo
 
 			unsealed, sealed, cache bool
 			update, updatecache     bool

@@ -433,7 +433,7 @@ var storageFindCmd = &cli.Command{
 			return xerrors.Errorf("finding cache: %w", err)
 		}
 
-		byId := map[stores.ID]*storedSector{}
+		byId := map[storiface.ID]*storedSector{}
 		for _, info := range u {
 			sts, ok := byId[info.ID]
 			if !ok {

@@ -600,7 +600,7 @@ var storageListSectorsCmd = &cli.Command{
 
 		type entry struct {
 			id      abi.SectorNumber
-			storage stores.ID
+			storage storiface.ID
 			ft      storiface.SectorFileType
 			urls    string
 
@@ -1,85 +0,0 @@
-package main
-
-import (
-	"context"
-	"sync/atomic"
-
-	"github.com/google/uuid"
-	"github.com/mitchellh/go-homedir"
-	"golang.org/x/xerrors"
-
-	"github.com/filecoin-project/lotus/api"
-	apitypes "github.com/filecoin-project/lotus/api/types"
-	"github.com/filecoin-project/lotus/build"
-	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
-	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
-	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
-)
-
-type worker struct {
-	*sectorstorage.LocalWorker
-
-	localStore *stores.Local
-	ls         stores.LocalStorage
-
-	disabled int64
-}
-
-func (w *worker) Version(context.Context) (api.Version, error) {
-	return api.WorkerAPIVersion0, nil
-}
-
-func (w *worker) StorageAddLocal(ctx context.Context, path string) error {
-	path, err := homedir.Expand(path)
-	if err != nil {
-		return xerrors.Errorf("expanding local path: %w", err)
-	}
-
-	if err := w.localStore.OpenPath(ctx, path); err != nil {
-		return xerrors.Errorf("opening local path: %w", err)
-	}
-
-	if err := w.ls.SetStorage(func(sc *stores.StorageConfig) {
-		sc.StoragePaths = append(sc.StoragePaths, stores.LocalPath{Path: path})
-	}); err != nil {
-		return xerrors.Errorf("get storage config: %w", err)
-	}
-
-	return nil
-}
-
-func (w *worker) SetEnabled(ctx context.Context, enabled bool) error {
-	disabled := int64(1)
-	if enabled {
-		disabled = 0
-	}
-	atomic.StoreInt64(&w.disabled, disabled)
-	return nil
-}
-
-func (w *worker) Enabled(ctx context.Context) (bool, error) {
-	return atomic.LoadInt64(&w.disabled) == 0, nil
-}
-
-func (w *worker) WaitQuiet(ctx context.Context) error {
-	w.LocalWorker.WaitQuiet() // uses WaitGroup under the hood so no ctx :/
-	return nil
-}
-
-func (w *worker) ProcessSession(ctx context.Context) (uuid.UUID, error) {
-	return w.LocalWorker.Session(ctx)
-}
-
-func (w *worker) Session(ctx context.Context) (uuid.UUID, error) {
-	if atomic.LoadInt64(&w.disabled) == 1 {
-		return uuid.UUID{}, xerrors.Errorf("worker disabled")
-	}
-
-	return w.LocalWorker.Session(ctx)
-}
-
-func (w *worker) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) {
-	return build.OpenRPCDiscoverJSON_Worker(), nil
-}
-
-var _ storiface.WorkerCalls = &worker{}
@@ -129,7 +129,7 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
 
 	{
 		b, err := json.MarshalIndent(&stores.LocalStorageMeta{
-			ID:       stores.ID(uuid.New().String()),
+			ID:       storiface.ID(uuid.New().String()),
 			Weight:   0, // read-only
 			CanSeal:  false,
 			CanStore: false,
@@ -13,7 +13,6 @@ import (
 	"time"
 
 	"github.com/google/uuid"
-	"github.com/gorilla/mux"
 	"github.com/ipfs/go-datastore/namespace"
 	logging "github.com/ipfs/go-log/v2"
 	manet "github.com/multiformats/go-multiaddr/net"

@@ -22,7 +21,6 @@ import (
 	"go.opencensus.io/tag"
 	"golang.org/x/xerrors"
 
-	"github.com/filecoin-project/go-jsonrpc"
 	"github.com/filecoin-project/go-jsonrpc/auth"
 	paramfetch "github.com/filecoin-project/go-paramfetch"
 	"github.com/filecoin-project/go-statestore"

@@ -31,13 +29,13 @@ import (
 	"github.com/filecoin-project/lotus/build"
 	lcli "github.com/filecoin-project/lotus/cli"
 	cliutil "github.com/filecoin-project/lotus/cli/util"
+	"github.com/filecoin-project/lotus/cmd/lotus-worker/sealworker"
 	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
 	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
 	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 	"github.com/filecoin-project/lotus/lib/lotuslog"
-	"github.com/filecoin-project/lotus/lib/rpcenc"
 	"github.com/filecoin-project/lotus/metrics"
-	"github.com/filecoin-project/lotus/metrics/proxy"
 	"github.com/filecoin-project/lotus/node/modules"
 	"github.com/filecoin-project/lotus/node/repo"
 )
@@ -178,11 +176,32 @@ var runCmd = &cli.Command{
 			Usage: "enable regen sector key",
 			Value: true,
 		},
+		&cli.BoolFlag{
+			Name:  "windowpost",
+			Usage: "enable window post",
+			Value: false,
+		},
+
+		&cli.BoolFlag{
+			Name:  "winningpost",
+			Usage: "enable winning post",
+			Value: false,
+		},
 		&cli.IntFlag{
 			Name:  "parallel-fetch-limit",
 			Usage: "maximum fetch operations to run in parallel",
 			Value: 5,
 		},
+		&cli.IntFlag{
+			Name:  "post-parallel-reads",
+			Usage: "maximum number of parallel challenge reads (0 = no limit)",
+			Value: 0,
+		},
+		&cli.DurationFlag{
+			Name:  "post-read-timeout",
+			Usage: "time limit for reading PoSt challenges (0 = no limit)",
+			Value: 0,
+		},
 		&cli.StringFlag{
 			Name:  "timeout",
 			Usage: "used when 'listen' is unspecified. must be a valid duration recognized by golang's time.ParseDuration function",
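The two new boolean flags turn a process into a dedicated PoSt worker (e.g. lotus-worker run --windowpost), and the two read-limit flags feed straight into the worker config further down. The hunk below then maps each flag to a worker type plus task list and rejects mixed configurations; a condensed sketch of that mapping follows (the selectRole helper is hypothetical, and it assumes the flags are mutually exclusive, which the validation loop below effectively enforces):

// Condensed, illustrative restatement of the role selection in the hunk below.
func selectRole(windowPoSt, winningPoSt bool) (workerType string, tasks []sealtasks.TaskType) {
	switch {
	case windowPoSt:
		return sealtasks.WorkerWindowPoSt, []sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt}
	case winningPoSt:
		return sealtasks.WorkerWinningPoSt, []sealtasks.TaskType{sealtasks.TTGenerateWinningPoSt}
	default: // a sealing worker keeps the usual always-on task set
		return sealtasks.WorkerSealing, []sealtasks.TaskType{
			sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1,
			sealtasks.TTFinalize, sealtasks.TTFinalizeReplicaUpdate,
		}
	}
}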
@ -265,37 +284,55 @@ var runCmd = &cli.Command{
|
|||||||
}
|
}
|
||||||
|
|
||||||
var taskTypes []sealtasks.TaskType
|
var taskTypes []sealtasks.TaskType
|
||||||
|
var workerType string
|
||||||
|
|
||||||
taskTypes = append(taskTypes, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize, sealtasks.TTFinalizeReplicaUpdate)
|
if cctx.Bool("windowpost") {
|
||||||
|
workerType = sealtasks.WorkerWindowPoSt
|
||||||
|
taskTypes = append(taskTypes, sealtasks.TTGenerateWindowPoSt)
|
||||||
|
}
|
||||||
|
if cctx.Bool("winningpost") {
|
||||||
|
workerType = sealtasks.WorkerWinningPoSt
|
||||||
|
taskTypes = append(taskTypes, sealtasks.TTGenerateWinningPoSt)
|
||||||
|
}
|
||||||
|
|
||||||
if cctx.Bool("addpiece") {
|
if workerType == "" {
|
||||||
|
workerType = sealtasks.WorkerSealing
|
||||||
|
taskTypes = append(taskTypes, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize, sealtasks.TTFinalizeReplicaUpdate)
|
||||||
|
}
|
||||||
|
|
||||||
|
if (workerType != sealtasks.WorkerSealing || cctx.IsSet("addpiece")) && cctx.Bool("addpiece") {
|
||||||
taskTypes = append(taskTypes, sealtasks.TTAddPiece)
|
taskTypes = append(taskTypes, sealtasks.TTAddPiece)
|
||||||
}
|
}
|
||||||
if cctx.Bool("precommit1") {
|
if (workerType != sealtasks.WorkerSealing || cctx.IsSet("precommit1")) && cctx.Bool("precommit1") {
|
||||||
taskTypes = append(taskTypes, sealtasks.TTPreCommit1)
|
taskTypes = append(taskTypes, sealtasks.TTPreCommit1)
|
||||||
}
|
}
|
||||||
if cctx.Bool("unseal") {
|
if (workerType != sealtasks.WorkerSealing || cctx.IsSet("unseal")) && cctx.Bool("unseal") {
|
||||||
taskTypes = append(taskTypes, sealtasks.TTUnseal)
|
taskTypes = append(taskTypes, sealtasks.TTUnseal)
|
||||||
}
|
}
|
||||||
if cctx.Bool("precommit2") {
|
if (workerType != sealtasks.WorkerSealing || cctx.IsSet("precommit2")) && cctx.Bool("precommit2") {
|
||||||
taskTypes = append(taskTypes, sealtasks.TTPreCommit2)
|
taskTypes = append(taskTypes, sealtasks.TTPreCommit2)
|
||||||
}
|
}
|
||||||
if cctx.Bool("commit") {
|
if (workerType != sealtasks.WorkerSealing || cctx.IsSet("commit")) && cctx.Bool("commit") {
|
||||||
taskTypes = append(taskTypes, sealtasks.TTCommit2)
|
taskTypes = append(taskTypes, sealtasks.TTCommit2)
|
||||||
}
|
}
|
||||||
if cctx.Bool("replica-update") {
|
if (workerType != sealtasks.WorkerSealing || cctx.IsSet("replica-update")) && cctx.Bool("replica-update") {
|
||||||
taskTypes = append(taskTypes, sealtasks.TTReplicaUpdate)
|
taskTypes = append(taskTypes, sealtasks.TTReplicaUpdate)
|
||||||
}
|
}
|
||||||
if cctx.Bool("prove-replica-update2") {
|
if (workerType != sealtasks.WorkerSealing || cctx.IsSet("prove-replica-update2")) && cctx.Bool("prove-replica-update2") {
|
||||||
taskTypes = append(taskTypes, sealtasks.TTProveReplicaUpdate2)
|
taskTypes = append(taskTypes, sealtasks.TTProveReplicaUpdate2)
|
||||||
}
|
}
|
||||||
if cctx.Bool("regen-sector-key") {
|
if (workerType != sealtasks.WorkerSealing || cctx.IsSet("regen-sector-key")) && cctx.Bool("regen-sector-key") {
|
||||||
taskTypes = append(taskTypes, sealtasks.TTRegenSectorKey)
|
taskTypes = append(taskTypes, sealtasks.TTRegenSectorKey)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(taskTypes) == 0 {
|
if len(taskTypes) == 0 {
|
||||||
return xerrors.Errorf("no task types specified")
|
return xerrors.Errorf("no task types specified")
|
||||||
}
|
}
|
||||||
|
for _, taskType := range taskTypes {
|
||||||
|
if taskType.WorkerType() != workerType {
|
||||||
|
return xerrors.Errorf("expected all task types to be for %s worker, but task %s is for %s worker", workerType, taskType, taskType.WorkerType())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Open repo
|
// Open repo
|
||||||
|
|
||||||
@ -323,7 +360,7 @@ var runCmd = &cli.Command{
|
|||||||
|
|
||||||
if !cctx.Bool("no-local-storage") {
|
if !cctx.Bool("no-local-storage") {
|
||||||
b, err := json.MarshalIndent(&stores.LocalStorageMeta{
|
b, err := json.MarshalIndent(&stores.LocalStorageMeta{
|
||||||
ID: stores.ID(uuid.New().String()),
|
ID: storiface.ID(uuid.New().String()),
|
||||||
Weight: 10,
|
Weight: 10,
|
||||||
CanSeal: true,
|
CanSeal: true,
|
||||||
CanStore: false,
|
CanStore: false,
|
||||||
@@ -420,35 +457,21 @@ var runCmd = &cli.Command{
 		wsts := statestore.New(namespace.Wrap(ds, modules.WorkerCallsPrefix))

-		workerApi := &worker{
+		workerApi := &sealworker.Worker{
 			LocalWorker: sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{
 				TaskTypes: taskTypes,
 				NoSwap:    cctx.Bool("no-swap"),
+				MaxParallelChallengeReads: cctx.Int("post-parallel-reads"),
+				ChallengeReadTimeout:      cctx.Duration("post-read-timeout"),
 			}, remote, localStore, nodeApi, nodeApi, wsts),
-			localStore: localStore,
-			ls:         lr,
+			LocalStore: localStore,
+			Storage:    lr,
 		}

-		mux := mux.NewRouter()
-
 		log.Info("Setting up control endpoint at " + address)

-		readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder()
-		rpcServer := jsonrpc.NewServer(readerServerOpt)
-		rpcServer.Register("Filecoin", api.PermissionedWorkerAPI(proxy.MetricedWorkerAPI(workerApi)))
-
-		mux.Handle("/rpc/v0", rpcServer)
-		mux.Handle("/rpc/streams/v0/push/{uuid}", readerHandler)
-		mux.PathPrefix("/remote").HandlerFunc(remoteHandler)
-		mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof
-
-		ah := &auth.Handler{
-			Verify: nodeApi.AuthVerify,
-			Next:   mux.ServeHTTP,
-		}
-
 		srv := &http.Server{
-			Handler: ah,
+			Handler: sealworker.WorkerHandler(nodeApi.AuthVerify, remoteHandler, workerApi, true),
 			BaseContext: func(listener net.Listener) context.Context {
 				ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-worker"))
 				return ctx
120  cmd/lotus-worker/sealworker/rpc.go  Normal file
@@ -0,0 +1,120 @@
+package sealworker
+
+import (
+	"context"
+	"net/http"
+	"sync/atomic"
+
+	"github.com/google/uuid"
+	"github.com/gorilla/mux"
+	"github.com/mitchellh/go-homedir"
+	"golang.org/x/xerrors"
+
+	"github.com/filecoin-project/go-jsonrpc"
+	"github.com/filecoin-project/go-jsonrpc/auth"
+
+	"github.com/filecoin-project/lotus/api"
+	apitypes "github.com/filecoin-project/lotus/api/types"
+	"github.com/filecoin-project/lotus/build"
+	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
+	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
+	"github.com/filecoin-project/lotus/lib/rpcenc"
+	"github.com/filecoin-project/lotus/metrics/proxy"
+)
+
+func WorkerHandler(authv func(ctx context.Context, token string) ([]auth.Permission, error), remote http.HandlerFunc, a api.Worker, permissioned bool) http.Handler {
+	mux := mux.NewRouter()
+	readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder()
+	rpcServer := jsonrpc.NewServer(readerServerOpt)
+
+	wapi := proxy.MetricedWorkerAPI(a)
+	if permissioned {
+		wapi = api.PermissionedWorkerAPI(wapi)
+	}
+
+	rpcServer.Register("Filecoin", wapi)
+
+	mux.Handle("/rpc/v0", rpcServer)
+	mux.Handle("/rpc/streams/v0/push/{uuid}", readerHandler)
+	mux.PathPrefix("/remote").HandlerFunc(remote)
+	mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof
+
+	if !permissioned {
+		return mux
+	}
+
+	ah := &auth.Handler{
+		Verify: authv,
+		Next:   mux.ServeHTTP,
+	}
+	return ah
+}
+
+type Worker struct {
+	*sectorstorage.LocalWorker
+
+	LocalStore *stores.Local
+	Storage    stores.LocalStorage
+
+	disabled int64
+}
+
+func (w *Worker) Version(context.Context) (api.Version, error) {
+	return api.WorkerAPIVersion0, nil
+}
+
+func (w *Worker) StorageAddLocal(ctx context.Context, path string) error {
+	path, err := homedir.Expand(path)
+	if err != nil {
+		return xerrors.Errorf("expanding local path: %w", err)
+	}
+
+	if err := w.LocalStore.OpenPath(ctx, path); err != nil {
+		return xerrors.Errorf("opening local path: %w", err)
+	}
+
+	if err := w.Storage.SetStorage(func(sc *stores.StorageConfig) {
+		sc.StoragePaths = append(sc.StoragePaths, stores.LocalPath{Path: path})
+	}); err != nil {
+		return xerrors.Errorf("get storage config: %w", err)
+	}
+
+	return nil
+}
+
+func (w *Worker) SetEnabled(ctx context.Context, enabled bool) error {
+	disabled := int64(1)
+	if enabled {
+		disabled = 0
+	}
+	atomic.StoreInt64(&w.disabled, disabled)
+	return nil
+}
+
+func (w *Worker) Enabled(ctx context.Context) (bool, error) {
+	return atomic.LoadInt64(&w.disabled) == 0, nil
+}
+
+func (w *Worker) WaitQuiet(ctx context.Context) error {
+	w.LocalWorker.WaitQuiet() // uses WaitGroup under the hood so no ctx :/
+	return nil
+}
+
+func (w *Worker) ProcessSession(ctx context.Context) (uuid.UUID, error) {
+	return w.LocalWorker.Session(ctx)
+}
+
+func (w *Worker) Session(ctx context.Context) (uuid.UUID, error) {
+	if atomic.LoadInt64(&w.disabled) == 1 {
+		return uuid.UUID{}, xerrors.Errorf("worker disabled")
+	}
+
+	return w.LocalWorker.Session(ctx)
+}
+
+func (w *Worker) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) {
+	return build.OpenRPCDiscoverJSON_Worker(), nil
+}
+
+var _ storiface.WorkerCalls = &Worker{}
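For illustration only: mounting this handler is now a one-liner in the run command above. A minimal standalone sketch follows; the listen address is made up, and workerApi/remoteHandler/authVerify are assumed to be built as in cmd/lotus-worker/main.go.

package main

import (
	"context"
	"net/http"

	"github.com/filecoin-project/go-jsonrpc/auth"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/cmd/lotus-worker/sealworker"
)

// serveWorker is a sketch: permissioned=true wraps the RPC mux in the JWT
// auth.Handler, exactly as WorkerHandler does above.
func serveWorker(workerApi api.Worker, remoteHandler http.HandlerFunc,
	authVerify func(ctx context.Context, token string) ([]auth.Permission, error)) error {
	srv := &http.Server{
		Addr:    "127.0.0.1:3456", // hypothetical listen address
		Handler: sealworker.WorkerHandler(authVerify, remoteHandler, workerApi, true),
	}
	return srv.ListenAndServe()
}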
@@ -14,6 +14,7 @@ import (
 	lcli "github.com/filecoin-project/lotus/cli"
 	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )

 const metaFile = "sectorstore.json"
@@ -101,7 +102,7 @@ var storageAttachCmd = &cli.Command{
 			}

 			cfg := &stores.LocalStorageMeta{
-				ID:       stores.ID(uuid.New().String()),
+				ID:       storiface.ID(uuid.New().String()),
 				Weight:   cctx.Uint64("weight"),
 				CanSeal:  cctx.Bool("seal"),
 				CanStore: cctx.Bool("store"),
@@ -346,9 +346,6 @@ Inputs:
       "ProofType": 8
     }
   ],
-  [
-    true
-  ],
   true
 ]
 ```
@@ -3062,6 +3059,7 @@ Response:
   "CommitMsg": null,
   "Retries": 42,
   "ToUpgrade": true,
+  "ReplicaUpdateMessage": null,
   "LastErr": "string value",
   "Log": [
     {
@@ -3154,7 +3152,7 @@ Inputs:
 Response: `{}`

 ### StorageAttach
-stores.SectorIndex
+SectorIndex

 Perms: admin
@@ -3292,6 +3290,9 @@ Response:
   "URLs": [
     "string value"
   ],
+  "BaseURLs": [
+    "string value"
+  ],
   "Weight": 42,
   "CanSeal": true,
   "CanStore": true,
@@ -3562,6 +3563,170 @@ Response:
     "aGPU 1337"
   ],
   "Resources": {
+    "post/v0/windowproof": {
+      "0": {
+        "MinMemory": 2048,
+        "MaxMemory": 2048,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 2048
+      },
+      "1": {
+        "MinMemory": 8388608,
+        "MaxMemory": 8388608,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 8388608
+      },
+      "2": {
+        "MinMemory": 1073741824,
+        "MaxMemory": 1610612736,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 10737418240
+      },
+      "3": {
+        "MinMemory": 32212254720,
+        "MaxMemory": 103079215104,
+        "GPUUtilization": 1,
+        "MaxParallelism": -1,
+        "MaxParallelismGPU": 6,
+        "BaseMinMemory": 34359738368
+      },
+      "4": {
+        "MinMemory": 64424509440,
+        "MaxMemory": 128849018880,
+        "GPUUtilization": 1,
+        "MaxParallelism": -1,
+        "MaxParallelismGPU": 6,
+        "BaseMinMemory": 68719476736
+      },
+      "5": {
+        "MinMemory": 2048,
+        "MaxMemory": 2048,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 2048
+      },
+      "6": {
+        "MinMemory": 8388608,
+        "MaxMemory": 8388608,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 8388608
+      },
+      "7": {
+        "MinMemory": 1073741824,
+        "MaxMemory": 1610612736,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 10737418240
+      },
+      "8": {
+        "MinMemory": 32212254720,
+        "MaxMemory": 103079215104,
+        "GPUUtilization": 1,
+        "MaxParallelism": -1,
+        "MaxParallelismGPU": 6,
+        "BaseMinMemory": 34359738368
+      },
+      "9": {
+        "MinMemory": 64424509440,
+        "MaxMemory": 128849018880,
+        "GPUUtilization": 1,
+        "MaxParallelism": -1,
+        "MaxParallelismGPU": 6,
+        "BaseMinMemory": 68719476736
+      }
+    },
+    "post/v0/winningproof": {
+      "0": {
+        "MinMemory": 2048,
+        "MaxMemory": 2048,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 2048
+      },
+      "1": {
+        "MinMemory": 8388608,
+        "MaxMemory": 8388608,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 8388608
+      },
+      "2": {
+        "MinMemory": 2048,
+        "MaxMemory": 2048,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 10737418240
+      },
+      "3": {
+        "MinMemory": 1073741824,
+        "MaxMemory": 1073741824,
+        "GPUUtilization": 1,
+        "MaxParallelism": -1,
+        "MaxParallelismGPU": 6,
+        "BaseMinMemory": 34359738368
+      },
+      "4": {
+        "MinMemory": 1073741824,
+        "MaxMemory": 1073741824,
+        "GPUUtilization": 1,
+        "MaxParallelism": -1,
+        "MaxParallelismGPU": 6,
+        "BaseMinMemory": 68719476736
+      },
+      "5": {
+        "MinMemory": 2048,
+        "MaxMemory": 2048,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 2048
+      },
+      "6": {
+        "MinMemory": 8388608,
+        "MaxMemory": 8388608,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 8388608
+      },
+      "7": {
+        "MinMemory": 2048,
+        "MaxMemory": 2048,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 10737418240
+      },
+      "8": {
+        "MinMemory": 1073741824,
+        "MaxMemory": 1073741824,
+        "GPUUtilization": 1,
+        "MaxParallelism": -1,
+        "MaxParallelismGPU": 6,
+        "BaseMinMemory": 34359738368
+      },
+      "9": {
+        "MinMemory": 1073741824,
+        "MaxMemory": 1073741824,
+        "GPUUtilization": 1,
+        "MaxParallelism": -1,
+        "MaxParallelismGPU": 6,
+        "BaseMinMemory": 68719476736
+      }
+    },
     "seal/v0/addpiece": {
       "0": {
         "MinMemory": 2048,
@@ -4467,6 +4632,7 @@ Response:
       }
     }
   },
+  "Tasks": null,
   "Enabled": true,
   "MemUsedMin": 0,
   "MemUsedMax": 0,
@@ -14,6 +14,8 @@
 * [FinalizeSector](#FinalizeSector)
 * [Generate](#Generate)
 * [GenerateSectorKeyFromData](#GenerateSectorKeyFromData)
+* [GenerateWindowPoSt](#GenerateWindowPoSt)
+* [GenerateWinningPoSt](#GenerateWinningPoSt)
 * [Move](#Move)
 * [MoveStorage](#MoveStorage)
 * [Process](#Process)
@@ -108,6 +110,170 @@ Response:
     "string value"
   ],
   "Resources": {
+    "post/v0/windowproof": {
+      "0": {
+        "MinMemory": 2048,
+        "MaxMemory": 2048,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 2048
+      },
+      "1": {
+        "MinMemory": 8388608,
+        "MaxMemory": 8388608,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 8388608
+      },
+      "2": {
+        "MinMemory": 1073741824,
+        "MaxMemory": 1610612736,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 10737418240
+      },
+      "3": {
+        "MinMemory": 32212254720,
+        "MaxMemory": 103079215104,
+        "GPUUtilization": 1,
+        "MaxParallelism": -1,
+        "MaxParallelismGPU": 6,
+        "BaseMinMemory": 34359738368
+      },
+      "4": {
+        "MinMemory": 64424509440,
+        "MaxMemory": 128849018880,
+        "GPUUtilization": 1,
+        "MaxParallelism": -1,
+        "MaxParallelismGPU": 6,
+        "BaseMinMemory": 68719476736
+      },
+      "5": {
+        "MinMemory": 2048,
+        "MaxMemory": 2048,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 2048
+      },
+      "6": {
+        "MinMemory": 8388608,
+        "MaxMemory": 8388608,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 8388608
+      },
+      "7": {
+        "MinMemory": 1073741824,
+        "MaxMemory": 1610612736,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 10737418240
+      },
+      "8": {
+        "MinMemory": 32212254720,
+        "MaxMemory": 103079215104,
+        "GPUUtilization": 1,
+        "MaxParallelism": -1,
+        "MaxParallelismGPU": 6,
+        "BaseMinMemory": 34359738368
+      },
+      "9": {
+        "MinMemory": 64424509440,
+        "MaxMemory": 128849018880,
+        "GPUUtilization": 1,
+        "MaxParallelism": -1,
+        "MaxParallelismGPU": 6,
+        "BaseMinMemory": 68719476736
+      }
+    },
+    "post/v0/winningproof": {
+      "0": {
+        "MinMemory": 2048,
+        "MaxMemory": 2048,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 2048
+      },
+      "1": {
+        "MinMemory": 8388608,
+        "MaxMemory": 8388608,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 8388608
+      },
+      "2": {
+        "MinMemory": 2048,
+        "MaxMemory": 2048,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 10737418240
+      },
+      "3": {
+        "MinMemory": 1073741824,
+        "MaxMemory": 1073741824,
+        "GPUUtilization": 1,
+        "MaxParallelism": -1,
+        "MaxParallelismGPU": 6,
+        "BaseMinMemory": 34359738368
+      },
+      "4": {
+        "MinMemory": 1073741824,
+        "MaxMemory": 1073741824,
+        "GPUUtilization": 1,
+        "MaxParallelism": -1,
+        "MaxParallelismGPU": 6,
+        "BaseMinMemory": 68719476736
+      },
+      "5": {
+        "MinMemory": 2048,
+        "MaxMemory": 2048,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 2048
+      },
+      "6": {
+        "MinMemory": 8388608,
+        "MaxMemory": 8388608,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 8388608
+      },
+      "7": {
+        "MinMemory": 2048,
+        "MaxMemory": 2048,
+        "GPUUtilization": 1,
+        "MaxParallelism": 1,
+        "MaxParallelismGPU": 0,
+        "BaseMinMemory": 10737418240
+      },
+      "8": {
+        "MinMemory": 1073741824,
+        "MaxMemory": 1073741824,
+        "GPUUtilization": 1,
+        "MaxParallelism": -1,
+        "MaxParallelismGPU": 6,
+        "BaseMinMemory": 34359738368
+      },
+      "9": {
+        "MinMemory": 1073741824,
+        "MaxMemory": 1073741824,
+        "GPUUtilization": 1,
+        "MaxParallelism": -1,
+        "MaxParallelismGPU": 6,
+        "BaseMinMemory": 68719476736
+      }
+    },
     "seal/v0/addpiece": {
       "0": {
         "MinMemory": 2048,
@@ -1218,6 +1384,87 @@ Response:
 }
 ```
+
+### GenerateWindowPoSt
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+  8,
+  1000,
+  [
+    {
+      "SealProof": 8,
+      "SectorNumber": 9,
+      "SealedCID": {
+        "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+      },
+      "Challenge": [
+        42
+      ],
+      "Update": true
+    }
+  ],
+  123,
+  "Bw=="
+]
+```
+
+Response:
+```json
+{
+  "PoStProofs": {
+    "PoStProof": 8,
+    "ProofBytes": "Ynl0ZSBhcnJheQ=="
+  },
+  "Skipped": [
+    {
+      "Miner": 1000,
+      "Number": 9
+    }
+  ]
+}
+```
+
+### GenerateWinningPoSt
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+  8,
+  1000,
+  [
+    {
+      "SealProof": 8,
+      "SectorNumber": 9,
+      "SealedCID": {
+        "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+      },
+      "Challenge": [
+        42
+      ],
+      "Update": true
+    }
+  ],
+  "Bw=="
+]
+```
+
+Response:
+```json
+[
+  {
+    "PoStProof": 8,
+    "ProofBytes": "Ynl0ZSBhcnJheQ=="
+  }
+]
+```
+
 ## Move
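As a usage note (not from this diff): the two methods documented above are served from the worker's /rpc/v0 endpoint, so they can be reached with the existing Go client helper. The address and token below are placeholders, and NewWorkerRPCV0 is assumed to be the current lotus client constructor.

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()
	headers := http.Header{"Authorization": []string{"Bearer <token>"}} // placeholder token

	// Assumed helper: dials the worker RPC documented above.
	w, closer, err := client.NewWorkerRPCV0(ctx, "ws://127.0.0.1:3456/rpc/v0", headers)
	if err != nil {
		panic(err)
	}
	defer closer()

	// Any worker method works from here; GenerateWindowPoSt/GenerateWinningPoSt
	// take the parameters shown in the JSON examples above.
	ver, err := w.Version(ctx)
	fmt.Println(ver, err)
}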
@@ -47,7 +47,11 @@ OPTIONS:
    --replica-update              enable replica update (default: true)
    --prove-replica-update2       enable prove replica update 2 (default: true)
    --regen-sector-key            enable regen sector key (default: true)
+   --windowpost                  enable window post (default: false)
+   --winningpost                 enable winning post (default: false)
    --parallel-fetch-limit value  maximum fetch operations to run in parallel (default: 5)
+   --post-parallel-reads value   maximum number of parallel challenge reads (0 = no limit) (default: 0)
+   --post-read-timeout value     time limit for reading PoSt challenges (0 = no limit) (default: 0s)
    --timeout value               used when 'listen' is unspecified. must be a valid duration recognized by golang's time.ParseDuration function (default: "30m")
    --help, -h                    show help (default: false)
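For example (illustrative only, using flags documented in this PR): a dedicated window PoSt machine would be started with `lotus-worker run --windowpost --no-local-storage`, optionally bounding challenge reads with `--post-parallel-reads` and `--post-read-timeout`.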
188  extern/sector-storage/faults.go  vendored
@@ -4,151 +4,92 @@ import (
 	"context"
 	"crypto/rand"
 	"fmt"
-	"os"
-	"path/filepath"
+	"time"

 	"golang.org/x/xerrors"

 	ffi "github.com/filecoin-project/filecoin-ffi"
 	"github.com/filecoin-project/go-state-types/abi"
-	"github.com/filecoin-project/specs-actors/actors/runtime/proof"
 	"github.com/filecoin-project/specs-storage/storage"

 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )

+var PostCheckTimeout = 160 * time.Second
+
 // FaultTracker TODO: Track things more actively
 type FaultTracker interface {
-	CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, rg storiface.RGetter) (map[abi.SectorID]string, error)
+	CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error)
 }

 // CheckProvable returns unprovable sectors
-func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, rg storiface.RGetter) (map[abi.SectorID]string, error) {
-	var bad = make(map[abi.SectorID]string)
-	ssize, err := pp.SectorSize()
-	if err != nil {
-		return nil, err
+func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) {
+	if rg == nil {
+		return nil, xerrors.Errorf("rg is nil")
 	}

-	// TODO: More better checks
-	for i, sector := range sectors {
+	var bad = make(map[abi.SectorID]string)
+	for _, sector := range sectors {
 		err := func() error {
 			ctx, cancel := context.WithCancel(ctx)
 			defer cancel()
-			var fReplica string
-			var fCache string
-
-			if update[i] {
-				lockedUpdate, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone)
-				if err != nil {
-					return xerrors.Errorf("acquiring sector lock: %w", err)
-				}
-				if !lockedUpdate {
-					log.Warnw("CheckProvable Sector FAULT: can't acquire read lock on update replica", "sector", sector)
-					bad[sector.ID] = fmt.Sprint("can't acquire read lock")
-					return nil
-				}
-				lp, _, err := m.localStore.AcquireSector(ctx, sector, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
-				if err != nil {
-					log.Warnw("CheckProvable Sector FAULT: acquire sector update replica in checkProvable", "sector", sector, "error", err)
-					bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err)
-					return nil
-				}
-				fReplica, fCache = lp.Update, lp.UpdateCache
-			} else {
-				locked, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTNone)
-				if err != nil {
-					return xerrors.Errorf("acquiring sector lock: %w", err)
-				}
-
-				if !locked {
-					log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector)
-					bad[sector.ID] = fmt.Sprint("can't acquire read lock")
-					return nil
-				}
-
-				lp, _, err := m.localStore.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
-				if err != nil {
-					log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err)
-					bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err)
-					return nil
-				}
-				fReplica, fCache = lp.Sealed, lp.Cache
-			}
-
-			if fReplica == "" || fCache == "" {
-				log.Warnw("CheckProvable Sector FAULT: cache and/or sealed paths not found", "sector", sector, "sealed", fReplica, "cache", fCache)
-				bad[sector.ID] = fmt.Sprintf("cache and/or sealed paths not found, cache %q, sealed %q", fCache, fReplica)
+
+			commr, update, err := rg(ctx, sector.ID)
+			if err != nil {
+				log.Warnw("CheckProvable Sector FAULT: getting commR", "sector", sector, "sealed", "err", err)
+				bad[sector.ID] = fmt.Sprintf("getting commR: %s", err)
 				return nil
 			}

-			toCheck := map[string]int64{
-				fReplica:                       1,
-				filepath.Join(fCache, "p_aux"): 0,
+			toLock := storiface.FTSealed | storiface.FTCache
+			if update {
+				toLock = storiface.FTUpdate | storiface.FTUpdateCache
 			}

-			addCachePathsForSectorSize(toCheck, fCache, ssize)
-
-			for p, sz := range toCheck {
-				st, err := os.Stat(p)
-				if err != nil {
-					log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", fReplica, "cache", fCache, "file", p, "err", err)
-					bad[sector.ID] = fmt.Sprintf("%s", err)
-					return nil
-				}
-
-				if sz != 0 {
-					if st.Size() != int64(ssize)*sz {
-						log.Warnw("CheckProvable Sector FAULT: sector file is wrong size", "sector", sector, "sealed", fReplica, "cache", fCache, "file", p, "size", st.Size(), "expectSize", int64(ssize)*sz)
-						bad[sector.ID] = fmt.Sprintf("%s is wrong size (got %d, expect %d)", p, st.Size(), int64(ssize)*sz)
-						return nil
-					}
-				}
+			locked, err := m.index.StorageTryLock(ctx, sector.ID, toLock, storiface.FTNone)
+			if err != nil {
+				return xerrors.Errorf("acquiring sector lock: %w", err)
 			}

-			if rg != nil {
-				wpp, err := sector.ProofType.RegisteredWindowPoStProof()
-				if err != nil {
-					return err
-				}
+			if !locked {
+				log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector)
+				bad[sector.ID] = fmt.Sprint("can't acquire read lock")
+				return nil
+			}

-				var pr abi.PoStRandomness = make([]byte, abi.RandomnessLength)
-				_, _ = rand.Read(pr)
-				pr[31] &= 0x3f
+			wpp, err := sector.ProofType.RegisteredWindowPoStProof()
+			if err != nil {
+				return err
+			}

-				ch, err := ffi.GeneratePoStFallbackSectorChallenges(wpp, sector.ID.Miner, pr, []abi.SectorNumber{
-					sector.ID.Number,
-				})
-				if err != nil {
-					log.Warnw("CheckProvable Sector FAULT: generating challenges", "sector", sector, "sealed", fReplica, "cache", fCache, "err", err)
-					bad[sector.ID] = fmt.Sprintf("generating fallback challenges: %s", err)
-					return nil
-				}
+			var pr abi.PoStRandomness = make([]byte, abi.RandomnessLength)
+			_, _ = rand.Read(pr)
+			pr[31] &= 0x3f

-				commr, err := rg(ctx, sector.ID)
-				if err != nil {
-					log.Warnw("CheckProvable Sector FAULT: getting commR", "sector", sector, "sealed", fReplica, "cache", fCache, "err", err)
-					bad[sector.ID] = fmt.Sprintf("getting commR: %s", err)
-					return nil
-				}
+			ch, err := ffi.GeneratePoStFallbackSectorChallenges(wpp, sector.ID.Miner, pr, []abi.SectorNumber{
+				sector.ID.Number,
+			})
+			if err != nil {
+				log.Warnw("CheckProvable Sector FAULT: generating challenges", "sector", sector, "err", err)
+				bad[sector.ID] = fmt.Sprintf("generating fallback challenges: %s", err)
+				return nil
+			}

-				_, err = ffi.GenerateSingleVanillaProof(ffi.PrivateSectorInfo{
-					SectorInfo: proof.SectorInfo{
-						SealProof:    sector.ProofType,
-						SectorNumber: sector.ID.Number,
-						SealedCID:    commr,
-					},
-					CacheDirPath:     fCache,
-					PoStProofType:    wpp,
-					SealedSectorPath: fReplica,
-				}, ch.Challenges[sector.ID.Number])
-				if err != nil {
-					log.Warnw("CheckProvable Sector FAULT: generating vanilla proof", "sector", sector, "sealed", fReplica, "cache", fCache, "err", err)
-					bad[sector.ID] = fmt.Sprintf("generating vanilla proof: %s", err)
-					return nil
-				}
+			vctx, cancel2 := context.WithTimeout(ctx, PostCheckTimeout)
+			defer cancel2()
+
+			_, err = m.storage.GenerateSingleVanillaProof(vctx, sector.ID.Miner, storiface.PostSectorChallenge{
+				SealProof:    sector.ProofType,
+				SectorNumber: sector.ID.Number,
+				SealedCID:    commr,
+				Challenge:    ch.Challenges[sector.ID.Number],
+				Update:       update,
+			}, wpp)
+			if err != nil {
+				log.Warnw("CheckProvable Sector FAULT: generating vanilla proof", "sector", sector, "err", err)
+				bad[sector.ID] = fmt.Sprintf("generating vanilla proof: %s", err)
+				return nil
 			}

 			return nil
@@ -161,25 +102,4 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
 	return bad, nil
 }

-func addCachePathsForSectorSize(chk map[string]int64, cacheDir string, ssize abi.SectorSize) {
-	switch ssize {
-	case 2 << 10:
-		fallthrough
-	case 8 << 20:
-		fallthrough
-	case 512 << 20:
-		chk[filepath.Join(cacheDir, "sc-02-data-tree-r-last.dat")] = 0
-	case 32 << 30:
-		for i := 0; i < 8; i++ {
-			chk[filepath.Join(cacheDir, fmt.Sprintf("sc-02-data-tree-r-last-%d.dat", i))] = 0
-		}
-	case 64 << 30:
-		for i := 0; i < 16; i++ {
-			chk[filepath.Join(cacheDir, fmt.Sprintf("sc-02-data-tree-r-last-%d.dat", i))] = 0
-		}
-	default:
-		log.Warnf("not checking cache files of %s sectors for faults", ssize)
-	}
-}
-
 var _ FaultTracker = &Manager{}
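Since PostCheckTimeout is a package-level variable, the 160-second ceiling on each per-sector vanilla-proof check can be tuned by embedders. A minimal sketch; the five-minute value is purely illustrative:

package main

import (
	"time"

	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
)

func init() {
	// Loosen the proving-check deadline introduced in faults.go above.
	sectorstorage.PostCheckTimeout = 5 * time.Minute // illustrative value
}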
31  extern/sector-storage/ffiwrapper/sealer_cgo.go  vendored
@@ -18,15 +18,16 @@ import (
 	"github.com/ipfs/go-cid"
 	"golang.org/x/xerrors"

+	"github.com/detailyang/go-fallocate"
 	ffi "github.com/filecoin-project/filecoin-ffi"
 	rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
-	commcid "github.com/filecoin-project/go-fil-commcid"
-	"github.com/filecoin-project/go-state-types/abi"
-	"github.com/filecoin-project/specs-storage/storage"

-	"github.com/detailyang/go-fallocate"
 	commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper"
 	"github.com/filecoin-project/go-commp-utils/zerocomm"
+	commcid "github.com/filecoin-project/go-fil-commcid"
+	"github.com/filecoin-project/go-state-types/abi"
+	proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
+	"github.com/filecoin-project/specs-storage/storage"

 	"github.com/filecoin-project/lotus/extern/sector-storage/fr32"
 	"github.com/filecoin-project/lotus/extern/sector-storage/partialfile"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
@@ -977,3 +978,23 @@ func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceIn
 	return ffi.GenerateUnsealedCID(proofType, allPieces)
 }
+
+func (sb *Sealer) GenerateWinningPoStWithVanilla(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, vanillas [][]byte) ([]proof5.PoStProof, error) {
+	return ffi.GenerateWinningPoStWithVanilla(proofType, minerID, randomness, vanillas)
+}
+
+func (sb *Sealer) GenerateWindowPoStWithVanilla(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, proofs [][]byte, partitionIdx int) (proof5.PoStProof, error) {
+	pp, err := ffi.GenerateSinglePartitionWindowPoStWithVanilla(proofType, minerID, randomness, proofs, uint(partitionIdx))
+	if err != nil {
+		return proof5.PoStProof{}, err
+	}
+	if pp == nil {
+		// should be impossible, but just in case do not panic
+		return proof5.PoStProof{}, xerrors.New("postproof was nil")
+	}
+
+	return proof5.PoStProof{
+		PoStProof:  pp.PoStProof,
+		ProofBytes: pp.ProofBytes,
+	}, nil
+}
54  extern/sector-storage/ffiwrapper/sealer_test.go  vendored
@@ -944,3 +944,57 @@ func TestMulticoreSDR(t *testing.T) {
 	require.True(t, ok)
 }
+
+func TestPoStChallengeAssumptions(t *testing.T) {
+	var r [32]byte
+	rand.Read(r[:])
+	r[31] &= 0x3f
+
+	// behaves like a pure function
+	{
+		c1, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{1, 2, 3, 4})
+		require.NoError(t, err)
+
+		c2, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{1, 2, 3, 4})
+		require.NoError(t, err)
+
+		require.Equal(t, c1, c2)
+	}
+
+	// doesn't sort, challenges position dependant
+	{
+		c1, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{1, 2, 3, 4})
+		require.NoError(t, err)
+
+		c2, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{4, 2, 3, 1})
+		require.NoError(t, err)
+
+		require.NotEqual(t, c1, c2)
+
+		require.Equal(t, c1.Challenges[2], c2.Challenges[2])
+		require.Equal(t, c1.Challenges[3], c2.Challenges[3])
+
+		require.NotEqual(t, c1.Challenges[1], c2.Challenges[1])
+		require.NotEqual(t, c1.Challenges[4], c2.Challenges[4])
+	}
+
+	// length doesn't matter
+	{
+		c1, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{1})
+		require.NoError(t, err)
+
+		c2, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{1, 2})
+		require.NoError(t, err)
+
+		require.NotEqual(t, c1, c2)
+		require.Equal(t, c1.Challenges[1], c2.Challenges[1])
+	}
+
+	// generate dedupes
+	{
+		c1, err := ffi.GeneratePoStFallbackSectorChallenges(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 1000, r[:], []abi.SectorNumber{1, 2, 1, 4})
+		require.NoError(t, err)
+		require.Len(t, c1.Sectors, 3)
+		require.Len(t, c1.Challenges, 3)
+	}
+}
55  extern/sector-storage/manager.go  vendored
@@ -36,7 +36,7 @@ type Worker interface {
 	TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error)

 	// Returns paths accessible to the worker
-	Paths(context.Context) ([]stores.StoragePath, error)
+	Paths(context.Context) ([]storiface.StoragePath, error)

 	Info(context.Context) (storiface.WorkerInfo, error)

@@ -56,14 +56,16 @@ var ClosedWorkerID = uuid.UUID{}

 type Manager struct {
 	ls         stores.LocalStorage
-	storage    *stores.Remote
+	storage    stores.Store
 	localStore *stores.Local
 	remoteHnd  *stores.FetchHandler
 	index      stores.SectorIndex

 	sched            *scheduler
+	windowPoStSched  *poStScheduler
+	winningPoStSched *poStScheduler

-	storage.Prover
+	localProver storage.Prover

 	workLk sync.Mutex
 	work   *statestore.StateStore
@@ -76,6 +78,8 @@ type Manager struct {
 	waitRes map[WorkID]chan struct{}
 }

+var _ storage.Prover = &Manager{}
+
 type result struct {
 	r   interface{}
 	err error
@@ -119,7 +123,7 @@ type StorageAuth http.Header
 type WorkerStateStore *statestore.StateStore
 type ManagerStateStore *statestore.StateStore

-func New(ctx context.Context, lstor *stores.Local, stor *stores.Remote, ls stores.LocalStorage, si stores.SectorIndex, sc SealerConfig, wss WorkerStateStore, mss ManagerStateStore) (*Manager, error) {
+func New(ctx context.Context, lstor *stores.Local, stor stores.Store, ls stores.LocalStorage, si stores.SectorIndex, sc SealerConfig, wss WorkerStateStore, mss ManagerStateStore) (*Manager, error) {
 	prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si})
 	if err != nil {
 		return nil, xerrors.Errorf("creating prover instance: %w", err)
@@ -132,9 +136,11 @@ func New(ctx context.Context, lstor *stores.Local, stor *stores.Remote, ls store
 		remoteHnd:  &stores.FetchHandler{Local: lstor, PfHandler: &stores.DefaultPartialFileHandler{}},
 		index:      si,

 		sched:            newScheduler(),
+		windowPoStSched:  newPoStScheduler(sealtasks.TTGenerateWindowPoSt),
+		winningPoStSched: newPoStScheduler(sealtasks.TTGenerateWinningPoSt),

-		Prover: prover,
+		localProver: prover,

 		work:       mss,
 		callToWork: map[storiface.CallID]WorkID{},
@@ -207,7 +213,32 @@ func (m *Manager) AddLocalStorage(ctx context.Context, path string) error {
 }

 func (m *Manager) AddWorker(ctx context.Context, w Worker) error {
-	return m.sched.runWorker(ctx, w)
+	sessID, err := w.Session(ctx)
+	if err != nil {
+		return xerrors.Errorf("getting worker session: %w", err)
+	}
+	if sessID == ClosedWorkerID {
+		return xerrors.Errorf("worker already closed")
+	}
+
+	wid := storiface.WorkerID(sessID)
+
+	whnd, err := newWorkerHandle(ctx, w)
+	if err != nil {
+		return err
+	}
+
+	tasks, err := w.TaskTypes(ctx)
+	if err != nil {
+		return xerrors.Errorf("getting worker tasks: %w", err)
+	}
+
+	if m.windowPoStSched.MaybeAddWorker(wid, tasks, whnd) ||
+		m.winningPoStSched.MaybeAddWorker(wid, tasks, whnd) {
+		return nil
+	}
+
+	return m.sched.runWorker(ctx, wid, whnd)
 }

 func (m *Manager) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@@ -980,13 +1011,13 @@ func (m *Manager) ReturnFetch(ctx context.Context, callID storiface.CallID, err
 	return m.returnResult(ctx, callID, nil, err)
 }

-func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error) {
+func (m *Manager) StorageLocal(ctx context.Context) (map[storiface.ID]string, error) {
 	l, err := m.localStore.Local(ctx)
 	if err != nil {
 		return nil, err
 	}

-	out := map[stores.ID]string{}
+	out := map[storiface.ID]string{}
 	for _, st := range l {
 		out[st.ID] = st.LocalPath
 	}
@@ -994,7 +1025,7 @@ func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error
 	return out, nil
 }

-func (m *Manager) FsStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) {
+func (m *Manager) FsStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) {
 	return m.storage.FsStat(ctx, id)
 }

@@ -1052,6 +1083,8 @@ func (m *Manager) SchedDiag(ctx context.Context, doSched bool) (interface{}, err
 }

 func (m *Manager) Close(ctx context.Context) error {
+	m.windowPoStSched.schedClose()
+	m.winningPoStSched.schedClose()
 	return m.sched.Close(ctx)
 }
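The scheduling rule these fields introduce, in one place: PoSt workers register with the dedicated schedulers in AddWorker, and proving falls back to the in-process prover when none is attached. A reduced, hypothetical helper showing the shape (the real logic lives in manager_post.go below):

// proveVia is a hypothetical helper, not PR code: it sketches the
// CanSched/localProver fallback pattern used by the Manager's
// GenerateWindowPoSt and GenerateWinningPoSt entry points.
func (m *Manager) proveVia(ctx context.Context, sched *poStScheduler,
	local, remote func(context.Context) error) error {
	if !sched.CanSched(ctx) {
		// no PoSt worker attached: prove in-process at lotus-miner
		return local(ctx)
	}
	return remote(ctx)
}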
241  extern/sector-storage/manager_post.go  vendored  Normal file
@@ -0,0 +1,241 @@
+package sectorstorage
+
+import (
+	"context"
+	"sort"
+	"sync"
+
+	"go.uber.org/multierr"
+	"golang.org/x/xerrors"
+
+	ffi "github.com/filecoin-project/filecoin-ffi"
+	"github.com/filecoin-project/go-state-types/abi"
+
+	"github.com/filecoin-project/specs-actors/v6/actors/builtin"
+	"github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
+
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
+)
+
+func (m *Manager) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
+	if !m.winningPoStSched.CanSched(ctx) {
+		log.Info("GenerateWinningPoSt run at lotus-miner")
+		return m.localProver.GenerateWinningPoSt(ctx, minerID, sectorInfo, randomness)
+	}
+	return m.generateWinningPoSt(ctx, minerID, sectorInfo, randomness)
+}
+
+func (m *Manager) generateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
+	randomness[31] &= 0x3f
+
+	sectorNums := make([]abi.SectorNumber, len(sectorInfo))
+	for i, s := range sectorInfo {
+		sectorNums[i] = s.SectorNumber
+	}
+
+	if len(sectorInfo) == 0 {
+		return nil, xerrors.New("generate window post len(sectorInfo)=0")
+	}
+
+	spt := sectorInfo[0].SealProof
+
+	ppt, err := spt.RegisteredWinningPoStProof()
+	if err != nil {
+		return nil, err
+	}
+
+	postChallenges, err := ffi.GeneratePoStFallbackSectorChallenges(ppt, minerID, randomness, sectorNums)
+	if err != nil {
+		return nil, xerrors.Errorf("generating fallback challenges: %v", err)
+	}
+
+	sectorChallenges := make([]storiface.PostSectorChallenge, len(sectorInfo))
+	for i, s := range sectorInfo {
+		sectorChallenges[i] = storiface.PostSectorChallenge{
+			SealProof:    s.SealProof,
+			SectorNumber: s.SectorNumber,
+			SealedCID:    s.SealedCID,
+			Challenge:    postChallenges.Challenges[s.SectorNumber],
+			Update:       s.SectorKey != nil,
+		}
+	}
+
+	var proofs []proof.PoStProof
+	err = m.winningPoStSched.Schedule(ctx, false, spt, func(ctx context.Context, w Worker) error {
+		out, err := w.GenerateWinningPoSt(ctx, ppt, minerID, sectorChallenges, randomness)
+		if err != nil {
+			return err
+		}
+		proofs = out
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return proofs, nil
+}
+
+func (m *Manager) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) (proof []proof.PoStProof, skipped []abi.SectorID, err error) {
+	if !m.windowPoStSched.CanSched(ctx) {
+		log.Info("GenerateWindowPoSt run at lotus-miner")
+		return m.localProver.GenerateWindowPoSt(ctx, minerID, sectorInfo, randomness)
+	}
+
+	return m.generateWindowPoSt(ctx, minerID, sectorInfo, randomness)
+}
+
+func dedupeSectorInfo(sectorInfo []proof.ExtendedSectorInfo) []proof.ExtendedSectorInfo {
+	out := make([]proof.ExtendedSectorInfo, 0, len(sectorInfo))
+	seen := map[abi.SectorNumber]struct{}{}
+	for _, info := range sectorInfo {
+		if _, seen := seen[info.SectorNumber]; seen {
+			continue
+		}
+		seen[info.SectorNumber] = struct{}{}
+		out = append(out, info)
+	}
+	return out
+}
+
+func (m *Manager) generateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) {
+	var retErr error = nil
+	randomness[31] &= 0x3f
+
+	out := make([]proof.PoStProof, 0)
+
+	if len(sectorInfo) == 0 {
+		return nil, nil, xerrors.New("generate window post len(sectorInfo)=0")
+	}
+
+	spt := sectorInfo[0].SealProof
+
+	ppt, err := spt.RegisteredWindowPoStProof()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	maxPartitionSize, err := builtin.PoStProofWindowPoStPartitionSectors(ppt) // todo proxy through chain/actors
+	if err != nil {
+		return nil, nil, xerrors.Errorf("get sectors count of partition failed:%+v", err)
+	}
+
+	// We're supplied the list of sectors that the miner actor expects - this
+	// list contains substitutes for skipped sectors - but we don't care about
+	// those for the purpose of the proof, so for things to work, we need to
+	// dedupe here.
+	sectorInfo = dedupeSectorInfo(sectorInfo)
+
+	// The partitions number of this batch
+	// ceil(sectorInfos / maxPartitionSize)
+	partitionCount := uint64((len(sectorInfo) + int(maxPartitionSize) - 1) / int(maxPartitionSize))
+
+	log.Infof("generateWindowPoSt maxPartitionSize:%d partitionCount:%d", maxPartitionSize, partitionCount)
+
+	var skipped []abi.SectorID
+	var flk sync.Mutex
+	cctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	sort.Slice(sectorInfo, func(i, j int) bool {
+		return sectorInfo[i].SectorNumber < sectorInfo[j].SectorNumber
+	})
+
+	sectorNums := make([]abi.SectorNumber, len(sectorInfo))
+	sectorMap := make(map[abi.SectorNumber]proof.ExtendedSectorInfo)
+	for i, s := range sectorInfo {
+		sectorNums[i] = s.SectorNumber
+		sectorMap[s.SectorNumber] = s
+	}
+
+	postChallenges, err := ffi.GeneratePoStFallbackSectorChallenges(ppt, minerID, randomness, sectorNums)
+	if err != nil {
+		return nil, nil, xerrors.Errorf("generating fallback challenges: %v", err)
+	}
+
+	proofList := make([]ffi.PartitionProof, partitionCount)
+	var wg sync.WaitGroup
+	wg.Add(int(partitionCount))
+
+	for partIdx := uint64(0); partIdx < partitionCount; partIdx++ {
+		go func(partIdx uint64) {
+			defer wg.Done()
+
+			sectors := make([]storiface.PostSectorChallenge, 0)
+			for i := uint64(0); i < maxPartitionSize; i++ {
+				si := i + partIdx*maxPartitionSize
+				if si >= uint64(len(postChallenges.Sectors)) {
+					break
+				}
+
+				snum := postChallenges.Sectors[si]
+				sinfo := sectorMap[snum]
+
+				sectors = append(sectors, storiface.PostSectorChallenge{
+					SealProof:    sinfo.SealProof,
+					SectorNumber: snum,
+					SealedCID:    sinfo.SealedCID,
+					Challenge:    postChallenges.Challenges[snum],
+					Update:       sinfo.SectorKey != nil,
+				})
+			}
+
+			p, sk, err := m.generatePartitionWindowPost(cctx, spt, ppt, minerID, int(partIdx), sectors, randomness)
+			if err != nil || len(sk) > 0 {
+				log.Errorf("generateWindowPost part:%d, skipped:%d, sectors: %d, err: %+v", partIdx, len(sk), len(sectors), err)
+				flk.Lock()
+				skipped = append(skipped, sk...)
+
+				if err != nil {
+					retErr = multierr.Append(retErr, xerrors.Errorf("partitionCount:%d err:%+v", partIdx, err))
+				}
+				flk.Unlock()
+			}
+
+			proofList[partIdx] = ffi.PartitionProof(p)
+		}(partIdx)
+	}
+
+	wg.Wait()
+
+	if len(skipped) > 0 {
+		return nil, skipped, multierr.Append(xerrors.Errorf("some sectors (%d) were skipped", len(skipped)), retErr)
+	}
+
+	postProofs, err := ffi.MergeWindowPoStPartitionProofs(ppt, proofList)
+	if err != nil {
+		return nil, skipped, xerrors.Errorf("merge windowPoSt partition proofs: %v", err)
+	}
+
+	out = append(out, *postProofs)
+	return out, skipped, retErr
+}
+
+func (m *Manager) generatePartitionWindowPost(ctx context.Context, spt abi.RegisteredSealProof, ppt abi.RegisteredPoStProof, minerID abi.ActorID, partIndex int, sc []storiface.PostSectorChallenge, randomness abi.PoStRandomness) (proof.PoStProof, []abi.SectorID, error) {
+	log.Infow("generateWindowPost", "index", partIndex)
+
+	var result storiface.WindowPoStResult
+	err := m.windowPoStSched.Schedule(ctx, true, spt, func(ctx context.Context, w Worker) error {
+		out, err := w.GenerateWindowPoSt(ctx, ppt, minerID, sc, partIndex, randomness)
+		if err != nil {
+			return err
+		}
+
+		result = out
+		return nil
+	})
+
+	log.Warnf("generateWindowPost partition:%d, get skip count:%d", partIndex, len(result.Skipped))
+
+	return result.PoStProofs, result.Skipped, err
+}
+
+func (m *Manager) GenerateWinningPoStWithVanilla(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, proofs [][]byte) ([]proof.PoStProof, error) {
+	//TODO implement me
+	panic("implement me")
+}
+
+func (m *Manager) GenerateWindowPoStWithVanilla(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, proofs [][]byte, partitionIdx int) (proof.PoStProof, error) {
+	//TODO implement me
+	panic("implement me")
+}
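The ceil division that sizes the partition batches above is easy to sanity-check in isolation; a small standalone sketch with made-up sector counts (the real maxPartitionSize comes from builtin.PoStProofWindowPoStPartitionSectors):

package main

import "fmt"

func main() {
	// Mirrors: partitionCount := uint64((len(sectorInfo) + int(maxPartitionSize) - 1) / int(maxPartitionSize))
	maxPartitionSize := 2349 // illustrative; the actual value depends on the proof type
	for _, sectors := range []int{1, 2349, 2350, 5000} {
		partitionCount := (sectors + maxPartitionSize - 1) / maxPartitionSize
		fmt.Printf("%d sectors -> %d partition(s)\n", sectors, partitionCount)
	}
}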
26  extern/sector-storage/manager_test.go  vendored
@@ -50,7 +50,7 @@ func newTestStorage(t *testing.T) *testStorage {
 	{
 		b, err := json.MarshalIndent(&stores.LocalStorageMeta{
-			ID:       stores.ID(uuid.New().String()),
+			ID:       storiface.ID(uuid.New().String()),
 			Weight:   1,
 			CanSeal:  true,
 			CanStore: true,
@@ -116,9 +116,11 @@ func newTestMgr(ctx context.Context, t *testing.T, ds datastore.Datastore) (*Man
 		remoteHnd: &stores.FetchHandler{Local: lstor},
 		index:     si,

 		sched:            newScheduler(),
+		windowPoStSched:  newPoStScheduler(sealtasks.TTGenerateWindowPoSt),
+		winningPoStSched: newPoStScheduler(sealtasks.TTGenerateWinningPoSt),

-		Prover: prover,
+		localProver: prover,

 		work:       statestore.New(ds),
 		callToWork: map[storiface.CallID]WorkID{},
@@ -511,7 +513,7 @@ func TestRestartWorker(t *testing.T) {
 	//stm: @WORKER_STATS_001
 	for {
-		if len(m.WorkerStats()) == 0 {
+		if len(m.WorkerStats(ctx)) == 0 {
 			break
 		}
@@ -574,13 +576,13 @@ func TestReenableWorker(t *testing.T) {
 	//stm: @WORKER_STATS_001
 	for i := 0; i < 100; i++ {
-		if !m.WorkerStats()[w.session].Enabled {
+		if !m.WorkerStats(ctx)[w.session].Enabled {
 			break
 		}

 		time.Sleep(time.Millisecond * 3)
 	}
-	require.False(t, m.WorkerStats()[w.session].Enabled)
+	require.False(t, m.WorkerStats(ctx)[w.session].Enabled)

 	i, _ = m.sched.Info(ctx)
 	require.Len(t, i.(SchedDiagInfo).OpenWindows, 0)
@@ -589,13 +591,13 @@ func TestReenableWorker(t *testing.T) {
 	atomic.StoreInt64(&w.testDisable, 0)

 	for i := 0; i < 100; i++ {
-		if m.WorkerStats()[w.session].Enabled {
+		if m.WorkerStats(ctx)[w.session].Enabled {
 			break
 		}

 		time.Sleep(time.Millisecond * 3)
 	}
-	require.True(t, m.WorkerStats()[w.session].Enabled)
+	require.True(t, m.WorkerStats(ctx)[w.session].Enabled)

 	for i := 0; i < 100; i++ {
 		info, _ := m.sched.Info(ctx)
@@ -651,7 +653,7 @@ func TestResUse(t *testing.T) {

 l:
 	for {
-		st := m.WorkerStats()
+		st := m.WorkerStats(ctx)
 		require.Len(t, st, 1)
 		for _, w := range st {
 			if w.MemUsedMax > 0 {
@@ -661,7 +663,7 @@ l:
 		}
 	}

-	st := m.WorkerStats()
+	st := m.WorkerStats(ctx)
 	require.Len(t, st, 1)
 	for _, w := range st {
 		require.Equal(t, storiface.ResourceTable[sealtasks.TTAddPiece][abi.RegisteredSealProof_StackedDrg2KiBV1].MaxMemory, w.MemUsedMax)
@@ -713,7 +715,7 @@ func TestResOverride(t *testing.T) {

 l:
 	for {
-		st := m.WorkerStats()
+		st := m.WorkerStats(ctx)
 		require.Len(t, st, 1)
 		for _, w := range st {
 			if w.MemUsedMax > 0 {
@@ -723,7 +725,7 @@ l:
 		}
 	}

-	st := m.WorkerStats()
+	st := m.WorkerStats(ctx)
 	require.Len(t, st, 1)
 	for _, w := range st {
 		require.Equal(t, uint64(99999), w.MemUsedMax)

10
extern/sector-storage/mock/mock.go
vendored

@ -425,6 +425,14 @@ func generateFakePoSt(sectorInfo []proof.SectorInfo, rpt func(abi.RegisteredSeal
 	}
 }
 
+func (mgr *SectorMgr) GenerateWinningPoStWithVanilla(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, proofs [][]byte) ([]proof.PoStProof, error) {
+	panic("implement me")
+}
+
+func (mgr *SectorMgr) GenerateWindowPoStWithVanilla(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, proofs [][]byte, partitionIdx int) (proof.PoStProof, error) {
+	panic("implement me")
+}
+
 func (mgr *SectorMgr) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error) {
 	off := storiface.UnpaddedByteIndex(0)
 	var piece cid.Cid
@ -513,7 +521,7 @@ func (mgr *SectorMgr) Remove(ctx context.Context, sector storage.SectorRef) erro
 	return nil
 }
 
-func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []storage.SectorRef, update []bool, rg storiface.RGetter) (map[abi.SectorID]string, error) {
+func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) {
 	bad := map[abi.SectorID]string{}
 
 	for _, sid := range ids {

232
extern/sector-storage/sched_post.go
vendored
Normal file

@ -0,0 +1,232 @@
package sectorstorage

import (
	"context"
	"math/rand"
	"sync"
	"time"

	xerrors "golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"

	sealtasks "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

type poStScheduler struct {
	lk      sync.RWMutex
	workers map[storiface.WorkerID]*workerHandle
	cond    *sync.Cond

	postType sealtasks.TaskType
}

func newPoStScheduler(t sealtasks.TaskType) *poStScheduler {
	ps := &poStScheduler{
		workers:  map[storiface.WorkerID]*workerHandle{},
		postType: t,
	}
	ps.cond = sync.NewCond(&ps.lk)
	return ps
}

func (ps *poStScheduler) MaybeAddWorker(wid storiface.WorkerID, tasks map[sealtasks.TaskType]struct{}, w *workerHandle) bool {
	if _, ok := tasks[ps.postType]; !ok {
		return false
	}

	ps.lk.Lock()
	defer ps.lk.Unlock()

	ps.workers[wid] = w

	go ps.watch(wid, w)

	ps.cond.Broadcast()

	return true
}

func (ps *poStScheduler) delWorker(wid storiface.WorkerID) *workerHandle {
	ps.lk.Lock()
	defer ps.lk.Unlock()
	var w *workerHandle = nil
	if wh, ok := ps.workers[wid]; ok {
		w = wh
		delete(ps.workers, wid)
	}
	return w
}

func (ps *poStScheduler) CanSched(ctx context.Context) bool {
	ps.lk.RLock()
	defer ps.lk.RUnlock()
	if len(ps.workers) == 0 {
		return false
	}

	for _, w := range ps.workers {
		if w.enabled {
			return true
		}
	}

	return false
}

func (ps *poStScheduler) Schedule(ctx context.Context, primary bool, spt abi.RegisteredSealProof, work WorkerAction) error {
	ps.lk.Lock()
	defer ps.lk.Unlock()

	if len(ps.workers) == 0 {
		return xerrors.Errorf("can't find %s post worker", ps.postType)
	}

	// Get workers by resource
	canDo, candidates := ps.readyWorkers(spt)
	for !canDo {
		//if primary is true, it must be dispatched to a worker
		if primary {
			ps.cond.Wait()
			canDo, candidates = ps.readyWorkers(spt)
		} else {
			return xerrors.Errorf("can't find %s post worker", ps.postType)
		}
	}

	defer func() {
		if ps.cond != nil {
			ps.cond.Broadcast()
		}
	}()

	selected := candidates[0]
	worker := ps.workers[selected.id]

	return worker.active.withResources(selected.id, worker.info, selected.res, &ps.lk, func() error {
		ps.lk.Unlock()
		defer ps.lk.Lock()

		return work(ctx, worker.workerRpc)
	})
}

type candidateWorker struct {
	id  storiface.WorkerID
	res storiface.Resources
}

func (ps *poStScheduler) readyWorkers(spt abi.RegisteredSealProof) (bool, []candidateWorker) {
	var accepts []candidateWorker
	//if the gpus of the worker are insufficient or it's disabled, it cannot be scheduled
	for wid, wr := range ps.workers {
		needRes := wr.info.Resources.ResourceSpec(spt, ps.postType)

		if !wr.active.canHandleRequest(needRes, wid, "post-readyWorkers", wr.info) {
			continue
		}

		accepts = append(accepts, candidateWorker{
			id:  wid,
			res: needRes,
		})
	}

	// todo: round robin or something
	rand.Shuffle(len(accepts), func(i, j int) {
		accepts[i], accepts[j] = accepts[j], accepts[i]
	})

	return len(accepts) != 0, accepts
}

func (ps *poStScheduler) disable(wid storiface.WorkerID) {
	ps.lk.Lock()
	defer ps.lk.Unlock()
	ps.workers[wid].enabled = false
}

func (ps *poStScheduler) enable(wid storiface.WorkerID) {
	ps.lk.Lock()
	defer ps.lk.Unlock()
	ps.workers[wid].enabled = true
}

func (ps *poStScheduler) watch(wid storiface.WorkerID, worker *workerHandle) {
	heartbeatTimer := time.NewTicker(stores.HeartbeatInterval)
	defer heartbeatTimer.Stop()

	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	defer close(worker.closedMgr)

	defer func() {
		log.Warnw("Worker closing", "WorkerID", wid)
		ps.delWorker(wid)
	}()

	for {
		sctx, scancel := context.WithTimeout(ctx, stores.HeartbeatInterval/2)
		curSes, err := worker.workerRpc.Session(sctx)
		scancel()
		if err != nil {
			// Likely temporary error
			log.Warnw("failed to check worker session", "error", err)
			ps.disable(wid)

			select {
			case <-heartbeatTimer.C:
				continue
			case <-worker.closingMgr:
				return
			}
		}

		if storiface.WorkerID(curSes) != wid {
			if curSes != ClosedWorkerID {
				// worker restarted
				log.Warnw("worker session changed (worker restarted?)", "initial", wid, "current", curSes)
			}
			return
		}

		ps.enable(wid)
	}
}

func (ps *poStScheduler) workerCleanup(wid storiface.WorkerID, w *workerHandle) {
	select {
	case <-w.closingMgr:
	default:
		close(w.closingMgr)
	}

	ps.lk.Unlock()
	select {
	case <-w.closedMgr:
	case <-time.After(time.Second):
		log.Errorf("timeout closing worker manager goroutine %s", wid)
	}
	ps.lk.Lock()
}

func (ps *poStScheduler) schedClose() {
	ps.lk.Lock()
	defer ps.lk.Unlock()
	log.Debugf("closing scheduler")

	for i, w := range ps.workers {
		ps.workerCleanup(i, w)
	}
}

func (ps *poStScheduler) WorkerStats(ctx context.Context, cb func(ctx context.Context, wid storiface.WorkerID, worker *workerHandle)) {
	ps.lk.RLock()
	defer ps.lk.RUnlock()
	for id, w := range ps.workers {
		cb(ctx, id, w)
	}
}
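
Note: unlike the sealing scheduler, this PoSt scheduler keeps no request queue; `Schedule` either runs the action on a ready worker immediately or, for primary work, blocks on the condition variable until a worker frees up. A sketch of how the manager drives it, using only methods defined above (surrounding variables such as `mgr`, `wid`, `tasks` and `handle` are assumed):

	// Registration: only workers advertising the matching task type join.
	if mgr.windowPoStSched.MaybeAddWorker(wid, tasks, handle) {
		log.Infow("worker registered for window PoSt", "worker", wid)
	}

	// Dispatch: primary=true means "wait for a worker" rather than fail fast.
	err := mgr.windowPoStSched.Schedule(ctx, true, spt, func(ctx context.Context, w Worker) error {
		out, err := w.GenerateWindowPoSt(ctx, ppt, minerID, challenges, partIdx, randomness)
		// out.Skipped carries the sectors the worker could not prove
		_ = out
		return err
	})
	_ = err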

27
extern/sector-storage/sched_test.go
vendored

@ -17,6 +17,7 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
 
 	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
 	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
@ -58,7 +59,7 @@ var constrainedWorkerResources = storiface.WorkerResources{
 type schedTestWorker struct {
 	name      string
 	taskTypes map[sealtasks.TaskType]struct{}
-	paths     []stores.StoragePath
+	paths     []storiface.StoragePath
 
 	closed  bool
 	session uuid.UUID
@ -139,11 +140,19 @@ func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id st
 	panic("implement me")
 }
 
+func (s *schedTestWorker) GenerateWinningPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []storiface.PostSectorChallenge, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
+	panic("implement me")
+}
+
+func (s *schedTestWorker) GenerateWindowPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []storiface.PostSectorChallenge, partitionIdx int, randomness abi.PoStRandomness) (storiface.WindowPoStResult, error) {
+	panic("implement me")
+}
+
 func (s *schedTestWorker) TaskTypes(ctx context.Context) (map[sealtasks.TaskType]struct{}, error) {
 	return s.taskTypes, nil
 }
 
-func (s *schedTestWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) {
+func (s *schedTestWorker) Paths(ctx context.Context) ([]storiface.StoragePath, error) {
 	return s.paths, nil
 }
 
@ -174,7 +183,7 @@ func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name str
 	w := &schedTestWorker{
 		name:      name,
 		taskTypes: taskTypes,
-		paths:     []stores.StoragePath{{ID: "bb-8", Weight: 2, LocalPath: "food", CanSeal: true, CanStore: true}},
+		paths:     []storiface.StoragePath{{ID: "bb-8", Weight: 2, LocalPath: "food", CanSeal: true, CanStore: true}},
 
 		session: uuid.New(),
@ -183,7 +192,7 @@ func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name str
 	}
 
 	for _, path := range w.paths {
-		err := index.StorageAttach(context.TODO(), stores.StorageInfo{
+		err := index.StorageAttach(context.TODO(), storiface.StorageInfo{
 			ID:     path.ID,
 			URLs:   nil,
 			Weight: path.Weight,
@ -198,7 +207,15 @@ func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name str
 		require.NoError(t, err)
 	}
 
-	require.NoError(t, sched.runWorker(context.TODO(), w))
+	sessID, err := w.Session(context.TODO())
+	require.NoError(t, err)
+
+	wid := storiface.WorkerID(sessID)
+
+	wh, err := newWorkerHandle(context.TODO(), w)
+	require.NoError(t, err)
+
+	require.NoError(t, sched.runWorker(context.TODO(), wid, wh))
 }
 
 func TestSchedStartStop(t *testing.T) {

18
extern/sector-storage/sched_worker.go
vendored

@ -24,19 +24,10 @@ type schedWorker struct {
 	windowsRequested int
 }
 
-// context only used for startup
-func (sh *scheduler) runWorker(ctx context.Context, w Worker) error {
+func newWorkerHandle(ctx context.Context, w Worker) (*workerHandle, error) {
 	info, err := w.Info(ctx)
 	if err != nil {
-		return xerrors.Errorf("getting worker info: %w", err)
-	}
-
-	sessID, err := w.Session(ctx)
-	if err != nil {
-		return xerrors.Errorf("getting worker session: %w", err)
-	}
-	if sessID == ClosedWorkerID {
-		return xerrors.Errorf("worker already closed")
+		return nil, xerrors.Errorf("getting worker info: %w", err)
 	}
 
 	worker := &workerHandle{
@ -51,8 +42,11 @@ func (sh *scheduler) runWorker(ctx context.Context, w Worker) error {
 		closedMgr: make(chan struct{}),
 	}
 
-	wid := storiface.WorkerID(sessID)
+	return worker, nil
+}
 
+// context only used for startup
+func (sh *scheduler) runWorker(ctx context.Context, wid storiface.WorkerID, worker *workerHandle) error {
 	sh.workersLk.Lock()
 	_, exist := sh.workers[wid]
 	if exist {
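
Note: `newWorkerHandle` and `runWorker` are now separate steps, so the session lookup moves to the caller and the session UUID doubles as the `WorkerID`. The registration sequence, mirroring the sched_test.go change above (error handling elided):

	sessID, err := w.Session(ctx)
	if err != nil {
		return err
	}
	wid := storiface.WorkerID(sessID)

	wh, err := newWorkerHandle(ctx, w)
	if err != nil {
		return err
	}
	return sh.runWorker(ctx, wid, wh)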

31
extern/sector-storage/sealtasks/task.go
vendored

@ -19,6 +19,9 @@ const (
 	TTProveReplicaUpdate2   TaskType = "seal/v0/provereplicaupdate/2"
 	TTRegenSectorKey        TaskType = "seal/v0/regensectorkey"
 	TTFinalizeReplicaUpdate TaskType = "seal/v0/finalize/replicaupdate"
+
+	TTGenerateWindowPoSt  TaskType = "post/v0/windowproof"
+	TTGenerateWinningPoSt TaskType = "post/v0/winningproof"
 )
 
 var order = map[TaskType]int{
@ -32,8 +35,12 @@ var order = map[TaskType]int{
 	TTCommit2: 3,
 	TTCommit1: 2,
 	TTUnseal:  1,
-	TTFetch:    -1,
-	TTFinalize: -2, // most priority
+
+	TTFetch:    -1,
+	TTFinalize: -2,
+
+	TTGenerateWindowPoSt:  -3,
+	TTGenerateWinningPoSt: -4, // most priority
 }
 
 var shortNames = map[TaskType]string{
@ -54,6 +61,26 @@ var shortNames = map[TaskType]string{
 	TTProveReplicaUpdate2:   "PR2",
 	TTRegenSectorKey:        "GSK",
 	TTFinalizeReplicaUpdate: "FRU",
+
+	TTGenerateWindowPoSt:  "WDP",
+	TTGenerateWinningPoSt: "WNP",
+}
+
+const (
+	WorkerSealing     = "Sealing"
+	WorkerWinningPoSt = "WinPost"
+	WorkerWindowPoSt  = "WdPoSt"
+)
+
+func (a TaskType) WorkerType() string {
+	switch a {
+	case TTGenerateWinningPoSt:
+		return WorkerWinningPoSt
+	case TTGenerateWindowPoSt:
+		return WorkerWindowPoSt
+	default:
+		return WorkerSealing
+	}
 }
 
 func (a TaskType) MuchLess(b TaskType) (bool, bool) {
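
Note: per the comments above, the most negative `order` entries are the highest priority, so winning PoSt (-4) now outranks window PoSt (-3), which outranks finalize (-2) and fetch (-1). A small self-contained check of the new routing helper, using only identifiers added in this diff:

	package main

	import (
		"fmt"

		"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
	)

	func main() {
		for _, tt := range []sealtasks.TaskType{
			sealtasks.TTGenerateWinningPoSt, // -> WinPost
			sealtasks.TTGenerateWindowPoSt,  // -> WdPoSt
			sealtasks.TTFetch,               // -> Sealing (the default)
		} {
			fmt.Println(tt, "->", tt.WorkerType())
		}
	}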

2
extern/sector-storage/selector_alloc.go
vendored

@ -40,7 +40,7 @@ func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi
 		return false, xerrors.Errorf("getting worker paths: %w", err)
 	}
 
-	have := map[stores.ID]struct{}{}
+	have := map[storiface.ID]struct{}{}
 	for _, path := range paths {
 		have[path.ID] = struct{}{}
 	}

2
extern/sector-storage/selector_existing.go
vendored

@ -42,7 +42,7 @@ func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt
 		return false, xerrors.Errorf("getting worker paths: %w", err)
 	}
 
-	have := map[stores.ID]struct{}{}
+	have := map[storiface.ID]struct{}{}
 	for _, path := range paths {
 		have[path.ID] = struct{}{}
 	}

4
extern/sector-storage/selector_task.go
vendored

@ -8,11 +8,11 @@ import (
 	"github.com/filecoin-project/go-state-types/abi"
 
 	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
-	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )
 
 type taskSelector struct {
-	best []stores.StorageInfo //nolint: unused, structcheck
+	best []storiface.StorageInfo //nolint: unused, structcheck
 }
 
 func newTaskSelector() *taskSelector {

36
extern/sector-storage/stats.go
vendored

@ -1,25 +1,40 @@
 package sectorstorage
 
 import (
+	"context"
 	"time"
 
 	"github.com/google/uuid"
 
+	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )
 
-func (m *Manager) WorkerStats() map[uuid.UUID]storiface.WorkerStats {
+func (m *Manager) WorkerStats(ctx context.Context) map[uuid.UUID]storiface.WorkerStats {
 	m.sched.workersLk.RLock()
-	defer m.sched.workersLk.RUnlock()
 
 	out := map[uuid.UUID]storiface.WorkerStats{}
 
-	for id, handle := range m.sched.workers {
+	cb := func(ctx context.Context, id storiface.WorkerID, handle *workerHandle) {
 		handle.lk.Lock()
-		out[uuid.UUID(id)] = storiface.WorkerStats{
-			Info:    handle.info,
-			Enabled: handle.enabled,
+
+		ctx, cancel := context.WithTimeout(ctx, 3*time.Second)
+		defer cancel()
+
+		tt, err := handle.workerRpc.TaskTypes(ctx)
+		var taskList []sealtasks.TaskType
+		if err != nil {
+			log.Warnw("getting worker task types in WorkerStats", "error", err)
+		} else {
+			for taskType := range tt {
+				taskList = append(taskList, taskType)
+			}
+		}
+
+		out[uuid.UUID(id)] = storiface.WorkerStats{
+			Info:    handle.info,
+			Tasks:   taskList,
+			Enabled: handle.enabled,
 
 			MemUsedMin: handle.active.memUsedMin,
 			MemUsedMax: handle.active.memUsedMax,
 			GpuUsed:    handle.active.gpuUsed,
@ -28,6 +43,15 @@ func (m *Manager) WorkerStats() map[uuid.UUID]storiface.WorkerStats {
 		handle.lk.Unlock()
 	}
 
+	for id, handle := range m.sched.workers {
+		cb(ctx, id, handle)
+	}
+
+	m.sched.workersLk.RUnlock()
+
+	//list post workers
+	m.winningPoStSched.WorkerStats(ctx, cb)
+	m.windowPoStSched.WorkerStats(ctx, cb)
 	return out
 }
 
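
Note: `WorkerStats` now needs a context because it performs a live `TaskTypes` RPC against every worker (bounded by a 3-second timeout per worker) and also folds in both PoSt schedulers through their `WorkerStats` callbacks. A caller sketch (the `mgr` value is assumed):

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	for id, st := range mgr.WorkerStats(ctx) {
		// Tasks is newly populated from the TaskTypes RPC; Enabled tracks
		// the heartbeat-driven enable/disable state.
		fmt.Println(id, st.Enabled, st.Tasks)
	}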

30
extern/sector-storage/stores/http_handler.go
vendored

@ -1,10 +1,12 @@
 package stores
 
 import (
+	"bytes"
 	"encoding/json"
 	"net/http"
 	"os"
 	"strconv"
+	"time"
 
 	"github.com/gorilla/mux"
 	logging "github.com/ipfs/go-log/v2"
@ -52,6 +54,7 @@ func (handler *FetchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	mux := mux.NewRouter()
 
 	mux.HandleFunc("/remote/stat/{id}", handler.remoteStatFs).Methods("GET")
+	mux.HandleFunc("/remote/vanilla/single", handler.generateSingleVanillaProof).Methods("POST")
 	mux.HandleFunc("/remote/{type}/{id}/{spt}/allocated/{offset}/{size}", handler.remoteGetAllocated).Methods("GET")
 	mux.HandleFunc("/remote/{type}/{id}", handler.remoteGetSector).Methods("GET")
 	mux.HandleFunc("/remote/{type}/{id}", handler.remoteDeleteSector).Methods("DELETE")
@ -61,7 +64,7 @@ func (handler *FetchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 
 func (handler *FetchHandler) remoteStatFs(w http.ResponseWriter, r *http.Request) {
 	vars := mux.Vars(r)
-	id := ID(vars["id"])
+	id := storiface.ID(vars["id"])
 
 	st, err := handler.Local.FsStat(r.Context(), id)
 	switch err {
@ -172,7 +175,7 @@ func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.R
 		return
 	}
 
-	if err := handler.Local.Remove(r.Context(), id, ft, false, ParseIDList(r.FormValue("keep"))); err != nil {
+	if err := handler.Local.Remove(r.Context(), id, ft, false, storiface.ParseIDList(r.FormValue("keep"))); err != nil {
 		log.Errorf("%+v", err)
 		w.WriteHeader(500)
 		return
@ -286,6 +289,29 @@ func (handler *FetchHandler) remoteGetAllocated(w http.ResponseWriter, r *http.R
 	w.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
 }
 
+type SingleVanillaParams struct {
+	Miner     abi.ActorID
+	Sector    storiface.PostSectorChallenge
+	ProofType abi.RegisteredPoStProof
+}
+
+func (handler *FetchHandler) generateSingleVanillaProof(w http.ResponseWriter, r *http.Request) {
+	var params SingleVanillaParams
+	if err := json.NewDecoder(r.Body).Decode(&params); err != nil {
+		http.Error(w, err.Error(), 500)
+		return
+	}
+
+	vanilla, err := handler.Local.GenerateSingleVanillaProof(r.Context(), params.Miner, params.Sector, params.ProofType)
+	if err != nil {
+		http.Error(w, err.Error(), 500)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/octet-stream")
+	http.ServeContent(w, r, "", time.Time{}, bytes.NewReader(vanilla))
+}
+
 func ftFromString(t string) (storiface.SectorFileType, error) {
 	switch t {
 	case storiface.FTUnsealed.String():
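
Note: the new `/remote/vanilla/single` route lets a PoSt worker pull a single vanilla proof from the node that actually holds the sector, instead of needing the sector files locally. A hedged client-side sketch (the host and error handling are illustrative, not from this diff):

	body, err := json.Marshal(stores.SingleVanillaParams{
		Miner:     mid,
		Sector:    challenge,
		ProofType: ppt,
	})
	if err != nil {
		return nil, err
	}

	// POST to the storage node serving FetchHandler; the host is a placeholder.
	resp, err := http.Post("http://storage-host:3456/remote/vanilla/single", "application/json", bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close() // nolint
	if resp.StatusCode != http.StatusOK {
		return nil, xerrors.Errorf("single vanilla proof: http %d", resp.StatusCode)
	}
	return ioutil.ReadAll(resp.Body)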

162
extern/sector-storage/stores/index.go
vendored

@ -7,7 +7,6 @@ import (
 	"net/url"
 	gopath "path"
 	"sort"
-	"strings"
 	"sync"
 	"time"
 
@ -26,95 +25,34 @@ import (
 var HeartbeatInterval = 10 * time.Second
 var SkippedHeartbeatThresh = HeartbeatInterval * 5
 
-// ID identifies sector storage by UUID. One sector storage should map to one
-// filesystem, local or networked / shared by multiple machines
-type ID string
-
-const IDSep = "."
-
-type IDList []ID
-
-func (il IDList) String() string {
-	l := make([]string, len(il))
-	for i, id := range il {
-		l[i] = string(id)
-	}
-	return strings.Join(l, IDSep)
-}
-
-func ParseIDList(s string) IDList {
-	strs := strings.Split(s, IDSep)
-	out := make([]ID, len(strs))
-	for i, str := range strs {
-		out[i] = ID(str)
-	}
-	return out
-}
-
-type Group = string
-
-type StorageInfo struct {
-	ID         ID
-	URLs       []string // TODO: Support non-http transports
-	Weight     uint64
-	MaxStorage uint64
-
-	CanSeal  bool
-	CanStore bool
-
-	Groups  []Group
-	AllowTo []Group
-}
-
-type HealthReport struct {
-	Stat fsutil.FsStat
-	Err  string
-}
-
-type SectorStorageInfo struct {
-	ID     ID
-	URLs   []string // TODO: Support non-http transports
-	Weight uint64
-
-	CanSeal  bool
-	CanStore bool
-
-	Primary bool
-}
-
 //go:generate go run github.com/golang/mock/mockgen -destination=mocks/index.go -package=mocks . SectorIndex
 
 type SectorIndex interface { // part of storage-miner api
-	StorageAttach(context.Context, StorageInfo, fsutil.FsStat) error
-	StorageInfo(context.Context, ID) (StorageInfo, error)
-	StorageReportHealth(context.Context, ID, HealthReport) error
+	StorageAttach(context.Context, storiface.StorageInfo, fsutil.FsStat) error
+	StorageInfo(context.Context, storiface.ID) (storiface.StorageInfo, error)
+	StorageReportHealth(context.Context, storiface.ID, storiface.HealthReport) error
 
-	StorageDeclareSector(ctx context.Context, storageID ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error
-	StorageDropSector(ctx context.Context, storageID ID, s abi.SectorID, ft storiface.SectorFileType) error
-	StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]SectorStorageInfo, error)
+	StorageDeclareSector(ctx context.Context, storageID storiface.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error
+	StorageDropSector(ctx context.Context, storageID storiface.ID, s abi.SectorID, ft storiface.SectorFileType) error
+	StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]storiface.SectorStorageInfo, error)
 
-	StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]StorageInfo, error)
+	StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]storiface.StorageInfo, error)
 
 	// atomically acquire locks on all sector file types. close ctx to unlock
 	StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error
 	StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error)
 	StorageGetLocks(ctx context.Context) (storiface.SectorLocks, error)
 
-	StorageList(ctx context.Context) (map[ID][]Decl, error)
+	StorageList(ctx context.Context) (map[storiface.ID][]storiface.Decl, error)
 }
 
-type Decl struct {
-	abi.SectorID
-	storiface.SectorFileType
-}
-
 type declMeta struct {
-	storage ID
+	storage storiface.ID
 	primary bool
 }
 
 type storageEntry struct {
-	info *StorageInfo
+	info *storiface.StorageInfo
 	fsi  fsutil.FsStat
 
 	lastHeartbeat time.Time
@ -125,8 +63,8 @@ type Index struct {
 	*indexLocks
 	lk sync.RWMutex
 
-	sectors map[Decl][]*declMeta
-	stores  map[ID]*storageEntry
+	sectors map[storiface.Decl][]*declMeta
+	stores  map[storiface.ID]*storageEntry
 }
 
 func NewIndex() *Index {
@ -134,16 +72,16 @@ func NewIndex() *Index {
 	indexLocks: &indexLocks{
 		locks: map[abi.SectorID]*sectorLock{},
 	},
-	sectors: map[Decl][]*declMeta{},
-	stores:  map[ID]*storageEntry{},
+	sectors: map[storiface.Decl][]*declMeta{},
+	stores:  map[storiface.ID]*storageEntry{},
 	}
 }
 
-func (i *Index) StorageList(ctx context.Context) (map[ID][]Decl, error) {
+func (i *Index) StorageList(ctx context.Context) (map[storiface.ID][]storiface.Decl, error) {
 	i.lk.RLock()
 	defer i.lk.RUnlock()
 
-	byID := map[ID]map[abi.SectorID]storiface.SectorFileType{}
+	byID := map[storiface.ID]map[abi.SectorID]storiface.SectorFileType{}
 
 	for id := range i.stores {
 		byID[id] = map[abi.SectorID]storiface.SectorFileType{}
@ -154,11 +92,11 @@ func (i *Index) StorageList(ctx context.Context) (map[ID][]Decl, error) {
 		}
 	}
 
-	out := map[ID][]Decl{}
+	out := map[storiface.ID][]storiface.Decl{}
 	for id, m := range byID {
-		out[id] = []Decl{}
+		out[id] = []storiface.Decl{}
 		for sectorID, fileType := range m {
-			out[id] = append(out[id], Decl{
+			out[id] = append(out[id], storiface.Decl{
 				SectorID:       sectorID,
 				SectorFileType: fileType,
 			})
@ -168,7 +106,7 @@ func (i *Index) StorageList(ctx context.Context) (map[ID][]Decl, error) {
 	return out, nil
 }
 
-func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st fsutil.FsStat) error {
+func (i *Index) StorageAttach(ctx context.Context, si storiface.StorageInfo, st fsutil.FsStat) error {
 	i.lk.Lock()
 	defer i.lk.Unlock()
 
@ -210,7 +148,7 @@ func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st fsutil.FsS
 	return nil
 }
 
-func (i *Index) StorageReportHealth(ctx context.Context, id ID, report HealthReport) error {
+func (i *Index) StorageReportHealth(ctx context.Context, id storiface.ID, report storiface.HealthReport) error {
 	i.lk.Lock()
 	defer i.lk.Unlock()
 
@ -249,7 +187,7 @@ func (i *Index) StorageReportHealth(ctx context.Context, id ID, report HealthRep
 	return nil
 }
 
-func (i *Index) StorageDeclareSector(ctx context.Context, storageID ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error {
+func (i *Index) StorageDeclareSector(ctx context.Context, storageID storiface.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error {
 	i.lk.Lock()
 	defer i.lk.Unlock()
 
@ -259,7 +197,7 @@ loop:
 			continue
 		}
 
-		d := Decl{s, fileType}
+		d := storiface.Decl{SectorID: s, SectorFileType: fileType}
 
 		for _, sid := range i.sectors[d] {
 			if sid.storage == storageID {
@ -281,7 +219,7 @@ loop:
 	return nil
 }
 
-func (i *Index) StorageDropSector(ctx context.Context, storageID ID, s abi.SectorID, ft storiface.SectorFileType) error {
+func (i *Index) StorageDropSector(ctx context.Context, storageID storiface.ID, s abi.SectorID, ft storiface.SectorFileType) error {
 	i.lk.Lock()
 	defer i.lk.Unlock()
 
@ -290,7 +228,7 @@ func (i *Index) StorageDropSector(ctx context.Context, storageID ID, s abi.Secto
 			continue
 		}
 
-		d := Decl{s, fileType}
+		d := storiface.Decl{SectorID: s, SectorFileType: fileType}
 
 		if len(i.sectors[d]) == 0 {
 			continue
@ -315,27 +253,27 @@ func (i *Index) StorageDropSector(ctx context.Context, storageID ID, s abi.Secto
 	return nil
 }
 
-func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]SectorStorageInfo, error) {
+func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]storiface.SectorStorageInfo, error) {
 	i.lk.RLock()
 	defer i.lk.RUnlock()
 
-	storageIDs := map[ID]uint64{}
-	isprimary := map[ID]bool{}
+	storageIDs := map[storiface.ID]uint64{}
+	isprimary := map[storiface.ID]bool{}
 
-	allowTo := map[Group]struct{}{}
+	allowTo := map[storiface.Group]struct{}{}
 
 	for _, pathType := range storiface.PathTypes {
 		if ft&pathType == 0 {
 			continue
 		}
 
-		for _, id := range i.sectors[Decl{s, pathType}] {
+		for _, id := range i.sectors[storiface.Decl{SectorID: s, SectorFileType: pathType}] {
 			storageIDs[id.storage]++
 			isprimary[id.storage] = isprimary[id.storage] || id.primary
 		}
 	}
 
-	out := make([]SectorStorageInfo, 0, len(storageIDs))
+	out := make([]storiface.SectorStorageInfo, 0, len(storageIDs))
 
 	for id, n := range storageIDs {
 		st, ok := i.stores[id]
@ -344,7 +282,7 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storif
 			continue
 		}
 
-		urls := make([]string, len(st.info.URLs))
+		urls, burls := make([]string, len(st.info.URLs)), make([]string, len(st.info.URLs))
 		for k, u := range st.info.URLs {
 			rl, err := url.Parse(u)
 			if err != nil {
@ -353,6 +291,7 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storif
 
 			rl.Path = gopath.Join(rl.Path, ft.String(), storiface.SectorName(s))
 			urls[k] = rl.String()
+			burls[k] = u
 		}
 
 		if allowTo != nil && len(st.info.AllowTo) > 0 {
@ -363,10 +302,11 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storif
 			allowTo = nil // allow to any
 		}
 
-		out = append(out, SectorStorageInfo{
+		out = append(out, storiface.SectorStorageInfo{
 			ID:       id,
 			URLs:     urls,
-			Weight:   st.info.Weight * n, // storage with more sector types is better
+			BaseURLs: burls,
+			Weight:   st.info.Weight * n, // storage with more sector types is better
 
 			CanSeal:  st.info.CanSeal,
 			CanStore: st.info.CanStore,
@ -421,7 +361,7 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storif
 		}
 	}
 
-	urls := make([]string, len(st.info.URLs))
+	urls, burls := make([]string, len(st.info.URLs)), make([]string, len(st.info.URLs))
 	for k, u := range st.info.URLs {
 		rl, err := url.Parse(u)
 		if err != nil {
@ -430,12 +370,14 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storif
 
 		rl.Path = gopath.Join(rl.Path, ft.String(), storiface.SectorName(s))
 		urls[k] = rl.String()
+		burls[k] = u
 	}
 
-	out = append(out, SectorStorageInfo{
+	out = append(out, storiface.SectorStorageInfo{
 		ID:       id,
 		URLs:     urls,
-		Weight:   st.info.Weight * 0, // TODO: something better than just '0'
+		BaseURLs: burls,
+		Weight:   st.info.Weight * 0, // TODO: something better than just '0'
 
 		CanSeal:  st.info.CanSeal,
 		CanStore: st.info.CanStore,
@ -448,19 +390,19 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storif
 	return out, nil
 }
 
-func (i *Index) StorageInfo(ctx context.Context, id ID) (StorageInfo, error) {
+func (i *Index) StorageInfo(ctx context.Context, id storiface.ID) (storiface.StorageInfo, error) {
 	i.lk.RLock()
 	defer i.lk.RUnlock()
 
 	si, found := i.stores[id]
 	if !found {
-		return StorageInfo{}, xerrors.Errorf("sector store not found")
+		return storiface.StorageInfo{}, xerrors.Errorf("sector store not found")
 	}
 
 	return *si.info, nil
 }
 
-func (i *Index) StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]StorageInfo, error) {
+func (i *Index) StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]storiface.StorageInfo, error) {
 	i.lk.RLock()
 	defer i.lk.RUnlock()
 
@ -517,7 +459,7 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate storiface.SectorF
 		return iw.GreaterThan(jw)
 	})
 
-	out := make([]StorageInfo, len(candidates))
+	out := make([]storiface.StorageInfo, len(candidates))
 	for i, candidate := range candidates {
 		out[i] = *candidate.info
 	}
@ -525,18 +467,18 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate storiface.SectorF
 	return out, nil
 }
 
-func (i *Index) FindSector(id abi.SectorID, typ storiface.SectorFileType) ([]ID, error) {
+func (i *Index) FindSector(id abi.SectorID, typ storiface.SectorFileType) ([]storiface.ID, error) {
 	i.lk.RLock()
 	defer i.lk.RUnlock()
 
-	f, ok := i.sectors[Decl{
+	f, ok := i.sectors[storiface.Decl{
 		SectorID:       id,
 		SectorFileType: typ,
 	}]
 	if !ok {
 		return nil, nil
 	}
-	out := make([]ID, 0, len(f))
+	out := make([]storiface.ID, 0, len(f))
 	for _, meta := range f {
 		out = append(out, meta.storage)
 	}
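
Note: the `ID`, `IDList`, `Group`, `StorageInfo`, `HealthReport`, `SectorStorageInfo`, and `Decl` definitions removed here move into the `storiface` package (the HTTP handler above already calls `storiface.ParseIDList`). Assuming the moved helpers keep the behaviour shown in the removed code, the ID-list round trip looks like:

	ids := storiface.ParseIDList("id-a.id-b") // -> storiface.IDList{"id-a", "id-b"}
	fmt.Println(ids.String())                 // "id-a.id-b", joined with IDSep "."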

14
extern/sector-storage/stores/index_test.go
vendored

@ -18,9 +18,9 @@ func init() {
 	logging.SetLogLevel("stores", "DEBUG")
 }
 
-func newTestStorage() StorageInfo {
-	return StorageInfo{
-		ID:       ID(uuid.New().String()),
+func newTestStorage() storiface.StorageInfo {
+	return storiface.StorageInfo{
+		ID:       storiface.ID(uuid.New().String()),
 		CanSeal:  true,
 		CanStore: true,
 		Groups:   nil,
@ -81,7 +81,7 @@ func TestFindNoAllow(t *testing.T) {
 
 	i := NewIndex()
 	stor1 := newTestStorage()
-	stor1.AllowTo = []Group{"grp1"}
+	stor1.AllowTo = []storiface.Group{"grp1"}
 	stor2 := newTestStorage()
 
 	require.NoError(t, i.StorageAttach(ctx, stor1, bigFsStat))
@ -114,13 +114,13 @@ func TestFindAllow(t *testing.T) {
 	i := NewIndex()
 
 	stor1 := newTestStorage()
-	stor1.AllowTo = []Group{"grp1"}
+	stor1.AllowTo = []storiface.Group{"grp1"}
 
 	stor2 := newTestStorage()
-	stor2.Groups = []Group{"grp1"}
+	stor2.Groups = []storiface.Group{"grp1"}
 
 	stor3 := newTestStorage()
-	stor3.Groups = []Group{"grp2"}
+	stor3.Groups = []storiface.Group{"grp2"}
 
 	require.NoError(t, i.StorageAttach(ctx, stor1, bigFsStat))
 	require.NoError(t, i.StorageAttach(ctx, stor2, bigFsStat))

9
extern/sector-storage/stores/interface.go
vendored

@ -5,11 +5,10 @@ import (
 	"os"
 
 	"github.com/filecoin-project/go-state-types/abi"
-	"github.com/filecoin-project/lotus/extern/sector-storage/partialfile"
-
 	"github.com/filecoin-project/specs-storage/storage"
 
 	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
+	"github.com/filecoin-project/lotus/extern/sector-storage/partialfile"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )
 
@ -36,7 +35,7 @@ type PartialFileHandler interface {
 
 type Store interface {
 	AcquireSector(ctx context.Context, s storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode) (paths storiface.SectorPaths, stores storiface.SectorPaths, err error)
-	Remove(ctx context.Context, s abi.SectorID, types storiface.SectorFileType, force bool, keepIn []ID) error
+	Remove(ctx context.Context, s abi.SectorID, types storiface.SectorFileType, force bool, keepIn []storiface.ID) error
 
 	// like remove, but doesn't remove the primary sector copy, nor the last
 	// non-primary copy if there no primary copies
@ -45,7 +44,9 @@ type Store interface {
 	// move sectors into storage
 	MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error
 
-	FsStat(ctx context.Context, id ID) (fsutil.FsStat, error)
+	FsStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error)
 
 	Reserve(ctx context.Context, sid storage.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error)
+
+	GenerateSingleVanillaProof(ctx context.Context, minerID abi.ActorID, si storiface.PostSectorChallenge, ppt abi.RegisteredPoStProof) ([]byte, error)
 }

97
extern/sector-storage/stores/local.go
vendored

@ -13,26 +13,18 @@ import (
 	"golang.org/x/xerrors"
 
+	ffi "github.com/filecoin-project/filecoin-ffi"
 	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/specs-actors/actors/runtime/proof"
 	"github.com/filecoin-project/specs-storage/storage"
 
 	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )
 
-type StoragePath struct {
-	ID     ID
-	Weight uint64
-
-	LocalPath string
-
-	CanSeal  bool
-	CanStore bool
-}
-
 // LocalStorageMeta [path]/sectorstore.json
 type LocalStorageMeta struct {
-	ID ID
+	ID storiface.ID
 
 	// A high weight means data is more likely to be stored in this path
 	Weight uint64 // 0 = readonly
@ -82,7 +74,7 @@ type Local struct {
 	index SectorIndex
 	urls  []string
 
-	paths map[ID]*path
+	paths map[storiface.ID]*path
 
 	localLk sync.RWMutex
 }
@ -178,7 +170,7 @@ func NewLocal(ctx context.Context, ls LocalStorage, index SectorIndex, urls []st
 		index: index,
 		urls:  urls,
 
-		paths: map[ID]*path{},
+		paths: map[storiface.ID]*path{},
 	}
 	return l, l.open(ctx)
 }
@ -212,7 +204,7 @@ func (st *Local) OpenPath(ctx context.Context, p string) error {
 		return err
 	}
 
-	err = st.index.StorageAttach(ctx, StorageInfo{
+	err = st.index.StorageAttach(ctx, storiface.StorageInfo{
 		ID:     meta.ID,
 		URLs:   st.urls,
 		Weight: meta.Weight,
@ -278,7 +270,7 @@ func (st *Local) Redeclare(ctx context.Context) error {
 			continue
 		}
 
-		err = st.index.StorageAttach(ctx, StorageInfo{
+		err = st.index.StorageAttach(ctx, storiface.StorageInfo{
 			ID:     id,
 			URLs:   st.urls,
 			Weight: meta.Weight,
@ -300,7 +292,7 @@ func (st *Local) Redeclare(ctx context.Context) error {
 	return nil
 }
 
-func (st *Local) declareSectors(ctx context.Context, p string, id ID, primary bool) error {
+func (st *Local) declareSectors(ctx context.Context, p string, id storiface.ID, primary bool) error {
 	for _, t := range storiface.PathTypes {
 		ents, err := ioutil.ReadDir(filepath.Join(p, t.String()))
 		if err != nil {
@ -351,10 +343,10 @@ func (st *Local) reportHealth(ctx context.Context) {
 func (st *Local) reportStorage(ctx context.Context) {
 	st.localLk.RLock()
 
-	toReport := map[ID]HealthReport{}
+	toReport := map[storiface.ID]storiface.HealthReport{}
 	for id, p := range st.paths {
 		stat, err := p.stat(st.localStorage)
-		r := HealthReport{Stat: stat}
+		r := storiface.HealthReport{Stat: stat}
 		if err != nil {
 			r.Err = err.Error()
 		}
@ -391,7 +383,7 @@ func (st *Local) Reserve(ctx context.Context, sid storage.SectorRef, ft storifac
 			continue
 		}
 
-		id := ID(storiface.PathByType(storageIDs, fileType))
+		id := storiface.ID(storiface.PathByType(storageIDs, fileType))
 
 		p, ok := st.paths[id]
 		if !ok {
@ -489,7 +481,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid storage.SectorRef, exist
 	}
 
 	var best string
-	var bestID ID
+	var bestID storiface.ID
 
 	for _, si := range sis {
 		p, ok := st.paths[si.ID]
@ -528,11 +520,11 @@ func (st *Local) AcquireSector(ctx context.Context, sid storage.SectorRef, exist
 	return out, storageIDs, nil
 }
 
-func (st *Local) Local(ctx context.Context) ([]StoragePath, error) {
+func (st *Local) Local(ctx context.Context) ([]storiface.StoragePath, error) {
 	st.localLk.RLock()
 	defer st.localLk.RUnlock()
 
-	var out []StoragePath
+	var out []storiface.StoragePath
 	for id, p := range st.paths {
 		if p.local == "" {
 			continue
@ -543,7 +535,7 @@ func (st *Local) Local(ctx context.Context) ([]StoragePath, error) {
 			return nil, xerrors.Errorf("get storage info for %s: %w", id, err)
 		}
 
-		out = append(out, StoragePath{
+		out = append(out, storiface.StoragePath{
 			ID:        id,
 			Weight:    si.Weight,
 			LocalPath: p.local,
@ -555,7 +547,7 @@ func (st *Local) Local(ctx context.Context) ([]StoragePath, error) {
 	return out, nil
 }
 
-func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, force bool, keepIn []ID) error {
+func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, force bool, keepIn []storiface.ID) error {
 	if bits.OnesCount(uint(typ)) != 1 {
 		return xerrors.New("delete expects one file type")
 	}
@ -621,7 +613,7 @@ func (st *Local) RemoveCopies(ctx context.Context, sid abi.SectorID, typ storifa
 	return nil
 }
 
-func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, storage ID) error {
+func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, storage storiface.ID) error {
 	p, ok := st.paths[storage]
 	if !ok {
 		return nil
@ -663,12 +655,12 @@ func (st *Local) MoveStorage(ctx context.Context, s storage.SectorRef, types sto
 			continue
 		}
 
-		sst, err := st.index.StorageInfo(ctx, ID(storiface.PathByType(srcIds, fileType)))
+		sst, err := st.index.StorageInfo(ctx, storiface.ID(storiface.PathByType(srcIds, fileType)))
 		if err != nil {
 			return xerrors.Errorf("failed to get source storage info: %w", err)
 		}
 
-		dst, err := st.index.StorageInfo(ctx, ID(storiface.PathByType(destIds, fileType)))
+		dst, err := st.index.StorageInfo(ctx, storiface.ID(storiface.PathByType(destIds, fileType)))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("failed to get source storage info: %w", err)
|
return xerrors.Errorf("failed to get source storage info: %w", err)
|
||||||
}
|
}
|
||||||
@ -685,7 +677,7 @@ func (st *Local) MoveStorage(ctx context.Context, s storage.SectorRef, types sto
|
|||||||
|
|
||||||
log.Debugf("moving %v(%d) to storage: %s(se:%t; st:%t) -> %s(se:%t; st:%t)", s, fileType, sst.ID, sst.CanSeal, sst.CanStore, dst.ID, dst.CanSeal, dst.CanStore)
|
log.Debugf("moving %v(%d) to storage: %s(se:%t; st:%t) -> %s(se:%t; st:%t)", s, fileType, sst.ID, sst.CanSeal, sst.CanStore, dst.ID, dst.CanSeal, dst.CanStore)
|
||||||
|
|
||||||
if err := st.index.StorageDropSector(ctx, ID(storiface.PathByType(srcIds, fileType)), s.ID, fileType); err != nil {
|
if err := st.index.StorageDropSector(ctx, storiface.ID(storiface.PathByType(srcIds, fileType)), s.ID, fileType); err != nil {
|
||||||
return xerrors.Errorf("dropping source sector from index: %w", err)
|
return xerrors.Errorf("dropping source sector from index: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -694,8 +686,8 @@ func (st *Local) MoveStorage(ctx context.Context, s storage.SectorRef, types sto
|
|||||||
return xerrors.Errorf("moving sector %v(%d): %w", s, fileType, err)
|
return xerrors.Errorf("moving sector %v(%d): %w", s, fileType, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := st.index.StorageDeclareSector(ctx, ID(storiface.PathByType(destIds, fileType)), s.ID, fileType, true); err != nil {
|
if err := st.index.StorageDeclareSector(ctx, storiface.ID(storiface.PathByType(destIds, fileType)), s.ID, fileType, true); err != nil {
|
||||||
return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", s, fileType, ID(storiface.PathByType(destIds, fileType)), err)
|
return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", s, fileType, storiface.ID(storiface.PathByType(destIds, fileType)), err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -706,7 +698,7 @@ func (st *Local) MoveStorage(ctx context.Context, s storage.SectorRef, types sto
|
|||||||
|
|
||||||
var errPathNotFound = xerrors.Errorf("fsstat: path not found")
|
var errPathNotFound = xerrors.Errorf("fsstat: path not found")
|
||||||
|
|
||||||
func (st *Local) FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) {
|
func (st *Local) FsStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) {
|
||||||
st.localLk.RLock()
|
st.localLk.RLock()
|
||||||
defer st.localLk.RUnlock()
|
defer st.localLk.RUnlock()
|
||||||
|
|
||||||
@ -718,4 +710,47 @@ func (st *Local) FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) {
|
|||||||
return p.stat(st.localStorage)
|
return p.stat(st.localStorage)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (st *Local) GenerateSingleVanillaProof(ctx context.Context, minerID abi.ActorID, si storiface.PostSectorChallenge, ppt abi.RegisteredPoStProof) ([]byte, error) {
|
||||||
|
sr := storage.SectorRef{
|
||||||
|
ID: abi.SectorID{
|
||||||
|
Miner: minerID,
|
||||||
|
Number: si.SectorNumber,
|
||||||
|
},
|
||||||
|
ProofType: si.SealProof,
|
||||||
|
}
|
||||||
|
|
||||||
|
var cache string
|
||||||
|
var sealed string
|
||||||
|
if si.Update {
|
||||||
|
src, _, err := st.AcquireSector(ctx, sr, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("acquire sector: %w", err)
|
||||||
|
}
|
||||||
|
cache, sealed = src.UpdateCache, src.Update
|
||||||
|
} else {
|
||||||
|
src, _, err := st.AcquireSector(ctx, sr, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("acquire sector: %w", err)
|
||||||
|
}
|
||||||
|
cache, sealed = src.Cache, src.Sealed
|
||||||
|
}
|
||||||
|
|
||||||
|
if sealed == "" || cache == "" {
|
||||||
|
return nil, errPathNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
psi := ffi.PrivateSectorInfo{
|
||||||
|
SectorInfo: proof.SectorInfo{
|
||||||
|
SealProof: si.SealProof,
|
||||||
|
SectorNumber: si.SectorNumber,
|
||||||
|
SealedCID: si.SealedCID,
|
||||||
|
},
|
||||||
|
CacheDirPath: cache,
|
||||||
|
PoStProofType: ppt,
|
||||||
|
SealedSectorPath: sealed,
|
||||||
|
}
|
||||||
|
|
||||||
|
return ffi.GenerateSingleVanillaProof(psi, si.Challenge)
|
||||||
|
}
|
||||||
|
|
||||||
var _ Store = &Local{}
|
var _ Store = &Local{}
|
||||||
|
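[Note] Local.GenerateSingleVanillaProof above returns the errPathNotFound sentinel when the sealed/cache (or update/update-cache) files are not on any local path; that sentinel is exactly what lets Remote fall back to HTTP in remote.go further down. A minimal, self-contained sketch of the sentinel-dispatch pattern — errNotLocal, readLocal and readRemote are hypothetical stand-ins, not lotus APIs:

package main

import (
	"context"
	"errors"
	"fmt"
)

// errNotLocal plays the role of errPathNotFound above.
var errNotLocal = errors.New("not stored locally")

func readLocal(ctx context.Context, sector uint64) ([]byte, error) {
	return nil, errNotLocal // pretend nothing is stored locally
}

func readRemote(ctx context.Context, sector uint64) ([]byte, error) {
	return []byte("proof"), nil // pretend a remote store had it
}

// readProof mirrors the dispatch in Remote.GenerateSingleVanillaProof:
// any error other than the sentinel is returned as-is; only the
// sentinel triggers the remote fallback.
func readProof(ctx context.Context, sector uint64) ([]byte, error) {
	b, err := readLocal(ctx, sector)
	if err != errNotLocal {
		return b, err
	}
	return readRemote(ctx, sector)
}

func main() {
	b, err := readProof(context.Background(), 1)
	fmt.Println(string(b), err) // "proof <nil>"
}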
3 extern/sector-storage/stores/local_test.go vendored
@ -9,6 +9,7 @@ import (
    "testing"

    "github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
+    "github.com/filecoin-project/lotus/extern/sector-storage/storiface"

    "github.com/google/uuid"
    "github.com/stretchr/testify/require"
@ -51,7 +52,7 @@ func (t *TestingLocalStorage) init(subpath string) error {
    metaFile := filepath.Join(path, MetaFile)

    meta := &LocalStorageMeta{
-        ID:       ID(uuid.New().String()),
+        ID:       storiface.ID(uuid.New().String()),
        Weight:   1,
        CanSeal:  true,
        CanStore: true,
25 extern/sector-storage/stores/mocks/index.go vendored
@ -10,7 +10,6 @@ import (

    abi "github.com/filecoin-project/go-state-types/abi"
    fsutil "github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
-    stores "github.com/filecoin-project/lotus/extern/sector-storage/stores"
    storiface "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
    gomock "github.com/golang/mock/gomock"
)
@ -39,7 +38,7 @@ func (m *MockSectorIndex) EXPECT() *MockSectorIndexMockRecorder {
}

// StorageAttach mocks base method.
-func (m *MockSectorIndex) StorageAttach(arg0 context.Context, arg1 stores.StorageInfo, arg2 fsutil.FsStat) error {
+func (m *MockSectorIndex) StorageAttach(arg0 context.Context, arg1 storiface.StorageInfo, arg2 fsutil.FsStat) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "StorageAttach", arg0, arg1, arg2)
    ret0, _ := ret[0].(error)
@ -53,10 +52,10 @@ func (mr *MockSectorIndexMockRecorder) StorageAttach(arg0, arg1, arg2 interface{
}

// StorageBestAlloc mocks base method.
-func (m *MockSectorIndex) StorageBestAlloc(arg0 context.Context, arg1 storiface.SectorFileType, arg2 abi.SectorSize, arg3 storiface.PathType) ([]stores.StorageInfo, error) {
+func (m *MockSectorIndex) StorageBestAlloc(arg0 context.Context, arg1 storiface.SectorFileType, arg2 abi.SectorSize, arg3 storiface.PathType) ([]storiface.StorageInfo, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "StorageBestAlloc", arg0, arg1, arg2, arg3)
-    ret0, _ := ret[0].([]stores.StorageInfo)
+    ret0, _ := ret[0].([]storiface.StorageInfo)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}
@ -68,7 +67,7 @@ func (mr *MockSectorIndexMockRecorder) StorageBestAlloc(arg0, arg1, arg2, arg3 i
}

// StorageDeclareSector mocks base method.
-func (m *MockSectorIndex) StorageDeclareSector(arg0 context.Context, arg1 stores.ID, arg2 abi.SectorID, arg3 storiface.SectorFileType, arg4 bool) error {
+func (m *MockSectorIndex) StorageDeclareSector(arg0 context.Context, arg1 storiface.ID, arg2 abi.SectorID, arg3 storiface.SectorFileType, arg4 bool) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "StorageDeclareSector", arg0, arg1, arg2, arg3, arg4)
    ret0, _ := ret[0].(error)
@ -82,7 +81,7 @@ func (mr *MockSectorIndexMockRecorder) StorageDeclareSector(arg0, arg1, arg2, ar
}

// StorageDropSector mocks base method.
-func (m *MockSectorIndex) StorageDropSector(arg0 context.Context, arg1 stores.ID, arg2 abi.SectorID, arg3 storiface.SectorFileType) error {
+func (m *MockSectorIndex) StorageDropSector(arg0 context.Context, arg1 storiface.ID, arg2 abi.SectorID, arg3 storiface.SectorFileType) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "StorageDropSector", arg0, arg1, arg2, arg3)
    ret0, _ := ret[0].(error)
@ -96,10 +95,10 @@ func (mr *MockSectorIndexMockRecorder) StorageDropSector(arg0, arg1, arg2, arg3
}

// StorageFindSector mocks base method.
-func (m *MockSectorIndex) StorageFindSector(arg0 context.Context, arg1 abi.SectorID, arg2 storiface.SectorFileType, arg3 abi.SectorSize, arg4 bool) ([]stores.SectorStorageInfo, error) {
+func (m *MockSectorIndex) StorageFindSector(arg0 context.Context, arg1 abi.SectorID, arg2 storiface.SectorFileType, arg3 abi.SectorSize, arg4 bool) ([]storiface.SectorStorageInfo, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "StorageFindSector", arg0, arg1, arg2, arg3, arg4)
-    ret0, _ := ret[0].([]stores.SectorStorageInfo)
+    ret0, _ := ret[0].([]storiface.SectorStorageInfo)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}
@ -126,10 +125,10 @@ func (mr *MockSectorIndexMockRecorder) StorageGetLocks(arg0 interface{}) *gomock
}

// StorageInfo mocks base method.
-func (m *MockSectorIndex) StorageInfo(arg0 context.Context, arg1 stores.ID) (stores.StorageInfo, error) {
+func (m *MockSectorIndex) StorageInfo(arg0 context.Context, arg1 storiface.ID) (storiface.StorageInfo, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "StorageInfo", arg0, arg1)
-    ret0, _ := ret[0].(stores.StorageInfo)
+    ret0, _ := ret[0].(storiface.StorageInfo)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}
@ -141,10 +140,10 @@ func (mr *MockSectorIndexMockRecorder) StorageInfo(arg0, arg1 interface{}) *gomo
}

// StorageList mocks base method.
-func (m *MockSectorIndex) StorageList(arg0 context.Context) (map[stores.ID][]stores.Decl, error) {
+func (m *MockSectorIndex) StorageList(arg0 context.Context) (map[storiface.ID][]storiface.Decl, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "StorageList", arg0)
-    ret0, _ := ret[0].(map[stores.ID][]stores.Decl)
+    ret0, _ := ret[0].(map[storiface.ID][]storiface.Decl)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}
@ -170,7 +169,7 @@ func (mr *MockSectorIndexMockRecorder) StorageLock(arg0, arg1, arg2, arg3 interf
}

// StorageReportHealth mocks base method.
-func (m *MockSectorIndex) StorageReportHealth(arg0 context.Context, arg1 stores.ID, arg2 stores.HealthReport) error {
+func (m *MockSectorIndex) StorageReportHealth(arg0 context.Context, arg1 storiface.ID, arg2 storiface.HealthReport) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "StorageReportHealth", arg0, arg1, arg2)
    ret0, _ := ret[0].(error)
20 extern/sector-storage/stores/mocks/store.go vendored
@ -10,7 +10,6 @@ import (

    abi "github.com/filecoin-project/go-state-types/abi"
    fsutil "github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
-    stores "github.com/filecoin-project/lotus/extern/sector-storage/stores"
    storiface "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
    storage "github.com/filecoin-project/specs-storage/storage"
    gomock "github.com/golang/mock/gomock"
@ -56,7 +55,7 @@ func (mr *MockStoreMockRecorder) AcquireSector(arg0, arg1, arg2, arg3, arg4, arg
}

// FsStat mocks base method.
-func (m *MockStore) FsStat(arg0 context.Context, arg1 stores.ID) (fsutil.FsStat, error) {
+func (m *MockStore) FsStat(arg0 context.Context, arg1 storiface.ID) (fsutil.FsStat, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "FsStat", arg0, arg1)
    ret0, _ := ret[0].(fsutil.FsStat)
@ -70,6 +69,21 @@ func (mr *MockStoreMockRecorder) FsStat(arg0, arg1 interface{}) *gomock.Call {
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FsStat", reflect.TypeOf((*MockStore)(nil).FsStat), arg0, arg1)
}

+// GenerateSingleVanillaProof mocks base method.
+func (m *MockStore) GenerateSingleVanillaProof(arg0 context.Context, arg1 abi.ActorID, arg2 storiface.PostSectorChallenge, arg3 abi.RegisteredPoStProof) ([]byte, error) {
+    m.ctrl.T.Helper()
+    ret := m.ctrl.Call(m, "GenerateSingleVanillaProof", arg0, arg1, arg2, arg3)
+    ret0, _ := ret[0].([]byte)
+    ret1, _ := ret[1].(error)
+    return ret0, ret1
+}
+
+// GenerateSingleVanillaProof indicates an expected call of GenerateSingleVanillaProof.
+func (mr *MockStoreMockRecorder) GenerateSingleVanillaProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+    mr.mock.ctrl.T.Helper()
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateSingleVanillaProof", reflect.TypeOf((*MockStore)(nil).GenerateSingleVanillaProof), arg0, arg1, arg2, arg3)
+}
+
// MoveStorage mocks base method.
func (m *MockStore) MoveStorage(arg0 context.Context, arg1 storage.SectorRef, arg2 storiface.SectorFileType) error {
    m.ctrl.T.Helper()
@ -85,7 +99,7 @@ func (mr *MockStoreMockRecorder) MoveStorage(arg0, arg1, arg2 interface{}) *gomo
}

// Remove mocks base method.
-func (m *MockStore) Remove(arg0 context.Context, arg1 abi.SectorID, arg2 storiface.SectorFileType, arg3 bool, arg4 []stores.ID) error {
+func (m *MockStore) Remove(arg0 context.Context, arg1 abi.SectorID, arg2 storiface.SectorFileType, arg3 bool, arg4 []storiface.ID) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "Remove", arg0, arg1, arg2, arg3, arg4)
    ret0, _ := ret[0].(error)
98 extern/sector-storage/stores/remote.go vendored
@ -14,6 +14,7 @@ import (
    gopath "path"
    "path/filepath"
    "sort"
+    "strings"
    "sync"

    "github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
@ -59,7 +60,7 @@ func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, typ storiface
    }

    var hasPrimary bool
-    var keep []ID
+    var keep []storiface.ID
    for _, info := range si {
        if info.Primary {
            hasPrimary = true
@ -173,14 +174,14 @@ func (r *Remote) AcquireSector(ctx context.Context, s storage.SectorRef, existin
        storiface.SetPathByType(&paths, fileType, dest)
        storiface.SetPathByType(&stores, fileType, storageID)

-        if err := r.index.StorageDeclareSector(ctx, ID(storageID), s.ID, fileType, op == storiface.AcquireMove); err != nil {
+        if err := r.index.StorageDeclareSector(ctx, storiface.ID(storageID), s.ID, fileType, op == storiface.AcquireMove); err != nil {
            log.Warnf("declaring sector %v in %s failed: %+v", s, storageID, err)
            continue
        }

        if op == storiface.AcquireMove {
-            id := ID(storageID)
-            if err := r.deleteFromRemote(ctx, url, []ID{id}); err != nil {
+            id := storiface.ID(storageID)
+            if err := r.deleteFromRemote(ctx, url, []storiface.ID{id}); err != nil {
                log.Warnf("deleting sector %v from %s (delete %s): %+v", s, storageID, url, err)
            }
        }
@ -357,7 +358,7 @@ func (r *Remote) MoveStorage(ctx context.Context, s storage.SectorRef, types sto
    return r.local.MoveStorage(ctx, s, types)
}

-func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, force bool, keepIn []ID) error {
+func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, force bool, keepIn []storiface.ID) error {
    if bits.OnesCount(uint(typ)) != 1 {
        return xerrors.New("delete expects one file type")
    }
@ -390,7 +391,7 @@ storeLoop:
    return nil
}

-func (r *Remote) deleteFromRemote(ctx context.Context, url string, keepIn IDList) error {
+func (r *Remote) deleteFromRemote(ctx context.Context, url string, keepIn storiface.IDList) error {
    if keepIn != nil {
        url = url + "?keep=" + keepIn.String()
    }
@ -417,7 +418,7 @@ func (r *Remote) deleteFromRemote(ctx context.Context, url string, keepIn storif
    return nil
}

-func (r *Remote) FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) {
+func (r *Remote) FsStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) {
    st, err := r.local.FsStat(ctx, id)
    switch err {
    case nil:
@ -762,6 +763,89 @@ func (r *Remote) Reserve(ctx context.Context, sid storage.SectorRef, ft storifac
    }, nil
}

+func (r *Remote) GenerateSingleVanillaProof(ctx context.Context, minerID abi.ActorID, sinfo storiface.PostSectorChallenge, ppt abi.RegisteredPoStProof) ([]byte, error) {
+    p, err := r.local.GenerateSingleVanillaProof(ctx, minerID, sinfo, ppt)
+    if err != errPathNotFound {
+        return p, err
+    }
+
+    sid := abi.SectorID{
+        Miner:  minerID,
+        Number: sinfo.SectorNumber,
+    }
+
+    ft := storiface.FTSealed | storiface.FTCache
+    if sinfo.Update {
+        ft = storiface.FTUpdate | storiface.FTUpdateCache
+    }
+
+    si, err := r.index.StorageFindSector(ctx, sid, ft, 0, false)
+    if err != nil {
+        return nil, xerrors.Errorf("finding sector %d failed: %w", sid, err)
+    }
+
+    requestParams := SingleVanillaParams{
+        Miner:     minerID,
+        Sector:    sinfo,
+        ProofType: ppt,
+    }
+    jreq, err := json.Marshal(requestParams)
+    if err != nil {
+        return nil, err
+    }
+
+    for _, info := range si {
+        for _, u := range info.BaseURLs {
+            url := fmt.Sprintf("%s/vanilla/single", u)
+
+            req, err := http.NewRequest("POST", url, strings.NewReader(string(jreq)))
+            if err != nil {
+                return nil, xerrors.Errorf("request: %w", err)
+            }
+
+            if r.auth != nil {
+                req.Header = r.auth.Clone()
+            }
+            req = req.WithContext(ctx)
+
+            resp, err := http.DefaultClient.Do(req)
+            if err != nil {
+                return nil, xerrors.Errorf("do request: %w", err)
+            }
+
+            if resp.StatusCode != http.StatusOK {
+                if resp.StatusCode == http.StatusNotFound {
+                    log.Debugw("reading vanilla proof from remote not-found response", "url", url, "store", info.ID)
+                    continue
+                }
+                body, err := ioutil.ReadAll(resp.Body)
+                if err != nil {
+                    return nil, xerrors.Errorf("resp.Body ReadAll: %w", err)
+                }
+
+                if err := resp.Body.Close(); err != nil {
+                    log.Error("response close: ", err)
+                }
+
+                return nil, xerrors.Errorf("non-200 code from %s: '%s'", url, string(body))
+            }
+
+            body, err := ioutil.ReadAll(resp.Body)
+            if err != nil {
+                if err := resp.Body.Close(); err != nil {
+                    log.Error("response close: ", err)
+                }
+
+                return nil, xerrors.Errorf("resp.Body ReadAll: %w", err)
+            }
+
+            return body, nil
+        }
+    }
+
+    return nil, xerrors.Errorf("sector not found")
+}
+
var _ Store = &Remote{}

type funcCloser func() error
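[Note] On the wire, the remote fallback above POSTs a JSON-encoded SingleVanillaParams body to <store URL>/vanilla/single and expects the raw vanilla-proof bytes back on 200; a 404 means "this store doesn't have the sector, try the next BaseURL". A hedged client-side sketch of that exchange — the param struct here is a stand-in, with only the field names Miner/Sector/ProofType taken from the diff:

package vanillaclient

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
)

// singleVanillaParams is a hypothetical mirror of stores.SingleVanillaParams;
// field types are simplified for illustration.
type singleVanillaParams struct {
	Miner     uint64
	Sector    interface{}
	ProofType int64
}

// fetchVanilla POSTs the challenge parameters and returns the proof bytes.
func fetchVanilla(storeURL string, params singleVanillaParams) ([]byte, error) {
	jreq, err := json.Marshal(params)
	if err != nil {
		return nil, err
	}
	resp, err := http.Post(storeURL+"/vanilla/single", "application/json", bytes.NewReader(jreq))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotFound {
		return nil, fmt.Errorf("store does not have the sector") // caller tries the next store
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return ioutil.ReadAll(resp.Body) // raw vanilla proof bytes
}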
46 extern/sector-storage/stores/remote_test.go vendored
@ -31,7 +31,7 @@ import (

const metaFile = "sectorstore.json"

-func createTestStorage(t *testing.T, p string, seal bool, att ...*stores.Local) stores.ID {
+func createTestStorage(t *testing.T, p string, seal bool, att ...*stores.Local) storiface.ID {
    if err := os.MkdirAll(p, 0755); err != nil {
        if !os.IsExist(err) {
            require.NoError(t, err)
@ -39,7 +39,7 @@ func createTestStorage(t *testing.T, p string, seal bool, att ...*stores.Local)
    }

    cfg := &stores.LocalStorageMeta{
-        ID:       stores.ID(uuid.New().String()),
+        ID:       storiface.ID(uuid.New().String()),
        Weight:   10,
        CanSeal:  seal,
        CanStore: !seal,
@ -126,14 +126,14 @@ func TestMoveShared(t *testing.T) {

    sp, sid, err := rs1.AcquireSector(ctx, s1ref, storiface.FTNone, storiface.FTSealed, storiface.PathSealing, storiface.AcquireMove)
    require.NoError(t, err)
-    require.Equal(t, id2, stores.ID(sid.Sealed))
+    require.Equal(t, id2, storiface.ID(sid.Sealed))

    data := make([]byte, 2032)
    data[1] = 54
    require.NoError(t, ioutil.WriteFile(sp.Sealed, data, 0666))
    fmt.Println("write to ", sp.Sealed)

-    require.NoError(t, index.StorageDeclareSector(ctx, stores.ID(sid.Sealed), s1ref.ID, storiface.FTSealed, true))
+    require.NoError(t, index.StorageDeclareSector(ctx, storiface.ID(sid.Sealed), s1ref.ID, storiface.FTSealed, true))

    // move to the shared path from the second node (remote move / delete)

@ -142,7 +142,7 @@ func TestMoveShared(t *testing.T) {
    // check that the file still exists
    sp, sid, err = rs2.AcquireSector(ctx, s1ref, storiface.FTSealed, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
    require.NoError(t, err)
-    require.Equal(t, id1, stores.ID(sid.Sealed))
+    require.Equal(t, id1, storiface.ID(sid.Sealed))
    fmt.Println("read from ", sp.Sealed)

    read, err := ioutil.ReadFile(sp.Sealed)
@ -296,12 +296,12 @@ func TestReader(t *testing.T) {
            },

            indexFnc: func(in *mocks.MockSectorIndex, url string) {
-                si := stores.SectorStorageInfo{
+                si := storiface.SectorStorageInfo{
                    URLs: []string{url},
                }

                in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(),
-                    false).Return([]stores.SectorStorageInfo{si}, nil).Times(1)
+                    false).Return([]storiface.SectorStorageInfo{si}, nil).Times(1)
            },

            needHttpServer: true,
@ -315,12 +315,12 @@ func TestReader(t *testing.T) {
            },

            indexFnc: func(in *mocks.MockSectorIndex, url string) {
-                si := stores.SectorStorageInfo{
+                si := storiface.SectorStorageInfo{
                    URLs: []string{url},
                }

                in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(),
-                    false).Return([]stores.SectorStorageInfo{si}, nil).Times(1)
+                    false).Return([]storiface.SectorStorageInfo{si}, nil).Times(1)
            },

            needHttpServer: true,
@ -333,12 +333,12 @@ func TestReader(t *testing.T) {
            },

            indexFnc: func(in *mocks.MockSectorIndex, url string) {
-                si := stores.SectorStorageInfo{
+                si := storiface.SectorStorageInfo{
                    URLs: []string{url},
                }

                in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(),
-                    false).Return([]stores.SectorStorageInfo{si}, nil).Times(1)
+                    false).Return([]storiface.SectorStorageInfo{si}, nil).Times(1)
            },

            needHttpServer: true,
@ -390,12 +390,12 @@ func TestReader(t *testing.T) {
            },

            indexFnc: func(in *mocks.MockSectorIndex, url string) {
-                si := stores.SectorStorageInfo{
+                si := storiface.SectorStorageInfo{
                    URLs: []string{url},
                }

                in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(),
-                    false).Return([]stores.SectorStorageInfo{si}, nil).Times(1)
+                    false).Return([]storiface.SectorStorageInfo{si}, nil).Times(1)
            },

            needHttpServer: true,
@ -411,12 +411,12 @@ func TestReader(t *testing.T) {
            },

            indexFnc: func(in *mocks.MockSectorIndex, url string) {
-                si := stores.SectorStorageInfo{
+                si := storiface.SectorStorageInfo{
                    URLs: []string{url},
                }

                in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(),
-                    false).Return([]stores.SectorStorageInfo{si}, nil).Times(1)
+                    false).Return([]storiface.SectorStorageInfo{si}, nil).Times(1)
            },

            needHttpServer: true,
@ -637,12 +637,12 @@ func TestCheckIsUnsealed(t *testing.T) {
            },

            indexFnc: func(in *mocks.MockSectorIndex, url string) {
-                si := stores.SectorStorageInfo{
+                si := storiface.SectorStorageInfo{
                    URLs: []string{url},
                }

                in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(),
-                    false).Return([]stores.SectorStorageInfo{si}, nil).Times(1)
+                    false).Return([]storiface.SectorStorageInfo{si}, nil).Times(1)
            },

            needHttpServer: true,
@ -655,12 +655,12 @@ func TestCheckIsUnsealed(t *testing.T) {
            },

            indexFnc: func(in *mocks.MockSectorIndex, url string) {
-                si := stores.SectorStorageInfo{
+                si := storiface.SectorStorageInfo{
                    URLs: []string{url},
                }

                in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(),
-                    false).Return([]stores.SectorStorageInfo{si}, nil).Times(1)
+                    false).Return([]storiface.SectorStorageInfo{si}, nil).Times(1)
            },

            needHttpServer: true,
@ -691,12 +691,12 @@ func TestCheckIsUnsealed(t *testing.T) {
            },

            indexFnc: func(in *mocks.MockSectorIndex, url string) {
-                si := stores.SectorStorageInfo{
+                si := storiface.SectorStorageInfo{
                    URLs: []string{url},
                }

                in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(),
-                    false).Return([]stores.SectorStorageInfo{si}, nil).Times(1)
+                    false).Return([]storiface.SectorStorageInfo{si}, nil).Times(1)
            },

            needHttpServer: true,
@ -718,12 +718,12 @@ func TestCheckIsUnsealed(t *testing.T) {
            },

            indexFnc: func(in *mocks.MockSectorIndex, url string) {
-                si := stores.SectorStorageInfo{
+                si := storiface.SectorStorageInfo{
                    URLs: []string{url},
                }

                in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(),
-                    false).Return([]stores.SectorStorageInfo{si}, nil).Times(1)
+                    false).Return([]storiface.SectorStorageInfo{si}, nil).Times(1)
            },

            needHttpServer: true,
2 extern/sector-storage/storiface/ffi.go vendored
@ -28,4 +28,4 @@ func (i UnpaddedByteIndex) Valid() error {

type PaddedByteIndex uint64

-type RGetter func(ctx context.Context, id abi.SectorID) (cid.Cid, error)
+type RGetter func(ctx context.Context, id abi.SectorID) (sealed cid.Cid, update bool, err error)
80 extern/sector-storage/storiface/index.go vendored Normal file
@ -0,0 +1,80 @@
+package storiface
+
+import (
+    "strings"
+
+    "github.com/filecoin-project/go-state-types/abi"
+    "github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
+)
+
+// ID identifies sector storage by UUID. One sector storage should map to one
+// filesystem, local or networked / shared by multiple machines
+type ID string
+
+const IDSep = "."
+
+type IDList []ID
+
+func (il IDList) String() string {
+    l := make([]string, len(il))
+    for i, id := range il {
+        l[i] = string(id)
+    }
+    return strings.Join(l, IDSep)
+}
+
+func ParseIDList(s string) IDList {
+    strs := strings.Split(s, IDSep)
+    out := make([]ID, len(strs))
+    for i, str := range strs {
+        out[i] = ID(str)
+    }
+    return out
+}
+
+type Group = string
+
+type StorageInfo struct {
+    ID         ID
+    URLs       []string // TODO: Support non-http transports
+    Weight     uint64
+    MaxStorage uint64
+
+    CanSeal  bool
+    CanStore bool
+
+    Groups  []Group
+    AllowTo []Group
+}
+
+type HealthReport struct {
+    Stat fsutil.FsStat
+    Err  string
+}
+
+type SectorStorageInfo struct {
+    ID       ID
+    URLs     []string // TODO: Support non-http transports
+    BaseURLs []string
+    Weight   uint64
+
+    CanSeal  bool
+    CanStore bool
+
+    Primary bool
+}
+
+type Decl struct {
+    abi.SectorID
+    SectorFileType
+}
+
+type StoragePath struct {
+    ID     ID
+    Weight uint64
+
+    LocalPath string
+
+    CanSeal  bool
+    CanStore bool
+}
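[Note] A small detail in the new storiface.IDList: the separator is ".", which is safe for UUID-shaped storage IDs (UUIDs only contain hex digits and "-"), and ParseIDList("") yields a single empty ID rather than an empty list. A quick round-trip check using the same Join/Split logic (example UUIDs are made up):

package main

import (
	"fmt"
	"strings"
)

const idSep = "." // same separator as storiface.IDSep

func main() {
	ids := []string{
		"6f0a1017-2e8e-45b9-9b25-2f4f2a9f9d4e",
		"a3a3c4a6-46a7-4b6b-b7a7-8e9c0e6e2a11",
	}
	joined := strings.Join(ids, idSep)
	parsed := strings.Split(joined, idSep)
	fmt.Println(len(parsed) == 2 && parsed[0] == ids[0]) // true: lossless for UUID IDs

	// Edge case mirrored by ParseIDList: splitting "" gives one empty element.
	fmt.Println(len(strings.Split("", idSep))) // 1
}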
98 extern/sector-storage/storiface/resources.go vendored
@ -463,6 +463,104 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
            MaxParallelism: 1,
            GPUUtilization: 1.0,

+            BaseMinMemory: 8 << 20,
+        },
+    },
+    sealtasks.TTGenerateWindowPoSt: {
+        abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{
+            MaxMemory: 120 << 30, // TODO: Confirm
+            MinMemory: 60 << 30,
+
+            MaxParallelism:    -1,
+            MaxParallelismGPU: 6,
+            GPUUtilization:    1.0,
+
+            BaseMinMemory: 64 << 30, // params
+        },
+        abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{
+            MaxMemory: 96 << 30,
+            MinMemory: 30 << 30,
+
+            MaxParallelism:    -1,
+            MaxParallelismGPU: 6,
+            GPUUtilization:    1.0,
+
+            BaseMinMemory: 32 << 30, // params
+        },
+        abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{
+            MaxMemory: 3 << 29, // 1.5G
+            MinMemory: 1 << 30,
+
+            MaxParallelism: 1, // This is fine
+            GPUUtilization: 1.0,
+
+            BaseMinMemory: 10 << 30,
+        },
+        abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{
+            MaxMemory: 2 << 10,
+            MinMemory: 2 << 10,
+
+            MaxParallelism: 1,
+            GPUUtilization: 1.0,
+
+            BaseMinMemory: 2 << 10,
+        },
+        abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{
+            MaxMemory: 8 << 20,
+            MinMemory: 8 << 20,
+
+            MaxParallelism: 1,
+            GPUUtilization: 1.0,
+
+            BaseMinMemory: 8 << 20,
+        },
+    },
+    sealtasks.TTGenerateWinningPoSt: {
+        abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{
+            MaxMemory: 1 << 30,
+            MinMemory: 1 << 30,
+
+            MaxParallelism:    -1,
+            MaxParallelismGPU: 6,
+            GPUUtilization:    1.0,
+
+            BaseMinMemory: 64 << 30, // params
+        },
+        abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{
+            MaxMemory: 1 << 30,
+            MinMemory: 1 << 30,
+
+            MaxParallelism:    -1,
+            MaxParallelismGPU: 6,
+            GPUUtilization:    1.0,
+
+            BaseMinMemory: 32 << 30, // params
+        },
+        abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{
+            MaxMemory: 2 << 10,
+            MinMemory: 2 << 10,
+
+            MaxParallelism: 1, // This is fine
+            GPUUtilization: 1.0,
+
+            BaseMinMemory: 10 << 30,
+        },
+        abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{
+            MaxMemory: 2 << 10,
+            MinMemory: 2 << 10,
+
+            MaxParallelism: 1,
+            GPUUtilization: 1.0,
+
+            BaseMinMemory: 2 << 10,
+        },
+        abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{
+            MaxMemory: 8 << 20,
+            MinMemory: 8 << 20,
+
+            MaxParallelism: 1,
+            GPUUtilization: 1.0,
+
            BaseMinMemory: 8 << 20,
        },
    },
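[Note] The memory figures above use shift notation: x << 30 is x GiB, x << 20 is x MiB, x << 10 is x KiB, so 3 << 29 is 3 × 2^29 bytes = 1.5 GiB (matching the "1.5G" comment) and 120 << 30 is 120 GiB. A one-liner to sanity-check the arithmetic, if useful:

package main

import "fmt"

func main() {
	const mib = int64(1) << 20
	const gib = int64(1) << 30

	fmt.Println((int64(3) << 29) / mib)   // 1536 MiB, i.e. 1.5 GiB -- the "1.5G" comment checks out
	fmt.Println((int64(120) << 30) / gib) // 120 GiB for the 64GiB-sector window PoSt ceiling
}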
25 extern/sector-storage/storiface/worker.go vendored
@ -10,6 +10,7 @@ import (
    "github.com/ipfs/go-cid"

    "github.com/filecoin-project/go-state-types/abi"
+    "github.com/filecoin-project/specs-actors/actors/runtime/proof"
    "github.com/filecoin-project/specs-storage/storage"

    "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
@ -67,6 +68,7 @@ func (wr WorkerResources) ResourceSpec(spt abi.RegisteredSealProof, tt sealtasks

type WorkerStats struct {
    Info    WorkerInfo
+    Tasks   []sealtasks.TaskType
    Enabled bool

    MemUsedMin uint64
@ -114,6 +116,7 @@ var _ fmt.Stringer = &CallID{}
var UndefCall CallID

type WorkerCalls interface {
+    // async
    AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (CallID, error)
    SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (CallID, error)
    SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (CallID, error)
@ -129,6 +132,28 @@ type WorkerCalls interface {
    MoveStorage(ctx context.Context, sector storage.SectorRef, types SectorFileType) (CallID, error)
    UnsealPiece(context.Context, storage.SectorRef, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (CallID, error)
    Fetch(context.Context, storage.SectorRef, SectorFileType, PathType, AcquireMode) (CallID, error)
+
+    // sync
+    GenerateWinningPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []PostSectorChallenge, randomness abi.PoStRandomness) ([]proof.PoStProof, error)
+    GenerateWindowPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []PostSectorChallenge, partitionIdx int, randomness abi.PoStRandomness) (WindowPoStResult, error)
+}
+
+type WindowPoStResult struct {
+    PoStProofs proof.PoStProof
+    Skipped    []abi.SectorID
+}
+
+type PostSectorChallenge struct {
+    SealProof    abi.RegisteredSealProof
+    SectorNumber abi.SectorNumber
+    SealedCID    cid.Cid
+    Challenge    []uint64
+    Update       bool
+}
+
+type FallbackChallenges struct {
+    Sectors    []abi.SectorNumber
+    Challenges map[abi.SectorNumber][]uint64
}

type ErrorCode int
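[Note] One design point worth calling out: unlike the async seal methods, which return a CallID and deliver results later, the two new PoSt calls are synchronous, and GenerateWindowPoSt reports unreadable sectors through WindowPoStResult.Skipped rather than an error, because the call crosses a jsonrpc boundary and the caller (the window PoSt runner) wants to retry with those sectors excluded. A toy sketch of that result shape, with hypothetical names standing in for the real types:

package main

import "fmt"

// result is a hypothetical mirror of storiface.WindowPoStResult's shape.
type result struct {
	Proof   []byte
	Skipped []uint64 // sector numbers whose challenges could not be read
}

func prove(sectors []uint64) result {
	var r result
	for _, s := range sectors {
		if s%7 == 0 { // pretend every 7th sector's files are unreadable
			r.Skipped = append(r.Skipped, s)
		}
	}
	if len(r.Skipped) > 0 {
		return r // no proof; the caller re-runs without the skipped sectors
	}
	r.Proof = []byte("proof")
	return r
}

func main() {
	r := prove([]uint64{1, 2, 7, 9})
	fmt.Println(len(r.Proof) == 0, r.Skipped) // true [7]
}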
8 extern/sector-storage/teststorage_test.go vendored
@ -31,6 +31,14 @@ func (t *testExec) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID,
    panic("implement me")
}

+func (t *testExec) GenerateWinningPoStWithVanilla(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, proofs [][]byte) ([]proof.PoStProof, error) {
+    panic("implement me")
+}
+
+func (t *testExec) GenerateWindowPoStWithVanilla(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, proofs [][]byte, partitionIdx int) (proof.PoStProof, error) {
+    panic("implement me")
+}
+
func (t *testExec) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
    panic("implement me")
}
2 extern/sector-storage/testworker_test.go vendored
@ -125,7 +125,7 @@ func (t *testWorker) TaskTypes(ctx context.Context) (map[sealtasks.TaskType]stru
    return t.acceptTasks, nil
}

-func (t *testWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) {
+func (t *testWorker) Paths(ctx context.Context) ([]storiface.StoragePath, error) {
    return t.lstor.Local(ctx)
}
147
extern/sector-storage/worker_local.go
vendored
147
extern/sector-storage/worker_local.go
vendored
@ -20,6 +20,7 @@ import (
|
|||||||
ffi "github.com/filecoin-project/filecoin-ffi"
|
ffi "github.com/filecoin-project/filecoin-ffi"
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/go-statestore"
|
"github.com/filecoin-project/go-statestore"
|
||||||
|
"github.com/filecoin-project/specs-actors/actors/runtime/proof"
|
||||||
"github.com/filecoin-project/specs-storage/storage"
|
"github.com/filecoin-project/specs-storage/storage"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||||
@ -38,6 +39,9 @@ type WorkerConfig struct {
|
|||||||
// worker regardless of its currently available resources. Used in testing
|
// worker regardless of its currently available resources. Used in testing
|
||||||
// with the local worker.
|
// with the local worker.
|
||||||
IgnoreResourceFiltering bool
|
IgnoreResourceFiltering bool
|
||||||
|
|
||||||
|
MaxParallelChallengeReads int // 0 = no limit
|
||||||
|
ChallengeReadTimeout time.Duration // 0 = no timeout
|
||||||
}
|
}
|
||||||
|
|
||||||
// used do provide custom proofs impl (mostly used in testing)
|
// used do provide custom proofs impl (mostly used in testing)
|
||||||
@ -61,6 +65,9 @@ type LocalWorker struct {
|
|||||||
running sync.WaitGroup
|
running sync.WaitGroup
|
||||||
taskLk sync.Mutex
|
taskLk sync.Mutex
|
||||||
|
|
||||||
|
challengeThrottle chan struct{}
|
||||||
|
challengeReadTimeout time.Duration
|
||||||
|
|
||||||
session uuid.UUID
|
session uuid.UUID
|
||||||
testDisable int64
|
testDisable int64
|
||||||
closing chan struct{}
|
closing chan struct{}
|
||||||
@ -81,13 +88,18 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, envLookup EnvFunc,
|
|||||||
ct: &workerCallTracker{
|
ct: &workerCallTracker{
|
||||||
st: cst,
|
st: cst,
|
||||||
},
|
},
|
||||||
acceptTasks: acceptTasks,
|
acceptTasks: acceptTasks,
|
||||||
executor: executor,
|
executor: executor,
|
||||||
noSwap: wcfg.NoSwap,
|
noSwap: wcfg.NoSwap,
|
||||||
envLookup: envLookup,
|
envLookup: envLookup,
|
||||||
ignoreResources: wcfg.IgnoreResourceFiltering,
|
ignoreResources: wcfg.IgnoreResourceFiltering,
|
||||||
session: uuid.New(),
|
challengeReadTimeout: wcfg.ChallengeReadTimeout,
|
||||||
closing: make(chan struct{}),
|
session: uuid.New(),
|
||||||
|
closing: make(chan struct{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
if wcfg.MaxParallelChallengeReads > 0 {
|
||||||
|
w.challengeThrottle = make(chan struct{}, wcfg.MaxParallelChallengeReads)
|
||||||
}
|
}
|
||||||
|
|
||||||
if w.executor == nil {
|
if w.executor == nil {
|
||||||
@ -154,7 +166,7 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector stor
|
|||||||
}
|
}
|
||||||
|
|
||||||
sid := storiface.PathByType(storageIDs, fileType)
|
sid := storiface.PathByType(storageIDs, fileType)
|
||||||
if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector.ID, fileType, l.op == storiface.AcquireMove); err != nil {
|
if err := l.w.sindex.StorageDeclareSector(ctx, storiface.ID(sid), sector.ID, fileType, l.op == storiface.AcquireMove); err != nil {
|
||||||
log.Errorf("declare sector error: %+v", err)
|
log.Errorf("declare sector error: %+v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -559,6 +571,123 @@ func (l *LocalWorker) UnsealPiece(ctx context.Context, sector storage.SectorRef,
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (l *LocalWorker) GenerateWinningPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []storiface.PostSectorChallenge, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
|
||||||
|
sb, err := l.executor()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
+	// don't throttle winningPoSt
+	// * Always want it done asap
+	// * It's usually just one sector
+	var wg sync.WaitGroup
+	wg.Add(len(sectors))
+
+	vproofs := make([][]byte, len(sectors))
+	var rerr error
+
+	for i, s := range sectors {
+		go func(i int, s storiface.PostSectorChallenge) {
+			defer wg.Done()
+
+			if l.challengeReadTimeout > 0 {
+				var cancel context.CancelFunc
+				ctx, cancel = context.WithTimeout(ctx, l.challengeReadTimeout)
+				defer cancel()
+			}
+
+			vanilla, err := l.storage.GenerateSingleVanillaProof(ctx, mid, s, ppt)
+			if err != nil {
+				rerr = multierror.Append(rerr, xerrors.Errorf("get winning sector:%d,vanila failed: %w", s.SectorNumber, err))
+				return
+			}
+			if vanilla == nil {
+				rerr = multierror.Append(rerr, xerrors.Errorf("get winning sector:%d,vanila is nil", s.SectorNumber))
+			}
+			vproofs[i] = vanilla
+		}(i, s)
+	}
+	wg.Wait()
+
+	if rerr != nil {
+		return nil, rerr
+	}
+
+	return sb.GenerateWinningPoStWithVanilla(ctx, ppt, mid, randomness, vproofs)
+}
+
+func (l *LocalWorker) GenerateWindowPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []storiface.PostSectorChallenge, partitionIdx int, randomness abi.PoStRandomness) (storiface.WindowPoStResult, error) {
+	sb, err := l.executor()
+	if err != nil {
+		return storiface.WindowPoStResult{}, err
+	}
+
+	var slk sync.Mutex
+	var skipped []abi.SectorID
+
+	var wg sync.WaitGroup
+	wg.Add(len(sectors))
+
+	vproofs := make([][]byte, len(sectors))
+
+	for i, s := range sectors {
+		if l.challengeThrottle != nil {
+			select {
+			case l.challengeThrottle <- struct{}{}:
+			case <-ctx.Done():
+				return storiface.WindowPoStResult{}, xerrors.Errorf("context error waiting on challengeThrottle %w", err)
+			}
+		}
+
+		go func(i int, s storiface.PostSectorChallenge) {
+			defer wg.Done()
+			defer func() {
+				if l.challengeThrottle != nil {
+					<-l.challengeThrottle
+				}
+			}()
+
+			if l.challengeReadTimeout > 0 {
+				var cancel context.CancelFunc
+				ctx, cancel = context.WithTimeout(ctx, l.challengeReadTimeout)
+				defer cancel()
+			}
+
+			vanilla, err := l.storage.GenerateSingleVanillaProof(ctx, mid, s, ppt)
+			slk.Lock()
+			defer slk.Unlock()
+
+			if err != nil || vanilla == nil {
+				skipped = append(skipped, abi.SectorID{
+					Miner:  mid,
+					Number: s.SectorNumber,
+				})
+				log.Errorf("reading PoSt challenge for sector %d, vlen:%d, err: %s", s.SectorNumber, len(vanilla), err)
+				return
+			}
+
+			vproofs[i] = vanilla
+		}(i, s)
+	}
+	wg.Wait()
+
+	if len(skipped) > 0 {
+		// This should happen rarely because before entering GenerateWindowPoSt we check all sectors by reading challenges.
+		// When it does happen, window post runner logic will just re-check sectors, and retry with newly-discovered-bad sectors skipped
+		log.Errorf("couldn't read some challenges (skipped %d)", len(skipped))
+
+		// note: can't return an error as this is a jsonrpc call
+		return storiface.WindowPoStResult{Skipped: skipped}, nil
+	}
+
+	res, err := sb.GenerateWindowPoStWithVanilla(ctx, ppt, mid, randomness, vproofs, partitionIdx)
+
+	return storiface.WindowPoStResult{
+		PoStProofs: res,
+		Skipped:    skipped,
+	}, err
+}
+
 func (l *LocalWorker) TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) {
 	l.taskLk.Lock()
 	defer l.taskLk.Unlock()
@@ -582,7 +711,7 @@ func (l *LocalWorker) TaskEnable(ctx context.Context, tt sealtasks.TaskType) err
 	return nil
 }

-func (l *LocalWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) {
+func (l *LocalWorker) Paths(ctx context.Context) ([]storiface.StoragePath, error) {
 	return l.localStore.Local(ctx)
 }
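The challengeThrottle used in GenerateWindowPoSt above is a plain buffered channel acting as a counting semaphore: a send acquires a slot before each challenge-read goroutine is spawned, and the deferred receive releases it. A minimal standalone sketch of the same bounded-concurrency pattern (names and limits here are illustrative, not part of this commit):

package main

import (
	"fmt"
	"sync"
)

// runThrottled runs at most `limit` calls to work concurrently,
// using a buffered channel as a counting semaphore, mirroring
// the challengeThrottle channel above.
func runThrottled(limit, tasks int, work func(i int)) {
	throttle := make(chan struct{}, limit)

	var wg sync.WaitGroup
	wg.Add(tasks)
	for i := 0; i < tasks; i++ {
		throttle <- struct{}{} // acquire a slot; blocks once `limit` are in flight
		go func(i int) {
			defer wg.Done()
			defer func() { <-throttle }() // release the slot
			work(i)
		}(i)
	}
	wg.Wait()
}

func main() {
	runThrottled(8, 128, func(i int) { fmt.Println("challenge read", i) })
}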
65 extern/sector-storage/worker_local_test.go vendored Normal file
@@ -0,0 +1,65 @@
+package sectorstorage
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/ipfs/go-datastore"
+	"github.com/stretchr/testify/require"
+
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-statestore"
+
+	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
+)
+
+type hangStore struct {
+	stores.Store
+
+	challengeReads chan struct{}
+	unhang         chan struct{}
+}
+
+func (s *hangStore) GenerateSingleVanillaProof(ctx context.Context, minerID abi.ActorID, si storiface.PostSectorChallenge, ppt abi.RegisteredPoStProof) ([]byte, error) {
+	select {
+	case s.challengeReads <- struct{}{}:
+	default:
+		panic("this shouldn't happen")
+	}
+	<-s.unhang
+	<-s.challengeReads
+	return nil, nil
+}
+
+func TestWorkerChallengeThrottle(t *testing.T) {
+	ctx := context.Background()
+
+	hs := &hangStore{
+		challengeReads: make(chan struct{}, 8),
+		unhang:         make(chan struct{}),
+	}
+
+	wcfg := WorkerConfig{
+		MaxParallelChallengeReads: 8,
+	}
+
+	lw := NewLocalWorker(wcfg, hs, nil, nil, nil, statestore.New(datastore.NewMapDatastore()))
+
+	var ch []storiface.PostSectorChallenge
+	for i := 0; i < 128; i++ {
+		ch = append(ch, storiface.PostSectorChallenge{
+			SealProof:    0,
+			SectorNumber: abi.SectorNumber(i),
+		})
+	}
+
+	go func() {
+		time.Sleep(100 * time.Millisecond)
+		close(hs.unhang)
+	}()
+
+	_, err := lw.GenerateWindowPoSt(ctx, abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 0, ch, 0, nil)
+	require.NoError(t, err)
+}
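TestWorkerChallengeThrottle verifies the bound indirectly: every GenerateSingleVanillaProof call parks on `unhang`, and the non-blocking send into the size-8 `challengeReads` channel panics if a ninth read ever starts before one finishes. The same assertion can be written with an atomic in-flight counter; a sketch of that variant (illustrative only, not part of this commit):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// blockingProbe counts how many callers are inside work() at once and
// fails loudly if the observed concurrency ever exceeds max; the same
// idea as hangStore's challengeReads channel, expressed with a counter.
type blockingProbe struct {
	inFlight int64
	max      int64
	unhang   chan struct{}
}

func (p *blockingProbe) work() {
	if atomic.AddInt64(&p.inFlight, 1) > p.max {
		panic("concurrency limit exceeded")
	}
	<-p.unhang // hold the slot until the test releases everyone
	atomic.AddInt64(&p.inFlight, -1)
}

func main() {
	p := &blockingProbe{max: 8, unhang: make(chan struct{})}
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); p.work() }()
	}
	time.Sleep(100 * time.Millisecond)
	close(p.unhang)
	wg.Wait()
	fmt.Println("observed concurrency never exceeded", p.max)
}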
2 go.mod
@@ -55,7 +55,7 @@ require (
 	github.com/filecoin-project/specs-actors/v5 v5.0.4
 	github.com/filecoin-project/specs-actors/v6 v6.0.1
 	github.com/filecoin-project/specs-actors/v7 v7.0.0
-	github.com/filecoin-project/specs-storage v0.2.0
+	github.com/filecoin-project/specs-storage v0.2.1-0.20220310131636-3fe98b33e7ea
 	github.com/filecoin-project/test-vectors/schema v0.0.5
 	github.com/gbrlsnchs/jwt/v3 v3.0.1
 	github.com/gdamore/tcell/v2 v2.2.0
4 go.sum
@@ -393,8 +393,8 @@ github.com/filecoin-project/specs-actors/v7 v7.0.0-20211222192039-c83bea50c402/g
 github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1.0.20220118005651-2470cb39827e/go.mod h1:TA5FwCna+Yi36POaT7SLKXsgEDvJwc0V/L6ZsO19B9M=
 github.com/filecoin-project/specs-actors/v7 v7.0.0 h1:FQN7tjt3o68hfb3qLFSJBoLMuOFY0REkFVLO/zXj8RU=
 github.com/filecoin-project/specs-actors/v7 v7.0.0/go.mod h1:TA5FwCna+Yi36POaT7SLKXsgEDvJwc0V/L6ZsO19B9M=
-github.com/filecoin-project/specs-storage v0.2.0 h1:Y4UDv0apRQ3zI2GiPPubi8JblpUZZphEdaJUxCutfyg=
-github.com/filecoin-project/specs-storage v0.2.0/go.mod h1:Tb88Zq+IBJbvAn3mS89GYj3jdRThBTE/771HCVZdRJU=
+github.com/filecoin-project/specs-storage v0.2.1-0.20220310131636-3fe98b33e7ea h1:fkZ9mMPRH5a+KeZTtqHRdENeiM98e5PKPXP7pWiZtUI=
+github.com/filecoin-project/specs-storage v0.2.1-0.20220310131636-3fe98b33e7ea/go.mod h1:Tb88Zq+IBJbvAn3mS89GYj3jdRThBTE/771HCVZdRJU=
 github.com/filecoin-project/storetheindex v0.3.5 h1:KoS9TvjPm6zIZfUH8atAHJbVHOO7GTP1MdTG+v0eE+Q=
 github.com/filecoin-project/storetheindex v0.3.5/go.mod h1:0r3d0kSpK63O6AvLr1CjAINLi+nWD49clzcnKV+GLpI=
 github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=
@@ -7,16 +7,27 @@ import (
 	"fmt"
 	"io/ioutil"
 	"net"
+	"net/http"
 	"sync"
 	"testing"
 	"time"
+
+	"github.com/ipfs/go-datastore"
+	"github.com/ipfs/go-datastore/namespace"
+	libp2pcrypto "github.com/libp2p/go-libp2p-core/crypto"
+	"github.com/libp2p/go-libp2p-core/peer"
+	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
+	"github.com/stretchr/testify/require"

 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/go-state-types/exitcode"
 	"github.com/filecoin-project/go-state-types/network"
+	"github.com/filecoin-project/go-statestore"
 	"github.com/filecoin-project/go-storedcounter"
+	miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
+	power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power"
+
 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/api/v1api"
@@ -32,9 +43,11 @@ import (
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/wallet"
 	"github.com/filecoin-project/lotus/cmd/lotus-seed/seed"
+	"github.com/filecoin-project/lotus/cmd/lotus-worker/sealworker"
 	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
 	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
 	"github.com/filecoin-project/lotus/extern/sector-storage/mock"
+	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
 	"github.com/filecoin-project/lotus/genesis"
 	"github.com/filecoin-project/lotus/markets/idxprov"
 	idxprov_test "github.com/filecoin-project/lotus/markets/idxprov/idxprov_test"
@@ -46,13 +59,6 @@ import (
 	testing2 "github.com/filecoin-project/lotus/node/modules/testing"
 	"github.com/filecoin-project/lotus/node/repo"
 	"github.com/filecoin-project/lotus/storage/mockstorage"
-	miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
-	power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power"
-	"github.com/ipfs/go-datastore"
-	libp2pcrypto "github.com/libp2p/go-libp2p-core/crypto"
-	"github.com/libp2p/go-libp2p-core/peer"
-	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
-	"github.com/stretchr/testify/require"
 )

 func init() {
@@ -111,10 +117,12 @@ type Ensemble struct {
 	inactive struct {
 		fullnodes []*TestFullNode
 		miners    []*TestMiner
+		workers   []*TestWorker
 	}
 	active struct {
 		fullnodes []*TestFullNode
 		miners    []*TestMiner
+		workers   []*TestWorker
 		bms       map[*TestMiner]*BlockMiner
 	}
 	genesis struct {
@@ -275,6 +283,32 @@ func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeO
 	return n
 }

+// Worker enrolls a new worker, using the provided full node for chain
+// interactions.
+func (n *Ensemble) Worker(minerNode *TestMiner, worker *TestWorker, opts ...NodeOpt) *Ensemble {
+	require.NotNil(n.t, minerNode, "miner node required when instantiating worker")
+
+	options := DefaultNodeOpts
+	for _, o := range opts {
+		err := o(&options)
+		require.NoError(n.t, err)
+	}
+
+	rl, err := net.Listen("tcp", "127.0.0.1:")
+	require.NoError(n.t, err)
+
+	*worker = TestWorker{
+		t:              n.t,
+		MinerNode:      minerNode,
+		RemoteListener: rl,
+		options:        options,
+	}
+
+	n.inactive.workers = append(n.inactive.workers, worker)
+
+	return n
+}
+
 // Start starts all enrolled nodes.
 func (n *Ensemble) Start() *Ensemble {
 	ctx := context.Background()
@@ -444,6 +478,7 @@ func (n *Ensemble) Start() *Ensemble {
 		// require.NoError(n.t, err)

 		r := repo.NewMemory(nil)
+		n.t.Cleanup(r.Cleanup)

 		lr, err := r.Lock(repo.StorageMiner)
 		require.NoError(n.t, err)
@@ -505,6 +540,16 @@ func (n *Ensemble) Start() *Ensemble {
 		_, err = nic.Next()
 		require.NoError(n.t, err)

+		// using real proofs, therefore need real sectors.
+		if !n.bootstrapped && !n.options.mockProofs {
+			psd := m.PresealDir
+			err := lr.SetStorage(func(sc *stores.StorageConfig) {
+				sc.StoragePaths = append(sc.StoragePaths, stores.LocalPath{Path: psd})
+			})
+
+			require.NoError(n.t, err)
+		}
+
 		err = lr.Close()
 		require.NoError(n.t, err)

@@ -524,6 +569,8 @@ func (n *Ensemble) Start() *Ensemble {
 			require.NoError(n.t, err2)
 		}

+		noLocal := m.options.minerNoLocalSealing
+
 		var mineBlock = make(chan lotusminer.MineReq)
 		opts := []node.Option{
 			node.StorageMiner(&m.StorageMiner, cfg.Subsystems),
@@ -540,6 +587,14 @@ func (n *Ensemble) Start() *Ensemble {
 			// regardless of system pressure.
 			node.Override(new(sectorstorage.SealerConfig), func() sectorstorage.SealerConfig {
 				scfg := config.DefaultStorageMiner()
+
+				if noLocal {
+					scfg.Storage.AllowAddPiece = false
+					scfg.Storage.AllowPreCommit1 = false
+					scfg.Storage.AllowPreCommit2 = false
+					scfg.Storage.AllowCommit = false
+				}
+
 				scfg.Storage.ResourceFiltering = sectorstorage.ResourceFilteringDisabled
 				return scfg.Storage
 			}),
@@ -591,14 +646,10 @@ func (n *Ensemble) Start() *Ensemble {
 		stop, err := node.New(ctx, opts...)
 		require.NoError(n.t, err)

-		// using real proofs, therefore need real sectors.
-		if !n.bootstrapped && !n.options.mockProofs {
-			err := m.StorageAddLocal(ctx, m.PresealDir)
-			require.NoError(n.t, err)
-		}
-
 		n.t.Cleanup(func() { _ = stop(context.Background()) })

+		m.BaseAPI = m.StorageMiner
+
 		// Are we hitting this node through its RPC?
 		if m.options.rpc {
 			withRPC := minerRpc(n.t, m)
@@ -624,6 +675,65 @@ func (n *Ensemble) Start() *Ensemble {
 	// to active, so clear the slice.
 	n.inactive.miners = n.inactive.miners[:0]

+	// ---------------------
+	//       WORKERS
+	// ---------------------
+
+	// Create all inactive workers.
+	for i, m := range n.inactive.workers {
+		r := repo.NewMemory(nil)
+
+		lr, err := r.Lock(repo.Worker)
+		require.NoError(n.t, err)
+
+		ds, err := lr.Datastore(context.Background(), "/metadata")
+		require.NoError(n.t, err)
+
+		addr := m.RemoteListener.Addr().String()
+
+		localStore, err := stores.NewLocal(ctx, lr, m.MinerNode, []string{"http://" + addr + "/remote"})
+		require.NoError(n.t, err)
+
+		auth := http.Header(nil)
+
+		remote := stores.NewRemote(localStore, m.MinerNode, auth, 20, &stores.DefaultPartialFileHandler{})
+		store := m.options.workerStorageOpt(remote)
+
+		fh := &stores.FetchHandler{Local: localStore, PfHandler: &stores.DefaultPartialFileHandler{}}
+		m.FetchHandler = fh.ServeHTTP
+
+		wsts := statestore.New(namespace.Wrap(ds, modules.WorkerCallsPrefix))
+
+		workerApi := &sealworker.Worker{
+			LocalWorker: sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{
+				TaskTypes: m.options.workerTasks,
+				NoSwap:    false,
+			}, store, localStore, m.MinerNode, m.MinerNode, wsts),
+			LocalStore: localStore,
+			Storage:    lr,
+		}
+
+		m.Worker = workerApi
+
+		require.True(n.t, m.options.rpc)
+
+		withRPC := workerRpc(n.t, m)
+		n.inactive.workers[i] = withRPC
+
+		err = m.MinerNode.WorkerConnect(ctx, "http://"+addr+"/rpc/v0")
+		require.NoError(n.t, err)
+
+		n.active.workers = append(n.active.workers, m)
+	}
+
+	// If we are here, we have processed all inactive workers and moved them
+	// to active, so clear the slice.
+	n.inactive.workers = n.inactive.workers[:0]
+
+	// ---------------------
+	//         MISC
+	// ---------------------
+
 	// Link all the nodes.
 	err = n.mn.LinkAll()
 	require.NoError(n.t, err)
@@ -23,6 +23,20 @@ func EnsembleMinimal(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMin
 	return &full, &miner, ens
 }

+func EnsembleWorker(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *TestWorker, *Ensemble) {
+	opts = append(opts, WithAllSubsystems())
+
+	eopts, nopts := siftOptions(t, opts)
+
+	var (
+		full   TestFullNode
+		miner  TestMiner
+		worker TestWorker
+	)
+	ens := NewEnsemble(t, eopts...).FullNode(&full, nopts...).Miner(&miner, &full, nopts...).Worker(&miner, &worker, nopts...).Start()
+	return &full, &miner, &worker, ens
+}
+
 func EnsembleWithMinerAndMarketNodes(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *TestMiner, *Ensemble) {
 	eopts, nopts := siftOptions(t, opts)

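EnsembleWorker wires up a full node, a miner, and one attached worker in a single call. A test built on it follows the same shape as the itests added later in this diff; a minimal sketch (the task list shown is the kit default and is illustrative):

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
	"github.com/filecoin-project/lotus/itests/kit"
)

// Sketch: exercising the new worker path end to end, following the
// shape of TestWorkerPledge further down in this diff.
func TestWithWorkerSketch(t *testing.T) {
	ctx := context.Background()

	_, miner, worker, ens := kit.EnsembleWorker(t, kit.ThroughRPC(),
		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize}))
	ens.InterconnectAll().BeginMining(50 * time.Millisecond)

	// the worker should come up enabled and reachable over RPC
	enabled, err := worker.Enabled(ctx)
	require.NoError(t, err)
	require.True(t, enabled)

	miner.PledgeSectors(ctx, 1, 0, nil)
}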
@@ -21,6 +21,7 @@ import (
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/wallet"
 	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
 	"github.com/filecoin-project/lotus/miner"
 	libp2pcrypto "github.com/libp2p/go-libp2p-core/crypto"
@@ -61,6 +62,8 @@ func (ms MinerSubsystem) All() [MinerSubsystems]bool {
 type TestMiner struct {
 	api.StorageMiner

+	BaseAPI api.StorageMiner
+
 	t *testing.T

 	// ListenAddr is the address on which an API server is listening, if an
@@ -179,7 +182,7 @@ func (tm *TestMiner) AddStorage(ctx context.Context, t *testing.T, weight uint64
 	}

 	cfg := &stores.LocalStorageMeta{
-		ID:       stores.ID(uuid.New().String()),
+		ID:       storiface.ID(uuid.New().String()),
 		Weight:   weight,
 		CanSeal:  seal,
 		CanStore: store,
@@ -3,6 +3,8 @@ package kit
 import (
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
+	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
+	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
 	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
 	"github.com/filecoin-project/lotus/node/config"
 	"github.com/filecoin-project/lotus/node/modules"
@@ -38,6 +40,10 @@ type nodeOpts struct {
 	optBuilders          []OptBuilder
 	sectorSize           abi.SectorSize
 	maxStagingDealsBytes int64
+	minerNoLocalSealing  bool // use worker
+
+	workerTasks      []sealtasks.TaskType
+	workerStorageOpt func(stores.Store) stores.Store
 }

 // DefaultNodeOpts are the default options that will be applied to test nodes.
@@ -45,6 +51,9 @@ var DefaultNodeOpts = nodeOpts{
 	balance:    big.Mul(big.NewInt(100000000), types.NewInt(build.FilecoinPrecision)),
 	sectors:    DefaultPresealsPerBootstrapMiner,
 	sectorSize: abi.SectorSize(2 << 10), // 2KiB.
+
+	workerTasks:      []sealtasks.TaskType{sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize},
+	workerStorageOpt: func(store stores.Store) stores.Store { return store },
 }

 // OptBuilder is used to create an option after some other node is already
@@ -81,6 +90,13 @@ func WithMaxStagingDealsBytes(size int64) NodeOpt {
 	}
 }

+func WithNoLocalSealing(nope bool) NodeOpt {
+	return func(opts *nodeOpts) error {
+		opts.minerNoLocalSealing = nope
+		return nil
+	}
+}
+
 func DisableLibp2p() NodeOpt {
 	return func(opts *nodeOpts) error {
 		opts.disableLibp2p = true
@@ -170,3 +186,17 @@ func SectorSize(sectorSize abi.SectorSize) NodeOpt {
 		return nil
 	}
 }
+
+func WithTaskTypes(tt []sealtasks.TaskType) NodeOpt {
+	return func(opts *nodeOpts) error {
+		opts.workerTasks = tt
+		return nil
+	}
+}
+
+func WithWorkerStorage(transform func(stores.Store) stores.Store) NodeOpt {
+	return func(opts *nodeOpts) error {
+		opts.workerStorageOpt = transform
+		return nil
+	}
+}
30 itests/kit/node_worker.go Normal file
@@ -0,0 +1,30 @@
+package kit
+
+import (
+	"context"
+	"net"
+	"net/http"
+	"testing"
+
+	"github.com/filecoin-project/lotus/api"
+	"github.com/multiformats/go-multiaddr"
+)
+
+// TestWorker represents a worker enrolled in an Ensemble.
+type TestWorker struct {
+	api.Worker
+
+	t *testing.T
+
+	// ListenAddr is the address on which an API server is listening, if an
+	// API server is created for this Node
+	ListenAddr multiaddr.Multiaddr
+
+	Stop func(context.Context) error
+
+	FetchHandler   http.HandlerFunc
+	MinerNode      *TestMiner
+	RemoteListener net.Listener
+
+	options nodeOpts
+}
@@ -8,11 +8,14 @@ import (
 	"net/http/httptest"
 	"testing"

-	"github.com/filecoin-project/lotus/api/client"
-	"github.com/filecoin-project/lotus/node"
+	"github.com/stretchr/testify/require"
+
 	"github.com/multiformats/go-multiaddr"
 	manet "github.com/multiformats/go-multiaddr/net"
-	"github.com/stretchr/testify/require"
+
+	"github.com/filecoin-project/lotus/api/client"
+	"github.com/filecoin-project/lotus/cmd/lotus-worker/sealworker"
+	"github.com/filecoin-project/lotus/node"
 )

 func CreateRPCServer(t *testing.T, handler http.Handler, listener net.Listener) (*httptest.Server, multiaddr.Multiaddr) {
@@ -68,3 +71,18 @@ func minerRpc(t *testing.T, m *TestMiner) *TestMiner {
 	m.ListenAddr, m.StorageMiner = maddr, cl
 	return m
 }
+
+func workerRpc(t *testing.T, m *TestWorker) *TestWorker {
+	handler := sealworker.WorkerHandler(m.MinerNode.AuthVerify, m.FetchHandler, m.Worker, false)
+
+	srv, maddr := CreateRPCServer(t, handler, m.RemoteListener)
+
+	fmt.Println("creating RPC server for a worker at: ", srv.Listener.Addr().String())
+	url := "ws://" + srv.Listener.Addr().String() + "/rpc/v0"
+	cl, stop, err := client.NewWorkerRPCV0(context.Background(), url, nil)
+	require.NoError(t, err)
+	t.Cleanup(stop)
+
+	m.ListenAddr, m.Worker = maddr, cl
+	return m
+}
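workerRpc mirrors what the lotus-worker binary does in production: serve the worker API over websocket JSON-RPC, dial it back with client.NewWorkerRPCV0, and have the miner register the endpoint via WorkerConnect. A condensed sketch of that round trip, with auth checking and listener reuse simplified, so treat it as illustrative rather than the kit's exact flow:

package kit

import (
	"context"
	"net/http/httptest"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/api/client"
	"github.com/filecoin-project/lotus/cmd/lotus-worker/sealworker"
)

// connectWorkerSketch condenses the serve-then-dial round trip that
// workerRpc and Ensemble.Start perform above (hypothetical helper).
func connectWorkerSketch(t *testing.T, m *TestWorker) {
	handler := sealworker.WorkerHandler(m.MinerNode.AuthVerify, m.FetchHandler, m.Worker, false)
	srv := httptest.NewServer(handler) // the kit reuses m.RemoteListener instead
	t.Cleanup(srv.Close)

	// dial the worker back over websocket JSON-RPC
	url := "ws://" + srv.Listener.Addr().String() + "/rpc/v0"
	cl, stop, err := client.NewWorkerRPCV0(context.Background(), url, nil)
	require.NoError(t, err)
	t.Cleanup(stop)
	m.Worker = cl

	// register the worker with the miner, as Ensemble.Start does
	err = m.MinerNode.WorkerConnect(context.Background(), "http://"+srv.Listener.Addr().String()+"/rpc/v0")
	require.NoError(t, err)
}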
298 itests/worker_test.go Normal file
@@ -0,0 +1,298 @@
+package itests
+
+import (
+	"context"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"golang.org/x/xerrors"
+
+	logging "github.com/ipfs/go-log/v2"
+	"github.com/stretchr/testify/require"
+
+	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
+	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
+	"github.com/filecoin-project/lotus/itests/kit"
+	"github.com/filecoin-project/lotus/node"
+	"github.com/filecoin-project/lotus/node/impl"
+	"github.com/filecoin-project/lotus/node/repo"
+	"github.com/filecoin-project/lotus/storage"
+	storage2 "github.com/filecoin-project/specs-storage/storage"
+)
+
+func TestWorkerPledge(t *testing.T) {
+	ctx := context.Background()
+	_, miner, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.WithNoLocalSealing(true),
+		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2, sealtasks.TTUnseal})) // no mock proofs
+
+	ens.InterconnectAll().BeginMining(50 * time.Millisecond)
+
+	e, err := worker.Enabled(ctx)
+	require.NoError(t, err)
+	require.True(t, e)
+
+	miner.PledgeSectors(ctx, 1, 0, nil)
+}
+
+func TestWinningPostWorker(t *testing.T) {
+	prevIns := build.InsecurePoStValidation
+	build.InsecurePoStValidation = false
+	defer func() {
+		build.InsecurePoStValidation = prevIns
+	}()
+
+	ctx := context.Background()
+	client, _, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(),
+		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWinningPoSt})) // no mock proofs
+
+	ens.InterconnectAll().BeginMining(50 * time.Millisecond)
+
+	e, err := worker.Enabled(ctx)
+	require.NoError(t, err)
+	require.True(t, e)
+
+	client.WaitTillChain(ctx, kit.HeightAtLeast(6))
+}
+
+func TestWindowPostWorker(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	_ = logging.SetLogLevel("storageminer", "INFO")
+
+	sectors := 2 * 48 * 2
+
+	client, miner, _, ens := kit.EnsembleWorker(t,
+		kit.PresealSectors(sectors), // 2 sectors per partition, 2 partitions in all 48 deadlines
+		kit.LatestActorsAt(-1),
+		kit.ThroughRPC(),
+		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt}))
+
+	maddr, err := miner.ActorAddress(ctx)
+	require.NoError(t, err)
+
+	di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
+	require.NoError(t, err)
+
+	bm := ens.InterconnectAll().BeginMining(2 * time.Millisecond)[0]
+
+	di = di.NextNotElapsed()
+
+	t.Log("Running one proving period")
+	waitUntil := di.Open + di.WPoStChallengeWindow*2 + storage.SubmitConfidence
+	client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
+
+	t.Log("Waiting for post message")
+	bm.Stop()
+
+	var lastPending []*types.SignedMessage
+	for i := 0; i < 500; i++ {
+		lastPending, err = client.MpoolPending(ctx, types.EmptyTSK)
+		require.NoError(t, err)
+
+		if len(lastPending) > 0 {
+			break
+		}
+		time.Sleep(40 * time.Millisecond)
+	}
+
+	require.Greater(t, len(lastPending), 0)
+
+	t.Log("post message landed")
+
+	bm.MineBlocks(ctx, 2*time.Millisecond)
+
+	waitUntil = di.Open + di.WPoStChallengeWindow*3
+	t.Logf("End for head.Height > %d", waitUntil)
+
+	ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
+	t.Logf("Now head.Height = %d", ts.Height())
+
+	p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
+	require.NoError(t, err)
+
+	ssz, err := miner.ActorSectorSize(ctx, maddr)
+	require.NoError(t, err)
+
+	require.Equal(t, p.MinerPower, p.TotalPower)
+	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(sectors)))
+
+	mid, err := address.IDFromAddress(maddr)
+	require.NoError(t, err)
+
+	di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
+	require.NoError(t, err)
+
+	// Remove one sector in the next deadline (so it's skipped)
+	{
+		parts, err := client.StateMinerPartitions(ctx, maddr, di.Index+1, types.EmptyTSK)
+		require.NoError(t, err)
+		require.Greater(t, len(parts), 0)
+
+		secs := parts[0].AllSectors
+		n, err := secs.Count()
+		require.NoError(t, err)
+		require.Equal(t, uint64(2), n)
+
+		// Drop the sector
+		sid, err := secs.First()
+		require.NoError(t, err)
+
+		t.Logf("Drop sector %d; dl %d part %d", sid, di.Index+1, 0)
+
+		err = miner.BaseAPI.(*impl.StorageMinerAPI).IStorageMgr.Remove(ctx, storage2.SectorRef{
+			ID: abi.SectorID{
+				Miner:  abi.ActorID(mid),
+				Number: abi.SectorNumber(sid),
+			},
+		})
+		require.NoError(t, err)
+	}
+
+	waitUntil = di.Close + di.WPoStChallengeWindow
+	ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
+	t.Logf("Now head.Height = %d", ts.Height())
+
+	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
+	require.NoError(t, err)
+
+	require.Equal(t, p.MinerPower, p.TotalPower)
+	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(sectors-1)))
+}
+
+type badWorkerStorage struct {
+	stores.Store
+
+	badsector   *uint64
+	notBadCount int
+}
+
+func (bs *badWorkerStorage) GenerateSingleVanillaProof(ctx context.Context, minerID abi.ActorID, si storiface.PostSectorChallenge, ppt abi.RegisteredPoStProof) ([]byte, error) {
+	if atomic.LoadUint64(bs.badsector) == uint64(si.SectorNumber) {
+		bs.notBadCount--
+		if bs.notBadCount < 0 {
+			return nil, xerrors.New("no proof for you")
+		}
+	}
+	return bs.Store.GenerateSingleVanillaProof(ctx, minerID, si, ppt)
+}
+
+func TestWindowPostWorkerSkipBadSector(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	_ = logging.SetLogLevel("storageminer", "INFO")
+
+	sectors := 2 * 48 * 2
+
+	var badsector uint64 = 100000
+
+	client, miner, _, ens := kit.EnsembleWorker(t,
+		kit.PresealSectors(sectors), // 2 sectors per partition, 2 partitions in all 48 deadlines
+		kit.LatestActorsAt(-1),
+		kit.ThroughRPC(),
+		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt}),
+		kit.WithWorkerStorage(func(store stores.Store) stores.Store {
+			return &badWorkerStorage{
+				Store:     store,
+				badsector: &badsector,
+			}
+		}),
+		kit.ConstructorOpts(node.ApplyIf(node.IsType(repo.StorageMiner),
+			node.Override(new(stores.Store), func(store *stores.Remote) stores.Store {
+				return &badWorkerStorage{
+					Store:       store,
+					badsector:   &badsector,
+					notBadCount: 1,
+				}
+			}))))
+
+	maddr, err := miner.ActorAddress(ctx)
+	require.NoError(t, err)
+
+	di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
+	require.NoError(t, err)
+
+	bm := ens.InterconnectAll().BeginMiningMustPost(2 * time.Millisecond)[0]
+
+	di = di.NextNotElapsed()
+
+	t.Log("Running one proving period")
+	waitUntil := di.Open + di.WPoStChallengeWindow*2 + storage.SubmitConfidence
+	client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
+
+	t.Log("Waiting for post message")
+	bm.Stop()
+
+	var lastPending []*types.SignedMessage
+	for i := 0; i < 500; i++ {
+		lastPending, err = client.MpoolPending(ctx, types.EmptyTSK)
+		require.NoError(t, err)
+
+		if len(lastPending) > 0 {
+			break
+		}
+		time.Sleep(40 * time.Millisecond)
+	}
+
+	require.Greater(t, len(lastPending), 0)
+
+	t.Log("post message landed")
+
+	bm.MineBlocksMustPost(ctx, 2*time.Millisecond)
+
+	waitUntil = di.Open + di.WPoStChallengeWindow*3
+	t.Logf("End for head.Height > %d", waitUntil)
+
+	ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
+	t.Logf("Now head.Height = %d", ts.Height())
+
+	p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
+	require.NoError(t, err)
+
+	ssz, err := miner.ActorSectorSize(ctx, maddr)
+	require.NoError(t, err)
+
+	require.Equal(t, p.MinerPower, p.TotalPower)
+	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(sectors)))
+
+	di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
+	require.NoError(t, err)
+
+	// Remove one sector in the next deadline (so it's skipped)
+	{
+		parts, err := client.StateMinerPartitions(ctx, maddr, di.Index+1, types.EmptyTSK)
+		require.NoError(t, err)
+		require.Greater(t, len(parts), 0)
+
+		secs := parts[0].AllSectors
+		n, err := secs.Count()
+		require.NoError(t, err)
+		require.Equal(t, uint64(2), n)
+
+		// Drop the sector
+		sid, err := secs.First()
+		require.NoError(t, err)
+
+		t.Logf("Drop sector %d; dl %d part %d", sid, di.Index+1, 0)
+
+		atomic.StoreUint64(&badsector, sid)
+		require.NoError(t, err)
+	}
+
+	waitUntil = di.Close + di.WPoStChallengeWindow
+	ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
+	t.Logf("Now head.Height = %d", ts.Height())
+
+	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
+	require.NoError(t, err)
+
+	require.Equal(t, p.MinerPower, p.TotalPower)
+	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(sectors-1)))
+}
@@ -83,6 +83,7 @@ func ConfigStorageMiner(c interface{}) Option {
 		Override(new(stores.LocalStorage), From(new(repo.LockedRepo))),
 		Override(new(*stores.Local), modules.LocalStorage),
 		Override(new(*stores.Remote), modules.RemoteStorage),
+		Override(new(stores.Store), From(new(*stores.Remote))),
 		Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(cfg.Dealmaking)),

 		If(!cfg.Subsystems.EnableMining,
@@ -131,8 +131,8 @@ func (sm *StorageMinerAPI) ServeRemote(perm bool) func(w http.ResponseWriter, r
 	}
 }

-func (sm *StorageMinerAPI) WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) {
-	return sm.StorageMgr.WorkerStats(), nil
+func (sm *StorageMinerAPI) WorkerStats(ctx context.Context) (map[uuid.UUID]storiface.WorkerStats, error) {
+	return sm.StorageMgr.WorkerStats(ctx), nil
 }

 func (sm *StorageMinerAPI) WorkerJobs(ctx context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) {
@@ -294,13 +294,13 @@ func (sm *StorageMinerAPI) SectorsSummary(ctx context.Context) (map[api.SectorSt
 	return out, nil
 }

-func (sm *StorageMinerAPI) StorageLocal(ctx context.Context) (map[stores.ID]string, error) {
+func (sm *StorageMinerAPI) StorageLocal(ctx context.Context) (map[storiface.ID]string, error) {
 	l, err := sm.LocalStore.Local(ctx)
 	if err != nil {
 		return nil, err
 	}

-	out := map[stores.ID]string{}
+	out := map[storiface.ID]string{}
 	for _, st := range l {
 		out[st.ID] = st.LocalPath
 	}
@@ -324,7 +324,7 @@ func (sm *StorageMinerAPI) SectorsRefs(ctx context.Context) (map[string][]api.Se
 	return out, nil
 }

-func (sm *StorageMinerAPI) StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) {
+func (sm *StorageMinerAPI) StorageStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) {
 	return sm.RemoteStore.FsStat(ctx, id)
 }

@@ -1173,23 +1173,23 @@ func (sm *StorageMinerAPI) CreateBackup(ctx context.Context, fpath string) error
 	return backup(ctx, sm.DS, fpath)
 }

-func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []sto.SectorRef, update []bool, expensive bool) (map[abi.SectorNumber]string, error) {
+func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []sto.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) {
 	var rg storiface.RGetter
 	if expensive {
-		rg = func(ctx context.Context, id abi.SectorID) (cid.Cid, error) {
+		rg = func(ctx context.Context, id abi.SectorID) (cid.Cid, bool, error) {
 			si, err := sm.Miner.SectorsStatus(ctx, id.Number, false)
 			if err != nil {
-				return cid.Undef, err
+				return cid.Undef, false, err
 			}
 			if si.CommR == nil {
-				return cid.Undef, xerrors.Errorf("commr is nil")
+				return cid.Undef, false, xerrors.Errorf("commr is nil")
 			}

-			return *si.CommR, nil
+			return *si.CommR, si.ReplicaUpdateMessage != nil, nil
 		}
 	}

-	bad, err := sm.StorageMgr.CheckProvable(ctx, pp, sectors, update, rg)
+	bad, err := sm.StorageMgr.CheckProvable(ctx, pp, sectors, rg)
 	if err != nil {
 		return nil, err
 	}
@@ -721,7 +721,7 @@ func RemoteStorage(lstor *stores.Local, si stores.SectorIndex, sa sectorstorage.
 	return stores.NewRemote(lstor, si, http.Header(sa), sc.ParallelFetchLimit, &stores.DefaultPartialFileHandler{})
 }

-func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, lstor *stores.Local, stor *stores.Remote, ls stores.LocalStorage, si stores.SectorIndex, sc sectorstorage.SealerConfig, ds dtypes.MetadataDS) (*sectorstorage.Manager, error) {
+func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, lstor *stores.Local, stor stores.Store, ls stores.LocalStorage, si stores.SectorIndex, sc sectorstorage.SealerConfig, ds dtypes.MetadataDS) (*sectorstorage.Manager, error) {
 	ctx := helpers.LifecycleCtx(mctx, lc)

 	wsts := statestore.New(namespace.Wrap(ds, WorkerCallsPrefix))
@@ -19,6 +19,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
 	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 	"github.com/filecoin-project/lotus/node/config"
 )

@@ -36,6 +37,9 @@ type MemRepo struct {
 	keystore   map[string]types.KeyInfo
 	blockstore blockstore.Blockstore

+	sc      *stores.StorageConfig
+	tempDir string
+
 	// holds the current config value
 	config struct {
 		sync.Mutex
@@ -48,9 +52,7 @@ type lockedMemRepo struct {
 	t RepoType
 	sync.RWMutex

-	tempDir string
-	token   *byte
-	sc      *stores.StorageConfig
+	token *byte
 }

 func (lmem *lockedMemRepo) GetStorage() (stores.StorageConfig, error) {
@@ -58,13 +60,13 @@ func (lmem *lockedMemRepo) GetStorage() (stores.StorageConfig, error) {
 		return stores.StorageConfig{}, err
 	}

-	if lmem.sc == nil {
-		lmem.sc = &stores.StorageConfig{StoragePaths: []stores.LocalPath{
+	if lmem.mem.sc == nil {
+		lmem.mem.sc = &stores.StorageConfig{StoragePaths: []stores.LocalPath{
 			{Path: lmem.Path()},
 		}}
 	}

-	return *lmem.sc, nil
+	return *lmem.mem.sc, nil
 }

 func (lmem *lockedMemRepo) SetStorage(c func(*stores.StorageConfig)) error {
@@ -74,7 +76,7 @@ func (lmem *lockedMemRepo) SetStorage(c func(*stores.StorageConfig)) error {

 	_, _ = lmem.GetStorage()

-	c(lmem.sc)
+	c(lmem.mem.sc)
 	return nil
 }

@@ -94,8 +96,8 @@ func (lmem *lockedMemRepo) Path() string {
 	lmem.Lock()
 	defer lmem.Unlock()

-	if lmem.tempDir != "" {
-		return lmem.tempDir
+	if lmem.mem.tempDir != "" {
+		return lmem.mem.tempDir
 	}

 	t, err := ioutil.TempDir(os.TempDir(), "lotus-memrepo-temp-")
@@ -110,32 +112,38 @@ func (lmem *lockedMemRepo) Path() string {
 	if err := os.MkdirAll(filepath.Join(t, "deal-staging"), 0755); err != nil {
 		panic(err)
 	}
-	if err := config.WriteStorageFile(filepath.Join(t, fsStorageConfig), stores.StorageConfig{
-		StoragePaths: []stores.LocalPath{
-			{Path: t},
-		}}); err != nil {
-		panic(err)
-	}
-
-	b, err := json.MarshalIndent(&stores.LocalStorageMeta{
-		ID:       stores.ID(uuid.New().String()),
-		Weight:   10,
-		CanSeal:  true,
-		CanStore: true,
-	}, "", "  ")
-	if err != nil {
-		panic(err)
-	}
-
-	if err := ioutil.WriteFile(filepath.Join(t, "sectorstore.json"), b, 0644); err != nil {
-		panic(err)
+	if lmem.t == StorageMiner || lmem.t == Worker {
+		lmem.initSectorStore(t)
 	}

-	lmem.tempDir = t
+	lmem.mem.tempDir = t
 	return t
 }

+func (lmem *lockedMemRepo) initSectorStore(t string) {
+	if err := config.WriteStorageFile(filepath.Join(t, fsStorageConfig), stores.StorageConfig{
+		StoragePaths: []stores.LocalPath{
+			{Path: t},
+		}}); err != nil {
+		panic(err)
+	}
+
+	b, err := json.MarshalIndent(&stores.LocalStorageMeta{
+		ID:       storiface.ID(uuid.New().String()),
+		Weight:   10,
+		CanSeal:  true,
+		CanStore: true,
+	}, "", "  ")
+	if err != nil {
+		panic(err)
+	}
+
+	if err := ioutil.WriteFile(filepath.Join(t, "sectorstore.json"), b, 0644); err != nil {
+		panic(err)
+	}
+}
+
 var _ Repo = &MemRepo{}

 // MemRepoOptions contains options for memory repo
@@ -199,6 +207,18 @@ func (mem *MemRepo) Lock(t RepoType) (LockedRepo, error) {
 	}, nil
 }

+func (mem *MemRepo) Cleanup() {
+	mem.api.Lock()
+	defer mem.api.Unlock()
+
+	if mem.tempDir != "" {
+		if err := os.RemoveAll(mem.tempDir); err != nil {
+			log.Errorw("cleanup test memrepo", "error", err)
+		}
+		mem.tempDir = ""
+	}
+}
+
 func (lmem *lockedMemRepo) Readonly() bool {
 	return false
 }
@@ -223,20 +243,12 @@ func (lmem *lockedMemRepo) Close() error {
 		return ErrClosedRepo
 	}

-	if lmem.tempDir != "" {
-		if err := os.RemoveAll(lmem.tempDir); err != nil {
-			return err
-		}
-		lmem.tempDir = ""
-	}
-
 	lmem.mem.token = nil
 	lmem.mem.api.Lock()
 	lmem.mem.api.ma = nil
 	lmem.mem.api.Unlock()
 	<-lmem.mem.repoLock // unlock
 	return nil
-
 }

 func (lmem *lockedMemRepo) Datastore(_ context.Context, ns string) (datastore.Batching, error) {
@@ -140,10 +140,11 @@ func (m *Miner) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnC
 			Value: info.SeedValue,
 			Epoch: info.SeedEpoch,
 		},
 		PreCommitMsg: info.PreCommitMessage,
 		CommitMsg:    info.CommitMessage,
 		Retries:      info.InvalidProofs,
 		ToUpgrade:    false,
+		ReplicaUpdateMessage: info.ReplicaUpdateMessage,

 		LastErr: info.LastErr,
 		Log:     log,
@@ -45,13 +45,6 @@ func (s *WindowPoStScheduler) recordPoStFailure(err error, ts *types.TipSet, dea
 			State: SchedulerStateFaulted,
 		}
 	})
-
-	log.Errorf("Got err %+v - TODO handle errors", err)
-	/*s.failLk.Lock()
-	if eps > s.failed {
-		s.failed = eps
-	}
-	s.failLk.Unlock()*/
 }

 // recordProofsEvent records a successful proofs_processed event in the
@@ -204,11 +197,18 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.B
 		return bitfield.BitField{}, err
 	}

-	sectors := make(map[abi.SectorNumber]struct{})
+	type checkSector struct {
+		sealed cid.Cid
+		update bool
+	}
+
+	sectors := make(map[abi.SectorNumber]checkSector)
 	var tocheck []storage.SectorRef
-	var update []bool
 	for _, info := range sectorInfos {
-		sectors[info.SectorNumber] = struct{}{}
+		sectors[info.SectorNumber] = checkSector{
+			sealed: info.SealedCID,
+			update: info.SectorKeyCID != nil,
+		}
 		tocheck = append(tocheck, storage.SectorRef{
 			ProofType: info.SealProof,
 			ID: abi.SectorID{
@@ -216,10 +216,15 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.B
 				Number: info.SectorNumber,
 			},
 		})
-		update = append(update, info.SectorKeyCID != nil)
 	}

-	bad, err := s.faultTracker.CheckProvable(ctx, s.proofType, tocheck, update, nil)
+	bad, err := s.faultTracker.CheckProvable(ctx, s.proofType, tocheck, func(ctx context.Context, id abi.SectorID) (cid.Cid, bool, error) {
+		s, ok := sectors[id.Number]
+		if !ok {
+			return cid.Undef, false, xerrors.Errorf("sealed CID not found")
+		}
+		return s.sealed, s.update, nil
+	})
 	if err != nil {
 		return bitfield.BitField{}, xerrors.Errorf("checking provable sectors: %w", err)
 	}
@@ -549,6 +554,12 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t
 		return nil, err
 	}

+	defer func() {
+		if r := recover(); r != nil {
+			log.Errorf("recover: %s", r)
+		}
+	}()
+
 	// Generate proofs in batches
 	posts := make([]miner.SubmitWindowedPoStParams, 0, len(partitionBatches))
 	for batchIdx, batch := range partitionBatches {
@@ -639,14 +650,9 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t
 			return nil, err
 		}

-		defer func() {
-			if r := recover(); r != nil {
-				log.Errorf("recover: %s", r)
-			}
-		}()
 		postOut, ps, err := s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), xsinfos, append(abi.PoStRandomness{}, rand...))
 		elapsed := time.Since(tsStart)
-		log.Infow("computing window post", "batch", batchIdx, "elapsed", elapsed)
+		log.Infow("computing window post", "batch", batchIdx, "elapsed", elapsed, "skip", len(ps), "err", err)
 		if err != nil {
 			log.Errorf("error generating window post: %s", err)
 		}
@@ -855,7 +861,7 @@ func (s *WindowPoStScheduler) submitPoStMessage(ctx context.Context, proof *mine
 		return nil, xerrors.Errorf("pushing message to mpool: %w", err)
 	}

-	log.Infof("Submitted window post: %s", sm.Cid())
+	log.Infof("Submitted window post: %s (deadline %d)", sm.Cid(), proof.Deadline)

 	go func() {
 		rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
@@ -865,6 +871,7 @@ func (s *WindowPoStScheduler) submitPoStMessage(ctx context.Context, proof *mine
 		}

 		if rec.Receipt.ExitCode == 0 {
+			log.Infow("Window post submission successful", "cid", sm.Cid(), "deadline", proof.Deadline, "epoch", rec.Height, "ts", rec.TipSet.Cids())
 			return
 		}
@ -6,11 +6,6 @@ import (
 	"context"
 	"testing"
 
-	proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
-
-	builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
-	miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
-
 	"github.com/stretchr/testify/require"
 	"golang.org/x/xerrors"
 
@ -20,11 +15,21 @@ import (
 	"github.com/filecoin-project/go-bitfield"
 	"github.com/filecoin-project/specs-storage/storage"
 
+	builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+	miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
+	proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
+	tutils "github.com/filecoin-project/specs-actors/v2/support/testing"
+	builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
+	miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
+	"github.com/filecoin-project/specs-actors/v6/actors/runtime/proof"
+	proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
+
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/go-state-types/crypto"
 	"github.com/filecoin-project/go-state-types/dline"
 	"github.com/filecoin-project/go-state-types/network"
 
 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@ -32,10 +37,6 @@ import (
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 	"github.com/filecoin-project/lotus/journal"
-	builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
-	miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
-	proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
-	tutils "github.com/filecoin-project/specs-actors/v2/support/testing"
 )
 
 type mockStorageMinerAPI struct {
@ -117,6 +118,14 @@ func (m *mockStorageMinerAPI) GasEstimateFeeCap(context.Context, *types.Message,
 type mockProver struct {
 }
 
+func (m *mockProver) GenerateWinningPoStWithVanilla(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, proofs [][]byte) ([]proof.PoStProof, error) {
+	panic("implement me")
+}
+
+func (m *mockProver) GenerateWindowPoStWithVanilla(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, proofs [][]byte, partitionIdx int) (proof.PoStProof, error) {
+	panic("implement me")
+}
+
 func (m *mockProver) GenerateWinningPoSt(context.Context, abi.ActorID, []proof7.ExtendedSectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) {
 	panic("implement me")
 }
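These two stubs track the prover interface growing ...WithVanilla variants, which assemble a PoSt from pre-generated vanilla proofs so a dedicated PoSt worker can finish a proof without direct sector access. A trimmed sketch of the interface shape and a stub that satisfies it; the types here are simplified stand-ins, whereas the real methods use abi and proof types:

package main

import "context"

// prover sketches the two methods added in this PR: build a winning or
// window PoSt from vanilla proofs computed elsewhere.
type prover interface {
	GenerateWinningPoStWithVanilla(ctx context.Context, minerID uint64, randomness []byte, vanilla [][]byte) ([][]byte, error)
	GenerateWindowPoStWithVanilla(ctx context.Context, minerID uint64, randomness []byte, vanilla [][]byte, partitionIdx int) ([]byte, error)
}

// stubProver satisfies the interface for tests that never exercise the
// vanilla paths, mirroring the panic("implement me") bodies in the diff.
type stubProver struct{}

func (stubProver) GenerateWinningPoStWithVanilla(context.Context, uint64, []byte, [][]byte) ([][]byte, error) {
	panic("implement me")
}

func (stubProver) GenerateWindowPoStWithVanilla(context.Context, uint64, []byte, [][]byte, int) ([]byte, error) {
	panic("implement me")
}

func main() {
	var _ prover = stubProver{} // compile-time interface check
}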
@ -169,7 +178,7 @@ func (m mockVerif) GenerateWinningPoStSectorChallenge(context.Context, abi.Regis
 type mockFaultTracker struct {
 }
 
-func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, rg storiface.RGetter) (map[abi.SectorID]string, error) {
+func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) {
 	// Returns "bad" sectors so just return empty map meaning all sectors are good
 	return map[abi.SectorID]string{}, nil
 }
@ -24,6 +24,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/wallet"
 	"github.com/filecoin-project/lotus/cmd/lotus-seed/seed"
 	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 	"github.com/filecoin-project/lotus/markets/storageadapter"
 	"github.com/filecoin-project/lotus/miner"
 	"github.com/filecoin-project/lotus/node"
@ -198,7 +199,7 @@ func PrepareMiner(t *TestEnvironment) (*LotusMiner, error) {
 	var localPaths []stores.LocalPath
 
 	b, err := json.MarshalIndent(&stores.LocalStorageMeta{
-		ID:       stores.ID(uuid.New().String()),
+		ID:       storiface.ID(uuid.New().String()),
 		Weight:   10,
 		CanSeal:  true,
 		CanStore: true,
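The storage identifier type moves from the stores package to storiface, so call sites switch from stores.ID to storiface.ID while the surrounding struct stays put. A minimal sketch of the updated construction; localStorageMeta and the local ID type are trimmed stand-ins for stores.LocalStorageMeta and storiface.ID, kept only to show the call-site change:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

// ID mirrors the moved identifier type (storiface.ID in the PR).
type ID string

// localStorageMeta is a trimmed stand-in for stores.LocalStorageMeta.
type localStorageMeta struct {
	ID       ID
	Weight   int
	CanSeal  bool
	CanStore bool
}

func main() {
	// Construct the metadata with the new ID type, as the diff does.
	b, err := json.MarshalIndent(&localStorageMeta{
		ID:       ID(uuid.New().String()),
		Weight:   10,
		CanSeal:  true,
		CanStore: true,
	}, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}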