Merge pull request #1405 from filecoin-project/feat/new-workers
New workers
commit 08ca60cfa8

Makefile (2 changes)
@@ -19,7 +19,7 @@ GOFLAGS+=-ldflags=-X="github.com/filecoin-project/lotus/build".CurrentCommit="+g
 ## FFI

 FFI_PATH:=extern/filecoin-ffi/
-FFI_DEPS:=libfilecoin.a filecoin.pc filecoin.h
+FFI_DEPS:=.install-filcrypto
 FFI_DEPS:=$(addprefix $(FFI_PATH),$(FFI_DEPS))

 $(FFI_DEPS): build/.filecoin-install ;
@@ -7,10 +7,11 @@ import (
 	"github.com/ipfs/go-cid"

 	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-fil-markets/storagemarket"
 	"github.com/filecoin-project/specs-actors/actors/abi"

-	"github.com/filecoin-project/go-fil-markets/storagemarket"
 	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
 )

 // alias because cbor-gen doesn't like non-alias types

@@ -107,20 +108,20 @@ type StorageMiner interface {

 	SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error

-	/*WorkerStats(context.Context) (sealsched.WorkerStats, error)*/
+	StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error)
+	StorageLocal(ctx context.Context) (map[stores.ID]string, error)
+	StorageStat(ctx context.Context, id stores.ID) (stores.FsStat, error)

-	/*// WorkerQueue registers a remote worker
-	WorkerQueue(context.Context, WorkerCfg) (<-chan WorkerTask, error)
+	// WorkerConnect tells the node to connect to workers RPC
+	WorkerConnect(context.Context, string) error
+	WorkerStats(context.Context) (map[uint64]WorkerStats, error)

-	// WorkerQueue registers a remote worker
-	WorkerQueue(context.Context, sectorbuilder.WorkerCfg) (<-chan sectorbuilder.WorkerTask, error)
+	stores.SectorIndex

-	WorkerDone(ctx context.Context, task uint64, res sectorbuilder.SealRes) error
-	*/
 	MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error
 	MarketListDeals(ctx context.Context) ([]storagemarket.StorageDeal, error)
 	MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error)
-	SetPrice(context.Context, types.BigInt) error
+	MarketSetPrice(context.Context, types.BigInt) error

 	DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error
 	DealsList(ctx context.Context) ([]storagemarket.StorageDeal, error)
api/api_worker.go (new file, 46 lines)

@@ -0,0 +1,46 @@
package api

import (
	"context"

	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks"
	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

type WorkerApi interface {
	Version(context.Context) (build.Version, error)
	// TODO: Info() (name, ...) ?

	TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) // TaskType -> Weight
	Paths(context.Context) ([]stores.StoragePath, error)
	Info(context.Context) (WorkerInfo, error)

	storage.Sealer
}

type WorkerResources struct {
	MemPhysical uint64
	MemSwap     uint64

	MemReserved uint64 // Used by system / other processes

	GPUs []string
}

type WorkerInfo struct {
	Hostname string

	Resources WorkerResources
}

type WorkerStats struct {
	Info WorkerInfo

	MemUsedMin uint64
	MemUsedMax uint64
	GpuUsed    bool
	CpuUse     int
}
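A sketch of how a consumer might use this new interface; summarizeWorker is hypothetical and not part of this PR, but it only calls methods declared above (a client constructor for it, NewWorkerRPC, appears later in this diff):

```go
package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

// summarizeWorker queries a worker for its static info and supported task
// types. w can be any WorkerApi implementation, e.g. one obtained via
// client.NewWorkerRPC (added later in this PR).
func summarizeWorker(ctx context.Context, w api.WorkerApi) error {
	info, err := w.Info(ctx)
	if err != nil {
		return err
	}

	tasks, err := w.TaskTypes(ctx)
	if err != nil {
		return err
	}

	// MemPhysical/MemSwap are byte counts; MemReserved is memory assumed
	// to be used by the system and other processes.
	fmt.Printf("worker %s: %d GPUs, %d bytes RAM (%d reserved)\n",
		info.Hostname, len(info.Resources.GPUs),
		info.Resources.MemPhysical, info.Resources.MemReserved)

	for tt := range tasks {
		fmt.Println("  supports:", tt)
	}
	return nil
}
```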
@@ -43,6 +43,12 @@ func PermissionedFullAPI(a api.FullNode) api.FullNode {
 	return &out
 }

+func PermissionedWorkerAPI(a api.WorkerApi) api.WorkerApi {
+	var out WorkerStruct
+	permissionedAny(a, &out.Internal)
+	return &out
+}
+
 func HasPerm(ctx context.Context, perm api.Permission) bool {
 	callerPerms, ok := ctx.Value(permCtxKey).([]api.Permission)
 	if !ok {
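For context, a condensed excerpt of how the worker daemon added later in this PR (cmd/lotus-seal-worker) wires this wrapper in; nothing here is new API, it is trimmed from the code further down in the diff:

```go
// The worker's API object is wrapped before being exposed over RPC, and an
// auth.Handler checks the JWT that PermissionedWorkerAPI's perm tags rely on.
rpcServer := jsonrpc.NewServer()
rpcServer.Register("Filecoin", apistruct.PermissionedWorkerAPI(workerApi))

ah := &auth.Handler{
	Verify: nodeApi.AuthVerify, // token verification is delegated to the miner node
	Next:   mux.ServeHTTP,
}
```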
@@ -9,16 +9,21 @@ import (

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-actors/actors/abi/big"
	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
	"github.com/filecoin-project/specs-actors/actors/builtin/paych"
	"github.com/filecoin-project/specs-actors/actors/builtin/reward"
	"github.com/filecoin-project/specs-actors/actors/crypto"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks"
	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

// All permissions are listed in permissioned.go
@@ -168,6 +173,7 @@ type StorageMinerStruct struct {
 		MarketImportDealData      func(context.Context, cid.Cid, string) error `perm:"write"`
 		MarketListDeals           func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"`
 		MarketListIncompleteDeals func(ctx context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"`
+		MarketSetPrice            func(context.Context, types.BigInt) error `perm:"admin"`

 		PledgeSector func(context.Context) error `perm:"write"`
@@ -176,12 +182,18 @@ type StorageMinerStruct struct {
 		SectorsRefs   func(context.Context) (map[string][]api.SealedRef, error) `perm:"read"`
 		SectorsUpdate func(context.Context, abi.SectorNumber, api.SectorState) error `perm:"write"`

-		/* WorkerStats func(context.Context) (sectorbuilder.WorkerStats, error) `perm:"read"`
+		WorkerConnect func(context.Context, string) error `perm:"admin"` // TODO: worker perm
+		WorkerStats   func(context.Context) (map[uint64]api.WorkerStats, error) `perm:"admin"`

-		WorkerQueue func(ctx context.Context, cfg sectorbuilder.WorkerCfg) (<-chan sectorbuilder.WorkerTask, error) `perm:"admin"` // TODO: worker perm
-		WorkerDone func(ctx context.Context, task uint64, res sectorbuilder.SealRes) error `perm:"admin"`
-		*/
-		SetPrice func(context.Context, types.BigInt) error `perm:"admin"`
+		StorageList func(context.Context) (map[stores.ID][]stores.Decl, error) `perm:"admin"`
+		StorageLocal func(context.Context) (map[stores.ID]string, error) `perm:"admin"`
+		StorageStat func(context.Context, stores.ID) (stores.FsStat, error) `perm:"admin"`
+		StorageAttach func(context.Context, stores.StorageInfo, stores.FsStat) error `perm:"admin"`
+		StorageDeclareSector func(context.Context, stores.ID, abi.SectorID, sectorbuilder.SectorFileType) error `perm:"admin"`
+		StorageDropSector func(context.Context, stores.ID, abi.SectorID, sectorbuilder.SectorFileType) error `perm:"admin"`
+		StorageFindSector func(context.Context, abi.SectorID, sectorbuilder.SectorFileType, bool) ([]stores.StorageInfo, error) `perm:"admin"`
+		StorageInfo func(context.Context, stores.ID) (stores.StorageInfo, error) `perm:"admin"`
+		StorageBestAlloc func(ctx context.Context, allocate sectorbuilder.SectorFileType, sealing bool) ([]stores.StorageInfo, error) `perm:"admin"`

 		DealsImportData func(ctx context.Context, dealPropCid cid.Cid, file string) error `perm:"write"`
 		DealsList func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"`
@@ -190,6 +202,24 @@ type StorageMinerStruct struct {
 	}
 }

+type WorkerStruct struct {
+	Internal struct {
+		// TODO: lower perms
+
+		Version func(context.Context) (build.Version, error) `perm:"admin"`
+
+		TaskTypes func(context.Context) (map[sealtasks.TaskType]struct{}, error) `perm:"admin"`
+		Paths     func(context.Context) ([]stores.StoragePath, error) `perm:"admin"`
+		Info      func(context.Context) (api.WorkerInfo, error) `perm:"admin"`
+
+		SealPreCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) `perm:"admin"`
+		SealPreCommit2 func(context.Context, abi.SectorID, storage.PreCommit1Out) (cids storage.SectorCids, err error) `perm:"admin"`
+		SealCommit1    func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) `perm:"admin"`
+		SealCommit2    func(context.Context, abi.SectorID, storage.Commit1Out) (storage.Proof, error) `perm:"admin"`
+		FinalizeSector func(context.Context, abi.SectorID) error `perm:"admin"`
+	}
+}
+
 func (c *CommonStruct) AuthVerify(ctx context.Context, token string) ([]api.Permission, error) {
 	return c.Internal.AuthVerify(ctx, token)
 }
@@ -633,17 +663,49 @@ func (c *StorageMinerStruct) SectorsUpdate(ctx context.Context, id abi.SectorNum
 	return c.Internal.SectorsUpdate(ctx, id, state)
 }

-/*func (c *StorageMinerStruct) WorkerStats(ctx context.Context) (sealsched.WorkerStats, error) {
-	return c.Internal.WorkerStats(ctx)
-}*/
-
-/*func (c *StorageMinerStruct) WorkerQueue(ctx context.Context, cfg sectorbuilder.WorkerCfg) (<-chan sectorbuilder.WorkerTask, error) {
-	return c.Internal.WorkerQueue(ctx, cfg)
+func (c *StorageMinerStruct) WorkerConnect(ctx context.Context, url string) error {
+	return c.Internal.WorkerConnect(ctx, url)
 }

-func (c *StorageMinerStruct) WorkerDone(ctx context.Context, task uint64, res sectorbuilder.SealRes) error {
-	return c.Internal.WorkerDone(ctx, task, res)
-}*/
+func (c *StorageMinerStruct) WorkerStats(ctx context.Context) (map[uint64]api.WorkerStats, error) {
+	return c.Internal.WorkerStats(ctx)
+}

+func (c *StorageMinerStruct) StorageAttach(ctx context.Context, si stores.StorageInfo, st stores.FsStat) error {
+	return c.Internal.StorageAttach(ctx, si, st)
+}
+
+func (c *StorageMinerStruct) StorageDeclareSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft sectorbuilder.SectorFileType) error {
+	return c.Internal.StorageDeclareSector(ctx, storageId, s, ft)
+}
+
+func (c *StorageMinerStruct) StorageDropSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft sectorbuilder.SectorFileType) error {
+	return c.Internal.StorageDropSector(ctx, storageId, s, ft)
+}
+
+func (c *StorageMinerStruct) StorageFindSector(ctx context.Context, si abi.SectorID, types sectorbuilder.SectorFileType, allowFetch bool) ([]stores.StorageInfo, error) {
+	return c.Internal.StorageFindSector(ctx, si, types, allowFetch)
+}
+
+func (c *StorageMinerStruct) StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) {
+	return c.Internal.StorageList(ctx)
+}
+
+func (c *StorageMinerStruct) StorageLocal(ctx context.Context) (map[stores.ID]string, error) {
+	return c.Internal.StorageLocal(ctx)
+}
+
+func (c *StorageMinerStruct) StorageStat(ctx context.Context, id stores.ID) (stores.FsStat, error) {
+	return c.Internal.StorageStat(ctx, id)
+}
+
+func (c *StorageMinerStruct) StorageInfo(ctx context.Context, id stores.ID) (stores.StorageInfo, error) {
+	return c.Internal.StorageInfo(ctx, id)
+}
+
+func (c *StorageMinerStruct) StorageBestAlloc(ctx context.Context, allocate sectorbuilder.SectorFileType, sealing bool) ([]stores.StorageInfo, error) {
+	return c.Internal.StorageBestAlloc(ctx, allocate, sealing)
+}
+
 func (c *StorageMinerStruct) MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error {
 	return c.Internal.MarketImportDealData(ctx, propcid, path)
@@ -657,8 +719,8 @@ func (c *StorageMinerStruct) MarketListIncompleteDeals(ctx context.Context) ([]s
 	return c.Internal.MarketListIncompleteDeals(ctx)
 }

-func (c *StorageMinerStruct) SetPrice(ctx context.Context, p types.BigInt) error {
-	return c.Internal.SetPrice(ctx, p)
+func (c *StorageMinerStruct) MarketSetPrice(ctx context.Context, p types.BigInt) error {
+	return c.Internal.MarketSetPrice(ctx, p)
 }

 func (c *StorageMinerStruct) DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error {
@@ -673,6 +735,43 @@ func (c *StorageMinerStruct) StorageAddLocal(ctx context.Context, path string) e
 	return c.Internal.StorageAddLocal(ctx, path)
 }

+func (w *WorkerStruct) Version(ctx context.Context) (build.Version, error) {
+	return w.Internal.Version(ctx)
+}
+
+func (w *WorkerStruct) TaskTypes(ctx context.Context) (map[sealtasks.TaskType]struct{}, error) {
+	return w.Internal.TaskTypes(ctx)
+}
+
+func (w *WorkerStruct) Paths(ctx context.Context) ([]stores.StoragePath, error) {
+	return w.Internal.Paths(ctx)
+}
+
+func (w *WorkerStruct) Info(ctx context.Context) (api.WorkerInfo, error) {
+	return w.Internal.Info(ctx)
+}
+
+func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
+	return w.Internal.SealPreCommit1(ctx, sector, ticket, pieces)
+}
+
+func (w *WorkerStruct) SealPreCommit2(ctx context.Context, sector abi.SectorID, p1o storage.PreCommit1Out) (storage.SectorCids, error) {
+	return w.Internal.SealPreCommit2(ctx, sector, p1o)
+}
+
+func (w *WorkerStruct) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
+	return w.Internal.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
+}
+
+func (w *WorkerStruct) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) {
+	return w.Internal.SealCommit2(ctx, sector, c1o)
+}
+
+func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID) error {
+	return w.Internal.FinalizeSector(ctx, sector)
+}
+
 var _ api.Common = &CommonStruct{}
 var _ api.FullNode = &FullNodeStruct{}
 var _ api.StorageMiner = &StorageMinerStruct{}
+var _ api.WorkerApi = &WorkerStruct{}
@@ -14,7 +14,9 @@ func NewCommonRPC(addr string, requestHeader http.Header) (api.Common, jsonrpc.C
 	closer, err := jsonrpc.NewMergeClient(addr, "Filecoin",
 		[]interface{}{
 			&res.Internal,
-		}, requestHeader)
+		},
+		requestHeader,
+	)

 	return &res, closer, err
 }

@@ -38,7 +40,21 @@ func NewStorageMinerRPC(addr string, requestHeader http.Header) (api.StorageMine
 		[]interface{}{
 			&res.CommonStruct.Internal,
 			&res.Internal,
-		}, requestHeader)
+		},
+		requestHeader,
+	)
+
+	return &res, closer, err
+}
+
+func NewWorkerRPC(addr string, requestHeader http.Header) (api.WorkerApi, jsonrpc.ClientCloser, error) {
+	var res apistruct.WorkerStruct
+	closer, err := jsonrpc.NewMergeClient(addr, "Filecoin",
+		[]interface{}{
+			&res.Internal,
+		},
+		requestHeader,
+	)

 	return &res, closer, err
 }
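A hypothetical caller of the new constructor; the address, port, and token below are placeholders, not values from this PR:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	// Assumed: a worker listening on 127.0.0.1:3456 and an admin token.
	headers := http.Header{}
	headers.Add("Authorization", "Bearer "+"<admin token>")

	w, closer, err := client.NewWorkerRPC("ws://127.0.0.1:3456/rpc/v0", headers)
	if err != nil {
		panic(err)
	}
	defer closer()

	ver, err := w.Version(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("worker API version:", ver)
}
```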
@@ -36,7 +36,7 @@ import (
 	"github.com/filecoin-project/lotus/genesis"
 	"github.com/filecoin-project/lotus/lib/sigs"
 	"github.com/filecoin-project/lotus/node/repo"
-	"github.com/filecoin-project/lotus/storage/sbmock"
+	"github.com/filecoin-project/lotus/storage/sectorstorage/mock"

 	"go.opencensus.io/trace"
 	"golang.org/x/xerrors"

@@ -690,7 +690,7 @@ func (m genFakeVerifier) VerifyElectionPost(ctx context.Context, pvi abi.PoStVer
 	panic("nyi")
 }
 func (m genFakeVerifier) GenerateDataCommitment(ssize abi.PaddedPieceSize, pieces []abi.PieceInfo) (cid.Cid, error) {
-	return sbmock.MockVerifier.GenerateDataCommitment(ssize, pieces)
+	return mock.MockVerifier.GenerateDataCommitment(ssize, pieces)
 }

 func (m genFakeVerifier) VerifySeal(svi abi.SealVerifyInfo) (bool, error) {
@@ -5,13 +5,14 @@ import (
 	"fmt"

 	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-sectorbuilder"
 	"github.com/filecoin-project/specs-actors/actors/abi"
 	"github.com/filecoin-project/specs-actors/actors/crypto"
 	"github.com/filecoin-project/specs-actors/actors/runtime"
 	"github.com/ipfs/go-cid"
 	mh "github.com/multiformats/go-multihash"
 	"golang.org/x/xerrors"
-
-	"github.com/filecoin-project/go-sectorbuilder"
 )

 func init() {
@@ -134,7 +134,6 @@ func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
 }

 func GetRawAPI(ctx *cli.Context, t repo.RepoType) (string, http.Header, error) {
-
 	ainfo, err := GetAPIInfo(ctx, t)
 	if err != nil {
 		return "", nil, xerrors.Errorf("could not get API info: %w", err)

@@ -210,6 +209,14 @@ func ReqContext(cctx *cli.Context) context.Context {
 	return ctx
 }

+var CommonCommands = []*cli.Command{
+	authCmd,
+	fetchParamCmd,
+	netCmd,
+	versionCmd,
+	logCmd,
+}
+
 var Commands = []*cli.Command{
 	authCmd,
 	chainCmd,
@@ -168,7 +168,6 @@ func main() {
 	}

 	cfg := &sectorbuilder.Config{
-		Miner:         maddr,
 		SealProofType: spt,
 		PoStProofType: ppt,
 	}

@@ -186,8 +185,7 @@ func main() {
 	}

 	sbfs := &fs.Basic{
-		Miner: maddr,
-		Root:  sbdir,
+		Root: sbdir,
 	}

 	sb, err := sectorbuilder.New(sbfs, cfg)

@@ -206,12 +204,17 @@ func main() {
 	var sealedSectors []abi.SectorInfo
 	numSectors := abi.SectorNumber(1)
 	for i := abi.SectorNumber(1); i <= numSectors && robench == ""; i++ {
+		sid := abi.SectorID{
+			Miner:  mid,
+			Number: i,
+		}
+
 		start := time.Now()
 		log.Info("Writing piece into sector...")

 		r := rand.New(rand.NewSource(100 + int64(i)))

-		pi, err := sb.AddPiece(context.TODO(), i, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), r)
+		pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), r)
 		if err != nil {
 			return err
 		}

@@ -223,7 +226,7 @@ func main() {

 		log.Info("Running replication(1)...")
 		pieces := []abi.PieceInfo{pi}
-		pc1o, err := sb.SealPreCommit1(context.TODO(), i, ticket, pieces)
+		pc1o, err := sb.SealPreCommit1(context.TODO(), sid, ticket, pieces)
 		if err != nil {
 			return xerrors.Errorf("commit: %w", err)
 		}

@@ -231,7 +234,7 @@ func main() {
 		precommit1 := time.Now()

 		log.Info("Running replication(2)...")
-		commR, commD, err := sb.SealPreCommit2(context.TODO(), i, pc1o)
+		cids, err := sb.SealPreCommit2(context.TODO(), sid, pc1o)
 		if err != nil {
 			return xerrors.Errorf("commit: %w", err)
 		}

@@ -241,7 +244,7 @@ func main() {
 		sealedSectors = append(sealedSectors, abi.SectorInfo{
 			RegisteredProof: spt,
 			SectorNumber:    i,
-			SealedCID:       commR,
+			SealedCID:       cids.Sealed,
 		})

 		seed := lapi.SealSeed{

@@ -250,7 +253,7 @@ func main() {
 		}

 		log.Info("Generating PoRep for sector (1)")
-		c1o, err := sb.SealCommit1(context.TODO(), i, ticket, seed.Value, pieces, commR, commD)
+		c1o, err := sb.SealCommit1(context.TODO(), sid, ticket, seed.Value, pieces, cids)
 		if err != nil {
 			return err
 		}

@@ -278,7 +281,7 @@ func main() {

 		var proof storage.Proof
 		if !c.Bool("skip-commit2") {
-			proof, err = sb.SealCommit2(context.TODO(), i, c1o)
+			proof, err = sb.SealCommit2(context.TODO(), sid, c1o)
 			if err != nil {
 				return err
 			}

@@ -291,7 +294,7 @@ func main() {
 			svi := abi.SealVerifyInfo{
 				SectorID: abi.SectorID{Miner: mid, Number: i},
 				OnChain: abi.OnChainSealVerifyInfo{
-					SealedCID:        commR,
+					SealedCID:        cids.Sealed,
 					InteractiveEpoch: seed.Epoch,
 					RegisteredProof:  spt,
 					Proof:            proof,

@@ -301,7 +304,7 @@ func main() {
 				},
 				Randomness:            ticket,
 				InteractiveRandomness: seed.Value,
-				UnsealedCID:           commD,
+				UnsealedCID:           cids.Unsealed,
 			}

 			ok, err := sectorbuilder.ProofVerifier.VerifySeal(svi)

@@ -318,7 +321,7 @@ func main() {
 		if !c.Bool("skip-unseal") {
 			log.Info("Unsealing sector")
 			// TODO: RM unsealed sector first
-			rc, err := sb.ReadPieceFromSealedSector(context.TODO(), 1, 0, abi.UnpaddedPieceSize(sectorSize), ticket, commD)
+			rc, err := sb.ReadPieceFromSealedSector(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, 0, abi.UnpaddedPieceSize(sectorSize), ticket, cids.Unsealed)
 			if err != nil {
 				return err
 			}

@@ -380,7 +383,7 @@ func main() {

 	if !c.Bool("skip-commit2") {
 		log.Info("generating election post candidates")
-		fcandidates, err := sb.GenerateEPostCandidates(sealedSectors, challenge[:], []abi.SectorNumber{})
+		fcandidates, err := sb.GenerateEPostCandidates(context.TODO(), mid, sealedSectors, challenge[:], []abi.SectorNumber{})
 		if err != nil {
 			return err
 		}

@@ -394,7 +397,7 @@ func main() {
 	gencandidates := time.Now()

 	log.Info("computing election post snark (cold)")
-	proof1, err := sb.ComputeElectionPoSt(sealedSectors, challenge[:], candidates[:1])
+	proof1, err := sb.ComputeElectionPoSt(context.TODO(), mid, sealedSectors, challenge[:], candidates[:1])
 	if err != nil {
 		return err
 	}

@@ -402,7 +405,7 @@ func main() {
 	epost1 := time.Now()

 	log.Info("computing election post snark (hot)")
-	proof2, err := sb.ComputeElectionPoSt(sealedSectors, challenge[:], candidates[:1])
+	proof2, err := sb.ComputeElectionPoSt(context.TODO(), mid, sealedSectors, challenge[:], candidates[:1])
 	if err != nil {
 		return err
 	}

@@ -528,6 +531,10 @@ var proveCmd = &cli.Command{
 		if err != nil {
 			return err
 		}
+		mid, err := address.IDFromAddress(maddr)
+		if err != nil {
+			return err
+		}

 		ppt, spt, err := lapi.ProofTypeFromSectorSize(abi.SectorSize(c2in.SectorSize))
 		if err != nil {

@@ -535,7 +542,6 @@ var proveCmd = &cli.Command{
 		}

 		cfg := &sectorbuilder.Config{
-			Miner:         maddr,
 			SealProofType: spt,
 			PoStProofType: ppt,
 		}

@@ -547,7 +553,7 @@ var proveCmd = &cli.Command{

 		start := time.Now()

-		proof, err := sb.SealCommit2(context.TODO(), abi.SectorNumber(c2in.SectorNum), c2in.Phase1Out)
+		proof, err := sb.SealCommit2(context.TODO(), abi.SectorID{Miner: abi.ActorID(mid), Number: abi.SectorNumber(c2in.SectorNum)}, c2in.Phase1Out)
 		if err != nil {
 			return err
 		}
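Worth noting: every change in this file follows the same pattern; sectorbuilder calls now identify sectors by abi.SectorID (miner actor ID plus sector number) rather than a bare abi.SectorNumber, which lets shared workers and storage distinguish sectors belonging to different miners. A minimal before/after sketch, with variable names assumed:

```go
// Before (old signature): the sector was identified by its number alone.
// pc1o, err := sb.SealPreCommit1(ctx, sectorNum, ticket, pieces)

// After: the call carries the owning miner's actor ID as well.
sid := abi.SectorID{
	Miner:  mid, // abi.ActorID, e.g. obtained via address.IDFromAddress(maddr)
	Number: sectorNum,
}
pc1o, err := sb.SealPreCommit1(ctx, sid, ticket, pieces)
```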
@@ -1,22 +1,39 @@
package main

import (
	"context"
	"encoding/json"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"path/filepath"

	"github.com/google/uuid"
	"github.com/gorilla/mux"
	logging "github.com/ipfs/go-log/v2"
	"golang.org/x/xerrors"
	"gopkg.in/urfave/cli.v2"

	paramfetch "github.com/filecoin-project/go-paramfetch"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/apistruct"
	"github.com/filecoin-project/lotus/build"
	lcli "github.com/filecoin-project/lotus/cli"
	"github.com/filecoin-project/lotus/lib/auth"
	"github.com/filecoin-project/lotus/lib/jsonrpc"
	"github.com/filecoin-project/lotus/lib/lotuslog"
	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/node/repo"
	"github.com/filecoin-project/lotus/storage/sectorstorage"
	"github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks"
	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

var log = logging.Logger("main")

const (
	workers   = 1 // TODO: Configurability
	transfers = 1
)
const FlagStorageRepo = "workerrepo"

func main() {
	lotuslog.SetupLogLevels()

@@ -33,7 +50,7 @@ func main() {
 		Version: build.UserVersion,
 		Flags: []cli.Flag{
 			&cli.StringFlag{
-				Name:    "repo",
+				Name:    FlagStorageRepo,
 				EnvVars: []string{"WORKER_PATH"},
 				Value:   "~/.lotusworker", // TODO: Consider XDG_DATA_HOME
 			},

@@ -47,12 +64,6 @@ func main() {
 			Usage: "enable use of GPU for mining operations",
 			Value: true,
 		},
-		&cli.BoolFlag{
-			Name: "no-precommit",
-		},
-		&cli.BoolFlag{
-			Name: "no-commit",
-		},
 	},

 	Commands: local,

@@ -66,100 +77,206 @@ func main() {
	}
}

type limits struct {
	workLimit     chan struct{}
	transferLimit chan struct{}
}

var runCmd = &cli.Command{
	Name:  "run",
	Usage: "Start lotus worker",
	Flags: []cli.Flag{
		&cli.StringFlag{
			Name:  "address",
			Usage: "Locally reachable address",
		},
		&cli.BoolFlag{
			Name:  "no-local-storage",
			Usage: "don't use storageminer repo for sector storage",
		},
	},
	Action: func(cctx *cli.Context) error {
		/* if !cctx.Bool("enable-gpu-proving") {
			os.Setenv("BELLMAN_NO_GPU", "true")
		if !cctx.Bool("enable-gpu-proving") {
			os.Setenv("BELLMAN_NO_GPU", "true")
		}

		if cctx.String("address") == "" {
			return xerrors.Errorf("--address flag is required")
		}

		// Connect to storage-miner

		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
		if err != nil {
			return xerrors.Errorf("getting miner api: %w", err)
		}
		defer closer()
		ctx := lcli.ReqContext(cctx)
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		v, err := nodeApi.Version(ctx)
		if err != nil {
			return err
		}
		if v.APIVersion != build.APIVersion {
			return xerrors.Errorf("lotus-storage-miner API version doesn't match: local: ", api.Version{APIVersion: build.APIVersion})
		}
		log.Infof("Remote version %s", v)

		// Check params

		act, err := nodeApi.ActorAddress(ctx)
		if err != nil {
			return err
		}
		ssize, err := nodeApi.ActorSectorSize(ctx, act)
		if err != nil {
			return err
		}

		if err := paramfetch.GetParams(build.ParametersJson(), uint64(ssize)); err != nil {
			return xerrors.Errorf("get params: %w", err)
		}

		// Open repo

		repoPath := cctx.String(FlagStorageRepo)
		r, err := repo.NewFS(repoPath)
		if err != nil {
			return err
		}

		ok, err := r.Exists()
		if err != nil {
			return err
		}
		if !ok {
			if err := r.Init(repo.Worker); err != nil {
				return err
			}

		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
		if err != nil {
			return xerrors.Errorf("getting miner api: %w", err)
		}
		defer closer()
		ctx := lcli.ReqContext(cctx)

		ainfo, err := lcli.GetAPIInfo(cctx, repo.StorageMiner)
		if err != nil {
			return xerrors.Errorf("could not get api info: %w", err)
		}
		_, storageAddr, err := manet.DialArgs(ainfo.Addr)

		r, err := homedir.Expand(cctx.String("repo"))
			lr, err := r.Lock(repo.Worker)
			if err != nil {
				return err
			}

		v, err := nodeApi.Version(ctx)
		if err != nil {
			return err
		}
		if v.APIVersion != build.APIVersion {
			return xerrors.Errorf("lotus-storage-miner API version doesn't match: local: ", api.Version{APIVersion: build.APIVersion})
			var localPaths []config.LocalPath

			if !cctx.Bool("no-local-storage") {
				b, err := json.MarshalIndent(&stores.LocalStorageMeta{
					ID:       stores.ID(uuid.New().String()),
					Weight:   10,
					CanSeal:  true,
					CanStore: false,
				}, "", " ")
				if err != nil {
					return xerrors.Errorf("marshaling storage config: %w", err)
				}

				if err := ioutil.WriteFile(filepath.Join(lr.Path(), "sectorstore.json"), b, 0644); err != nil {
					return xerrors.Errorf("persisting storage metadata (%s): %w", filepath.Join(lr.Path(), "sectorstore.json"), err)
				}

				localPaths = append(localPaths, config.LocalPath{
					Path: lr.Path(),
				})
			}

		go func() {
			<-ctx.Done()
			log.Warn("Shutting down..")
		}()

		limiter := &limits{
			workLimit:     make(chan struct{}, workers),
			transferLimit: make(chan struct{}, transfers),
			if err := lr.SetStorage(func(sc *config.StorageConfig) {
				sc.StoragePaths = append(sc.StoragePaths, localPaths...)
			}); err != nil {
				return xerrors.Errorf("set storage config: %w", err)
			}

		act, err := nodeApi.ActorAddress(ctx)
		if err != nil {
			return err
			{
				// init datastore for r.Exists
				_, err := lr.Datastore("/")
				if err != nil {
					return err
				}
			}
		ssize, err := nodeApi.ActorSectorSize(ctx, act)
		if err != nil {
			return err
			if err := lr.Close(); err != nil {
				return xerrors.Errorf("close repo: %w", err)
			}
		}

		if err := paramfetch.GetParams(build.ParametersJson(), uint64(ssize)); err != nil {
			return xerrors.Errorf("get params: %w", err)
		lr, err := r.Lock(repo.Worker)
		if err != nil {
			return err
		}

		log.Info("Opening local storage; connecting to master")

		localStore, err := stores.NewLocal(ctx, lr, nodeApi, []string{"http://" + cctx.String("address") + "/remote"})
		if err != nil {
			return err
		}

		// Setup remote sector store
		_, spt, err := api.ProofTypeFromSectorSize(ssize)
		if err != nil {
			return xerrors.Errorf("getting proof type: %w", err)
		}

		sminfo, err := lcli.GetAPIInfo(cctx, repo.StorageMiner)
		if err != nil {
			return xerrors.Errorf("could not get api info: %w", err)
		}

		remote := stores.NewRemote(localStore, nodeApi, sminfo.AuthHeader())

		// Create / expose the worker

		workerApi := &worker{
			LocalWorker: sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{
				SealProof: spt,
				TaskTypes: []sealtasks.TaskType{sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2},
			}, remote, localStore, nodeApi),
		}

		mux := mux.NewRouter()

		log.Info("Setting up control endpoint at " + cctx.String("address"))

		rpcServer := jsonrpc.NewServer()
		rpcServer.Register("Filecoin", apistruct.PermissionedWorkerAPI(workerApi))

		mux.Handle("/rpc/v0", rpcServer)
		mux.PathPrefix("/remote").HandlerFunc((&stores.FetchHandler{Local: localStore}).ServeHTTP)
		mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof

		ah := &auth.Handler{
			Verify: nodeApi.AuthVerify,
			Next:   mux.ServeHTTP,
		}

		srv := &http.Server{
			Handler: ah,
			BaseContext: func(listener net.Listener) context.Context {
				return ctx
			},
		}

		go func() {
			<-ctx.Done()
			log.Warn("Shutting down..")
			if err := srv.Shutdown(context.TODO()); err != nil {
				log.Errorf("shutting down RPC server failed: %s", err)
			}
			log.Warn("Graceful shutdown successful")
		}()

		/*ppt, spt, err := api.ProofTypeFromSectorSize(ssize)
		if err != nil {
			return err
		nl, err := net.Listen("tcp", cctx.String("address"))
		if err != nil {
			return err
		}

		log.Info("Waiting for tasks")

		go func() {
			if err := nodeApi.WorkerConnect(ctx, "ws://"+cctx.String("address")+"/rpc/v0"); err != nil {
				log.Errorf("Registering worker failed: %+v", err)
				cancel()
				return
			}
		}()

		/*sb, err := sectorbuilder.NewStandalone(&sectorbuilder.Config{
			SealProofType: spt,
			PoStProofType: ppt,
			Miner:         act,
			WorkerThreads: workers,
			Paths:         sectorbuilder.SimplePath(r),
		})
		if err != nil {
			return err
		}

		nQueues := workers + transfers
		var wg sync.WaitGroup
		wg.Add(nQueues)

		for i := 0; i < nQueues; i++ {
			go func() {
				defer wg.Done()

				/* if err := acceptJobs(ctx, nodeApi, sb, limiter, "http://"+storageAddr, ainfo.AuthHeader(), r, cctx.Bool("no-precommit"), cctx.Bool("no-commit")); err != nil {
					log.Warnf("%+v", err)
					return
				}
			}()
		}

		wg.Wait()*/
		return nil
		return srv.Serve(nl)
	},
}
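If the run command above executes with local storage enabled, the sectorstore.json it writes should look roughly like the following. The ID is a freshly generated UUID, so the value here is illustrative, while the other fields are the ones hard-coded in the code above (a seal-capable, non-long-term store):

```json
{
  "ID": "1a2b3c4d-0000-4000-8000-123456789abc",
  "Weight": 10,
  "CanSeal": true,
  "CanStore": false
}
```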
cmd/lotus-seal-worker/rpc.go (new file, 20 lines)

@@ -0,0 +1,20 @@
package main

import (
	"context"

	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/storage/sectorstorage"
)

type worker struct {
	*sectorstorage.LocalWorker
}

func (w *worker) Version(context.Context) (build.Version, error) {
	return build.APIVersion, nil
}

var _ storage.Sealer = &worker{}
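This file stays small because worker embeds *sectorstorage.LocalWorker: Go method promotion exposes LocalWorker's storage.Sealer methods on worker, and only Version has to be written by hand. A self-contained toy example of the same trick (all names made up):

```go
package main

import "fmt"

type Base struct{}

func (Base) Seal() string    { return "sealing" }
func (Base) Version() string { return "base" }

type Wrapper struct {
	Base // methods of Base are promoted onto Wrapper...
}

// ...but a method defined on Wrapper itself takes precedence, which is how
// worker could override methods while inheriting the rest from LocalWorker.
func (Wrapper) Version() string { return "wrapper" }

func main() {
	w := Wrapper{}
	fmt.Println(w.Seal())    // "sealing": promoted from Base
	fmt.Println(w.Version()) // "wrapper": shadows the promoted method
}
```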
@@ -1,132 +0,0 @@
package main

/*
import (
	"context"
	"net/http"

	"github.com/filecoin-project/go-sectorbuilder"
	"golang.org/x/xerrors"

	lapi "github.com/filecoin-project/lotus/api"
)

type worker struct {
	api           lapi.StorageMiner
	minerEndpoint string
	repo          string
	auth          http.Header

	limiter *limits
	sb      *sectorbuilder.SectorBuilder
}

func acceptJobs(ctx context.Context, api lapi.StorageMiner, sb *sectorbuilder.SectorBuilder, limiter *limits, endpoint string, auth http.Header, repo string, noprecommit, nocommit bool) error {
	w := &worker{
		api:           api,
		minerEndpoint: endpoint,
		auth:          auth,
		repo:          repo,

		limiter: limiter,
		sb:      sb,
	}

	tasks, err := api.WorkerQueue(ctx, sectorbuilder.WorkerCfg{
		NoPreCommit: noprecommit,
		NoCommit:    nocommit,
	})
	if err != nil {
		return err
	}

loop:
	for {
		log.Infof("Waiting for new task")

		select {
		case task := <-tasks:
			log.Infof("New task: %d, sector %d, action: %d", task.TaskID, task.SectorNum, task.Type)

			res := w.processTask(ctx, task)

			log.Infof("Task %d done, err: %+v", task.TaskID, res.GoErr)

			if err := api.WorkerDone(ctx, task.TaskID, res); err != nil {
				log.Error(err)
			}
		case <-ctx.Done():
			break loop
		}
	}

	log.Warn("acceptJobs exit")
	return nil
}

func (w *worker) processTask(ctx context.Context, task sectorbuilder.WorkerTask) sectorbuilder.SealRes {
	switch task.Type {
	case sectorbuilder.WorkerPreCommit:
	case sectorbuilder.WorkerCommit:
	default:
		return errRes(xerrors.Errorf("unknown task type %d", task.Type))
	}

	if err := w.fetchSector(task.SectorNum, task.Type); err != nil {
		return errRes(xerrors.Errorf("fetching sector: %w", err))
	}

	log.Infof("Data fetched, starting computation")

	var res sectorbuilder.SealRes

	switch task.Type {
	case sectorbuilder.WorkerPreCommit:
		w.limiter.workLimit <- struct{}{}
		sealedCid, unsealedCid, err := w.sb.SealPreCommit(ctx, task.SectorNum, task.SealTicket, task.Pieces)
		<-w.limiter.workLimit

		if err != nil {
			return errRes(xerrors.Errorf("precomitting: %w", err))
		}
		res.Rspco.CommD = unsealedCid
		res.Rspco.CommR = sealedCid

		if err := w.push("sealed", task.SectorNum); err != nil {
			return errRes(xerrors.Errorf("pushing precommited data: %w", err))
		}

		if err := w.push("cache", task.SectorNum); err != nil {
			return errRes(xerrors.Errorf("pushing precommited data: %w", err))
		}

		if err := w.remove("staging", task.SectorNum); err != nil {
			return errRes(xerrors.Errorf("cleaning up staged sector: %w", err))
		}
	case sectorbuilder.WorkerCommit:
		w.limiter.workLimit <- struct{}{}
		proof, err := w.sb.SealCommit(ctx, task.SectorNum, task.SealTicket, task.SealSeed, task.Pieces, task.SealedCID, task.UnsealedCID)
		<-w.limiter.workLimit

		if err != nil {
			return errRes(xerrors.Errorf("comitting: %w", err))
		}

		res.Proof = proof

		if err := w.push("cache", task.SectorNum); err != nil {
			return errRes(xerrors.Errorf("pushing precommited data: %w", err))
		}

		if err := w.remove("sealed", task.SectorNum); err != nil {
			return errRes(xerrors.Errorf("cleaning up sealed sector: %w", err))
		}
	}

	return res
}

func errRes(err error) sectorbuilder.SealRes {
	return sectorbuilder.SealRes{Err: err.Error(), GoErr: err}
}
*/
@@ -1,178 +0,0 @@
package main

/*
import (
	"fmt"
	"io"
	"mime"
	"net/http"
	"os"

	sectorbuilder "github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/go-sectorbuilder/fs"
	"github.com/filecoin-project/specs-actors/actors/abi"
	files "github.com/ipfs/go-ipfs-files"
	"golang.org/x/xerrors"
	"gopkg.in/cheggaaa/pb.v1"
	"path/filepath"

	"github.com/filecoin-project/lotus/lib/tarutil"
)

func (w *worker) sizeForType(typ string) int64 {
	size := int64(w.sb.SectorSize())
	if typ == "cache" {
		size *= 10
	}
	return size
}

func (w *worker) fetch(typ string, sectorID abi.SectorNumber) error {
	outname := filepath.Join(w.repo, typ, w.sb.SectorName(sectorID))

	url := w.minerEndpoint + "/remote/" + typ + "/" + fmt.Sprint(sectorID)
	log.Infof("Fetch %s %s", typ, url)

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return xerrors.Errorf("request: %w", err)
	}
	req.Header = w.auth

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return xerrors.Errorf("do request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		return xerrors.Errorf("non-200 code: %d", resp.StatusCode)
	}

	bar := pb.New64(w.sizeForType(typ))
	bar.ShowPercent = true
	bar.ShowSpeed = true
	bar.Units = pb.U_BYTES

	barreader := bar.NewProxyReader(resp.Body)

	bar.Start()
	defer bar.Finish()

	mediatype, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
	if err != nil {
		return xerrors.Errorf("parse media type: %w", err)
	}

	if err := os.RemoveAll(outname); err != nil {
		return xerrors.Errorf("removing dest: %w", err)
	}

	switch mediatype {
	case "application/x-tar":
		return tarutil.ExtractTar(barreader, outname)
	case "application/octet-stream":
		return files.WriteTo(files.NewReaderFile(barreader), outname)
	default:
		return xerrors.Errorf("unknown content type: '%s'", mediatype)
	}

}

func (w *worker) push(typ string, sectorID abi.SectorNumber) error {
	w.limiter.transferLimit <- struct{}{}
	defer func() {
		<-w.limiter.transferLimit
	}()

	filename, err := w.sb.SectorPath(fs.DataType(typ), sectorID)
	if err != nil {
		return err
	}

	url := w.minerEndpoint + "/remote/" + typ + "/" + fmt.Sprint(sectorID)
	log.Infof("Push %s %s", typ, url)

	stat, err := os.Stat(string(filename))
	if err != nil {
		return err
	}

	var r io.Reader
	if stat.IsDir() {
		r, err = tarutil.TarDirectory(string(filename))
	} else {
		r, err = os.OpenFile(string(filename), os.O_RDONLY, 0644)
	}
	if err != nil {
		return xerrors.Errorf("opening push reader: %w", err)
	}

	bar := pb.New64(w.sizeForType(typ))
	bar.ShowPercent = true
	bar.ShowSpeed = true
	bar.ShowCounters = true
	bar.Units = pb.U_BYTES

	bar.Start()
	defer bar.Finish()
	//todo set content size

	header := w.auth

	if stat.IsDir() {
		header.Set("Content-Type", "application/x-tar")
	} else {
		header.Set("Content-Type", "application/octet-stream")
	}

	req, err := http.NewRequest("PUT", url, bar.NewProxyReader(r))
	if err != nil {
		return err
	}
	req.Header = header

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	if resp.StatusCode != 200 {
		return xerrors.Errorf("non-200 response: %d", resp.StatusCode)
	}

	if err := resp.Body.Close(); err != nil {
		return err
	}

	// TODO: keep files around for later stages of sealing
	return w.remove(typ, sectorID)
}

func (w *worker) remove(typ string, sectorID abi.SectorNumber) error {
	filename := filepath.Join(w.repo, typ, w.sb.SectorName(sectorID))
	return os.RemoveAll(filename)
}

func (w *worker) fetchSector(sectorID abi.SectorNumber, typ sectorbuilder.WorkerTaskType) error {
	w.limiter.transferLimit <- struct{}{}
	defer func() {
		<-w.limiter.transferLimit
	}()

	var err error
	switch typ {
	case sectorbuilder.WorkerPreCommit:
		err = w.fetch("staging", sectorID)
	case sectorbuilder.WorkerCommit:
		err = w.fetch("sealed", sectorID)
		if err != nil {
			return xerrors.Errorf("fetch sealed: %w", err)
		}
		err = w.fetch("cache", sectorID)
	}
	if err != nil {
		return xerrors.Errorf("fetch failed: %w", err)
	}
	return nil
}
*/
@@ -26,7 +26,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/wallet"
 	"github.com/filecoin-project/lotus/genesis"
-	"github.com/filecoin-project/lotus/node/config"
+	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
 )

 var log = logging.Logger("preseal")
@@ -42,8 +42,12 @@ func PreSeal(maddr address.Address, pt abi.RegisteredProof, offset abi.SectorNum
 		return nil, nil, err
 	}

+	mid, err := address.IDFromAddress(maddr)
+	if err != nil {
+		return nil, nil, err
+	}
+
 	cfg := &sectorbuilder.Config{
-		Miner:         maddr,
 		SealProofType: spt,
 		PoStProofType: ppt,
 	}

@@ -55,8 +59,7 @@ func PreSeal(maddr address.Address, pt abi.RegisteredProof, offset abi.SectorNum
 	next := offset

 	sbfs := &fs.Basic{
-		Miner: maddr,
-		Root:  sbroot,
+		Root: sbroot,
 	}

 	sb, err := sectorbuilder.New(sbfs, cfg)

@@ -71,7 +74,7 @@ func PreSeal(maddr address.Address, pt abi.RegisteredProof, offset abi.SectorNum

 	var sealedSectors []*genesis.PreSeal
 	for i := 0; i < sectors; i++ {
-		sid := next
+		sid := abi.SectorID{Miner: abi.ActorID(mid), Number: next}
 		next++

 		pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(ssize).Unpadded(), rand.Reader)

@@ -89,7 +92,7 @@ func PreSeal(maddr address.Address, pt abi.RegisteredProof, offset abi.SectorNum
 			return nil, nil, xerrors.Errorf("commit: %w", err)
 		}

-		scid, ucid, err := sb.SealPreCommit2(context.TODO(), sid, in2)
+		cids, err := sb.SealPreCommit2(context.TODO(), sid, in2)
 		if err != nil {
 			return nil, nil, xerrors.Errorf("commit: %w", err)
 		}

@@ -98,11 +101,11 @@ func PreSeal(maddr address.Address, pt abi.RegisteredProof, offset abi.SectorNum
 			return nil, nil, xerrors.Errorf("trim cache: %w", err)
 		}

-		log.Warn("PreCommitOutput: ", sid, scid, ucid)
+		log.Warn("PreCommitOutput: ", sid, cids.Sealed, cids.Unsealed)
 		sealedSectors = append(sealedSectors, &genesis.PreSeal{
-			CommR:     scid,
-			CommD:     ucid,
-			SectorID:  sid,
+			CommR:     cids.Sealed,
+			CommD:     cids.Unsealed,
+			SectorID:  sid.Number,
 			ProofType: pt,
 		})
 	}

@@ -134,8 +137,8 @@ func PreSeal(maddr address.Address, pt abi.RegisteredProof, offset abi.SectorNum
 	}

 	{
-		b, err := json.MarshalIndent(&config.StorageMeta{
-			ID:       uuid.New().String(),
+		b, err := json.MarshalIndent(&stores.LocalStorageMeta{
+			ID:       stores.ID(uuid.New().String()),
 			Weight:   0, // read-only
 			CanSeal:  false,
 			CanStore: false,
@@ -12,18 +12,7 @@ import (
 	"path/filepath"
 	"strconv"

-	"github.com/filecoin-project/go-sectorbuilder"
-	"github.com/filecoin-project/specs-actors/actors/builtin"
-	"github.com/filecoin-project/specs-actors/actors/builtin/market"
-	miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
-	"github.com/filecoin-project/specs-actors/actors/builtin/power"
-	crypto2 "github.com/filecoin-project/specs-actors/actors/crypto"
 	"github.com/google/uuid"
-
-	"github.com/filecoin-project/go-address"
-	cborutil "github.com/filecoin-project/go-cbor-util"
-	paramfetch "github.com/filecoin-project/go-paramfetch"
-	"github.com/filecoin-project/specs-actors/actors/abi"
 	"github.com/ipfs/go-datastore"
 	"github.com/libp2p/go-libp2p-core/crypto"
 	"github.com/libp2p/go-libp2p-core/peer"

@@ -31,6 +20,17 @@ import (
 	"golang.org/x/xerrors"
 	"gopkg.in/urfave/cli.v2"

+	"github.com/filecoin-project/go-address"
+	cborutil "github.com/filecoin-project/go-cbor-util"
+	paramfetch "github.com/filecoin-project/go-paramfetch"
+	"github.com/filecoin-project/go-sectorbuilder"
+	"github.com/filecoin-project/specs-actors/actors/abi"
+	"github.com/filecoin-project/specs-actors/actors/builtin"
+	"github.com/filecoin-project/specs-actors/actors/builtin/market"
+	miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+	"github.com/filecoin-project/specs-actors/actors/builtin/power"
+	crypto2 "github.com/filecoin-project/specs-actors/actors/crypto"
+
 	lapi "github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/actors"

@@ -43,7 +43,8 @@ import (
 	"github.com/filecoin-project/lotus/node/repo"
 	"github.com/filecoin-project/lotus/storage"
 	"github.com/filecoin-project/lotus/storage/sealing"
-	"github.com/filecoin-project/lotus/storage/sealmgr/advmgr"
+	"github.com/filecoin-project/lotus/storage/sectorstorage"
+	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
 )

 var initCmd = &cli.Command{
@@ -173,7 +174,7 @@ var initCmd = &cli.Command{
 		var localPaths []config.LocalPath

 		if pssb := cctx.StringSlice("pre-sealed-sectors"); len(pssb) != 0 {
-			log.Infof("Setting up storage config with presealed sector: %v", pssb)
+			log.Infof("Setting up storage config with presealed sectors: %v", pssb)

 			for _, psp := range pssb {
 				psp, err := homedir.Expand(psp)

@@ -187,8 +188,8 @@ var initCmd = &cli.Command{
 		}

 		if !cctx.Bool("no-local-storage") {
-			b, err := json.MarshalIndent(&config.StorageMeta{
-				ID:       uuid.New().String(),
+			b, err := json.MarshalIndent(&stores.LocalStorageMeta{
+				ID:       stores.ID(uuid.New().String()),
 				Weight:   10,
 				CanSeal:  true,
 				CanStore: true,

@@ -248,11 +249,16 @@ func migratePreSealMeta(ctx context.Context, api lapi.FullNode, metadata string,
 		return xerrors.Errorf("reading preseal metadata: %w", err)
 	}

-	meta := genesis.Miner{}
-	if err := json.Unmarshal(b, &meta); err != nil {
+	psm := map[string]genesis.Miner{}
+	if err := json.Unmarshal(b, &psm); err != nil {
 		return xerrors.Errorf("unmarshaling preseal metadata: %w", err)
 	}

+	meta, ok := psm[maddr.String()]
+	if !ok {
+		return xerrors.Errorf("preseal file didn't contain metadata for miner %s", maddr)
+	}
+
 	maxSectorID := abi.SectorNumber(0)
 	for _, sector := range meta.Sectors {
 		sectorKey := datastore.NewKey(sealing.SectorStorePrefix).ChildString(fmt.Sprint(sector.SectorID))
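The preseal metadata file is now expected to be a JSON object keyed by miner address rather than a single genesis.Miner document. A small self-contained round-trip under that assumption ("t01000" is an illustrative ID address, not a value from this PR):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/filecoin-project/lotus/genesis"
)

func main() {
	// One entry per preseal miner; the zero value stands in for real metadata.
	in := map[string]genesis.Miner{
		"t01000": {},
	}
	b, _ := json.MarshalIndent(in, "", "  ")
	fmt.Println(string(b))

	// Decoding mirrors migratePreSealMeta above: unmarshal the map, then
	// index it with the address of the miner being imported.
	psm := map[string]genesis.Miner{}
	if err := json.Unmarshal(b, &psm); err != nil {
		panic(err)
	}
	if _, ok := psm["t01000"]; !ok {
		panic("preseal file didn't contain metadata for miner")
	}
}
```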
@@ -326,6 +332,8 @@ func migratePreSealMeta(ctx context.Context, api lapi.FullNode, metadata string,
 		}*/
 	}

+	log.Infof("Setting next sector ID to %d", maxSectorID+1)
+
 	buf := make([]byte, binary.MaxVarintLen64)
 	size := binary.PutUvarint(buf, uint64(maxSectorID+1))
 	return mds.Put(datastore.NewKey("/storage/nextid"), buf[:size])
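The next sector ID is persisted under the /storage/nextid key as an unsigned varint. A self-contained sketch of the encoding used above, together with the matching decode a reader of that key would perform:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	next := uint64(42) // stands in for uint64(maxSectorID + 1)

	// Encode exactly as migratePreSealMeta does before storing the bytes.
	buf := make([]byte, binary.MaxVarintLen64)
	size := binary.PutUvarint(buf, next)

	// Decode the stored slice back into the sector ID.
	got, n := binary.Uvarint(buf[:size])
	fmt.Println(got, n > 0) // 42 true
}
```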
@@ -391,15 +399,19 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode,
 		return err
 	}

-	smgr, err := advmgr.New(lr, &sectorbuilder.Config{
+	mid, err := address.IDFromAddress(a)
+	if err != nil {
+		return xerrors.Errorf("getting id address: %w", err)
+	}
+
+	smgr, err := sectorstorage.New(ctx, lr, stores.NewIndex(), &sectorbuilder.Config{
 		SealProofType: spt,
 		PoStProofType: ppt,
-		Miner:         a,
-	}, nil)
+	}, config.Storage{true, true, true}, nil, api)
 	if err != nil {
 		return err
 	}
-	epp := storage.NewElectionPoStProver(smgr)
+	epp := storage.NewElectionPoStProver(smgr, dtypes.MinerID(mid))

 	m := miner.NewMiner(api, epp)
 	{

@@ -407,17 +419,30 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode,
 			return xerrors.Errorf("failed to start up genesis miner: %w", err)
 		}

-		defer func() {
-			if err := m.Unregister(ctx, a); err != nil {
-				log.Error("failed to shut down storage miner: ", err)
-			}
-		}()
+		cerr := configureStorageMiner(ctx, api, a, peerid)

-		if err := configureStorageMiner(ctx, api, a, peerid); err != nil {
+		if err := m.Unregister(ctx, a); err != nil {
+			log.Error("failed to shut down storage miner: ", err)
+		}
+
+		if cerr != nil {
 			return xerrors.Errorf("failed to configure storage miner: %w", err)
 		}
 	}

+	if pssb := cctx.String("pre-sealed-metadata"); pssb != "" {
+		pssb, err := homedir.Expand(pssb)
+		if err != nil {
+			return err
+		}
+
+		log.Infof("Importing pre-sealed sector metadata for %s", a)
+
+		if err := migratePreSealMeta(ctx, api, pssb, a, mds); err != nil {
+			return xerrors.Errorf("migrating presealed sector metadata: %w", err)
+		}
+	}
+
 	return nil
 }
@@ -25,12 +25,12 @@ func main() {
 		dealsCmd,
 		infoCmd,
 		initCmd,
-		pledgeSectorCmd,
 		rewardsCmd,
 		runCmd,
 		sectorsCmd,
+		storageCmd,
 		setPriceCmd,
 		workersCmd,
 	}
 	jaeger := tracing.SetupJaegerTracing("lotus")
 	defer func() {

@@ -71,7 +71,7 @@ func main() {
 		},
 	},

-		Commands: append(local, lcli.Commands...),
+		Commands: append(local, lcli.CommonCommands...),
 	}
 	app.Setup()
 	app.Metadata["repoType"] = repo.StorageMiner
@@ -32,7 +32,7 @@ var setPriceCmd = &cli.Command{
 			return err
 		}

-		return api.SetPrice(ctx, types.BigInt(fp))
+		return api.MarketSetPrice(ctx, types.BigInt(fp))
 	},
 }
@@ -20,8 +20,10 @@ import (
 	lcli "github.com/filecoin-project/lotus/cli"
 	"github.com/filecoin-project/lotus/lib/auth"
 	"github.com/filecoin-project/lotus/lib/jsonrpc"
+	"github.com/filecoin-project/lotus/lib/ulimit"
 	"github.com/filecoin-project/lotus/node"
 	"github.com/filecoin-project/lotus/node/impl"
+	"github.com/filecoin-project/lotus/node/modules/dtypes"
 	"github.com/filecoin-project/lotus/node/repo"
 )

@@ -29,10 +31,6 @@ var runCmd = &cli.Command{
 	Name:  "run",
 	Usage: "Start a lotus storage miner process",
 	Flags: []cli.Flag{
-		&cli.StringFlag{
-			Name:  "api",
-			Value: "2345",
-		},
 		&cli.BoolFlag{
 			Name:  "enable-gpu-proving",
 			Usage: "enable use of GPU for mining operations",

@@ -42,6 +40,11 @@ var runCmd = &cli.Command{
 			Name:  "nosync",
 			Usage: "don't check full-node sync status",
 		},
+		&cli.BoolFlag{
+			Name:  "manage-fdlimit",
+			Usage: "manage open file limit",
+			Value: true,
+		},
 	},
 	Action: func(cctx *cli.Context) error {
 		if !cctx.Bool("enable-gpu-proving") {

@@ -60,6 +63,12 @@ var runCmd = &cli.Command{
 			return err
 		}

+		if cctx.Bool("manage-fdlimit") {
+			if _, _, err := ulimit.ManageFdLimit(); err != nil {
+				log.Errorf("setting file descriptor limit: %s", err)
+			}
+		}
+
 		if v.APIVersion != build.APIVersion {
 			return xerrors.Errorf("lotus-daemon API version doesn't match: local: %s", api.Version{APIVersion: build.APIVersion})
 		}

@@ -93,13 +102,8 @@ var runCmd = &cli.Command{
 		node.Repo(r),

 		node.ApplyIf(func(s *node.Settings) bool { return cctx.IsSet("api") },
-			node.Override(node.SetApiEndpointKey, func(lr repo.LockedRepo) error {
-				apima, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/" +
-					cctx.String("api"))
-				if err != nil {
-					return err
-				}
-				return lr.SetAPIEndpoint(apima)
+			node.Override(new(dtypes.APIEndpoint), func() (dtypes.APIEndpoint, error) {
+				return multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/" + cctx.String("api"))
 			})),
 		node.Override(new(api.FullNode), nodeApi),
 	)
@@ -18,8 +18,20 @@ import (
 	lcli "github.com/filecoin-project/lotus/cli"
 )

-var pledgeSectorCmd = &cli.Command{
-	Name:  "pledge-sector",
+var sectorsCmd = &cli.Command{
+	Name:  "sectors",
+	Usage: "interact with sector store",
+	Subcommands: []*cli.Command{
+		sectorsStatusCmd,
+		sectorsListCmd,
+		sectorsRefsCmd,
+		sectorsUpdateCmd,
+		sectorsPledgeCmd,
+	},
+}
+
+var sectorsPledgeCmd = &cli.Command{
+	Name:  "pledge",
 	Usage: "store random data in a sector",
 	Action: func(cctx *cli.Context) error {
 		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)

@@ -33,17 +45,6 @@ var pledgeSectorCmd = &cli.Command{
 	},
 }

-var sectorsCmd = &cli.Command{
-	Name:  "sectors",
-	Usage: "interact with sector store",
-	Subcommands: []*cli.Command{
-		sectorsStatusCmd,
-		sectorsListCmd,
-		sectorsRefsCmd,
-		sectorsUpdateCmd,
-	},
-}
-
 var sectorsStatusCmd = &cli.Command{
 	Name:  "status",
 	Usage: "Get the seal status of a sector by its ID",
@ -2,17 +2,26 @@ package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
"golang.org/x/xerrors"
|
||||
"gopkg.in/urfave/cli.v2"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-sectorbuilder"
|
||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
|
||||
)
|
||||
|
||||
const metaFile = "sectorstore.json"
|
||||
@ -22,6 +31,8 @@ var storageCmd = &cli.Command{
|
||||
Usage: "manage sector storage",
|
||||
Subcommands: []*cli.Command{
|
||||
storageAttachCmd,
|
||||
storageListCmd,
|
||||
storageFindCmd,
|
||||
},
|
||||
}
|
||||
|
||||
@ -79,8 +90,8 @@ var storageAttachCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
cfg := &config.StorageMeta{
|
||||
ID: uuid.New().String(),
|
||||
cfg := &stores.LocalStorageMeta{
|
||||
ID: stores.ID(uuid.New().String()),
|
||||
Weight: cctx.Uint64("weight"),
|
||||
CanSeal: cctx.Bool("seal"),
|
||||
CanStore: cctx.Bool("store"),
|
||||
@ -103,3 +114,235 @@ var storageAttachCmd = &cli.Command{
|
||||
return nodeApi.StorageAddLocal(ctx, p)
|
||||
},
|
||||
}
|
||||
|
||||
var storageListCmd = &cli.Command{
|
||||
Name: "list",
|
||||
Usage: "list local storage paths",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
st, err := nodeApi.StorageList(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
local, err := nodeApi.StorageLocal(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sorted := make([]struct {
|
||||
stores.ID
|
||||
sectors []stores.Decl
|
||||
}, 0, len(st))
|
||||
for id, decls := range st {
|
||||
sorted = append(sorted, struct {
|
||||
stores.ID
|
||||
sectors []stores.Decl
|
||||
}{id, decls})
|
||||
}
|
||||
|
||||
sort.Slice(sorted, func(i, j int) bool {
|
||||
return sorted[i].ID < sorted[j].ID
|
||||
})
|
||||
|
||||
for _, s := range sorted {
|
||||
|
||||
var cnt [3]int
|
||||
for _, decl := range s.sectors {
|
||||
for i := range cnt {
|
||||
if decl.SectorFileType&(1<<i) != 0 {
|
||||
cnt[i]++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pingStart := time.Now()
|
||||
st, err := nodeApi.StorageStat(ctx, s.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ping := time.Now().Sub(pingStart)
|
||||
|
||||
fmt.Printf("%s:\n", s.ID)
|
||||
fmt.Printf("\tUnsealed: %d; Sealed: %d; Caches: %d\n", cnt[0], cnt[1], cnt[2])
|
||||
fmt.Printf("\tSpace Used: %s/%s %d%% (%s avail)\n",
|
||||
types.SizeStr(types.NewInt(st.Capacity-st.Available)),
|
||||
types.SizeStr(types.NewInt(st.Capacity)),
|
||||
(st.Capacity-st.Available)*100/st.Capacity,
|
||||
types.SizeStr(types.NewInt(st.Available)))
|
||||
|
||||
si, err := nodeApi.StorageInfo(ctx, s.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Print("\t")
|
||||
if si.CanSeal || si.CanStore {
|
||||
fmt.Printf("Weight: %d; Use: ", si.Weight)
|
||||
if si.CanSeal {
|
||||
fmt.Print("Seal ")
|
||||
}
|
||||
if si.CanStore {
|
||||
fmt.Print("Store")
|
||||
}
|
||||
fmt.Println("")
|
||||
} else {
|
||||
fmt.Println("Use: ReadOnly")
|
||||
}
|
||||
|
||||
if localPath, ok := local[s.ID]; ok {
|
||||
fmt.Printf("\tLocal: %s\n", localPath)
|
||||
}
|
||||
for i, l := range si.URLs {
|
||||
var rtt string
|
||||
if _, ok := local[s.ID]; !ok && i == 0 {
|
||||
rtt = " (latency: " + ping.Truncate(time.Microsecond*100).String() + ")"
|
||||
}
|
||||
|
||||
fmt.Printf("\tURL: %s%s\n", l, rtt) // TODO; try pinging maybe?? print latency?
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
type storedSector struct {
|
||||
id stores.ID
|
||||
store stores.StorageInfo
|
||||
|
||||
unsealed, sealed, cache bool
|
||||
}
|
||||
|
||||
var storageFindCmd = &cli.Command{
|
||||
Name: "find",
|
||||
Usage: "find sector in the storage system",
|
||||
ArgsUsage: "[sector number]",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
ma, err := nodeApi.ActorAddress(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mid, err := address.IDFromAddress(ma)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !cctx.Args().Present() {
|
||||
return xerrors.New("Usage: lotus-storage-miner storage find [sector number]")
|
||||
}
|
||||
|
||||
snum, err := strconv.ParseUint(cctx.Args().First(), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sid := abi.SectorID{
|
||||
Miner: abi.ActorID(mid),
|
||||
Number: abi.SectorNumber(snum),
|
||||
}
|
||||
|
||||
u, err := nodeApi.StorageFindSector(ctx, sid, sectorbuilder.FTUnsealed, false)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("finding unsealed: %w", err)
|
||||
}
|
||||
|
||||
s, err := nodeApi.StorageFindSector(ctx, sid, sectorbuilder.FTSealed, false)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("finding sealed: %w", err)
|
||||
}
|
||||
|
||||
c, err := nodeApi.StorageFindSector(ctx, sid, sectorbuilder.FTCache, false)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("finding cache: %w", err)
|
||||
}
|
||||
|
||||
byId := map[stores.ID]*storedSector{}
|
||||
for _, info := range u {
|
||||
sts, ok := byId[info.ID]
|
||||
if !ok {
|
||||
sts = &storedSector{
|
||||
id: info.ID,
|
||||
store: info,
|
||||
}
|
||||
byId[info.ID] = sts
|
||||
}
|
||||
sts.unsealed = true
|
||||
}
|
||||
for _, info := range s {
|
||||
sts, ok := byId[info.ID]
|
||||
if !ok {
|
||||
sts = &storedSector{
|
||||
id: info.ID,
|
||||
store: info,
|
||||
}
|
||||
byId[info.ID] = sts
|
||||
}
|
||||
sts.sealed = true
|
||||
}
|
||||
for _, info := range c {
|
||||
sts, ok := byId[info.ID]
|
||||
if !ok {
|
||||
sts = &storedSector{
|
||||
id: info.ID,
|
||||
store: info,
|
||||
}
|
||||
byId[info.ID] = sts
|
||||
}
|
||||
sts.cache = true
|
||||
}
|
||||
|
||||
local, err := nodeApi.StorageLocal(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var out []*storedSector
|
||||
for _, sector := range byId {
|
||||
out = append(out, sector)
|
||||
}
|
||||
sort.Slice(out, func(i, j int) bool {
|
||||
return out[i].id < out[j].id
|
||||
})
|
||||
|
||||
for _, info := range out {
|
||||
var types string
|
||||
if info.unsealed {
|
||||
types += "Unsealed, "
|
||||
}
|
||||
if info.sealed {
|
||||
types += "Sealed, "
|
||||
}
|
||||
if info.cache {
|
||||
types += "Cache, "
|
||||
}
|
||||
|
||||
fmt.Printf("In %s (%s)\n", info.id, types[:len(types)-2])
|
||||
fmt.Printf("\tSealing: %t; Storage: %t\n", info.store.CanSeal, info.store.CanStore)
if localPath, ok := local[info.id]; ok {
fmt.Printf("\tLocal (%s)\n", localPath)
} else {
fmt.Printf("\tRemote\n")
}
for _, l := range info.store.URLs {
fmt.Printf("\tURL: %s\n", l)
}
}

return nil
},
}
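A sketch of the new `find` subcommand in use, assembled from the usage string and Printf formats above; the sector number, storage ID, and path below are hypothetical:

$ lotus-storage-miner storage find 1
In f225e429-7424-4c2c-a13b-52fc2d3a32d5 (Sealed, Cache)
	Sealing: true; Storage: false
	Local (/var/lib/lotus-sealing)
	URL: http://10.0.0.5:2345/remote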

86
cmd/lotus-storage-miner/workers.go
Normal file
@ -0,0 +1,86 @@
package main

import (
"fmt"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"gopkg.in/urfave/cli.v2"
"sort"

lcli "github.com/filecoin-project/lotus/cli"
)

var workersCmd = &cli.Command{
Name: "workers",
Usage: "interact with workers",
Subcommands: []*cli.Command{
workersListCmd,
},
}

var workersListCmd = &cli.Command{
Name: "list",
Usage: "list workers",
Action: func(cctx *cli.Context) error {
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()

ctx := lcli.ReqContext(cctx)

stats, err := nodeApi.WorkerStats(ctx)
if err != nil {
return err
}

type sortableStat struct {
id uint64
api.WorkerStats
}

st := make([]sortableStat, 0, len(stats))
for id, stat := range stats {
st = append(st, sortableStat{id, stat})
}

sort.Slice(st, func(i, j int) bool {
return st[i].id < st[j].id
})

for _, stat := range st {
gpuUse := "not "
if stat.GpuUsed {
gpuUse = ""
}

fmt.Printf("Worker %d, host %s\n", stat.id, stat.Info.Hostname)

if stat.CpuUse != -1 {
fmt.Printf("\tCPU: %d core(s) in use\n", stat.CpuUse)
} else {
fmt.Printf("\tCPU: all cores in use\n")
}

for _, gpu := range stat.Info.Resources.GPUs {
fmt.Printf("\tGPU: %s, %sused\n", gpu, gpuUse)
}

fmt.Printf("\tMemory: System: Physical %s, Swap %s, Reserved %s (%d%% phys)\n",
types.SizeStr(types.NewInt(stat.Info.Resources.MemPhysical)),
types.SizeStr(types.NewInt(stat.Info.Resources.MemSwap)),
types.SizeStr(types.NewInt(stat.Info.Resources.MemReserved)),
stat.Info.Resources.MemReserved*100/stat.Info.Resources.MemPhysical)

fmt.Printf("\t\tUsed: Physical %s (%d%% phys), Virtual %s (%d%% phys, %d%% virt)\n",
types.SizeStr(types.NewInt(stat.MemUsedMin)),
stat.MemUsedMin*100/stat.Info.Resources.MemPhysical,
types.SizeStr(types.NewInt(stat.MemUsedMax)),
stat.MemUsedMax*100/stat.Info.Resources.MemPhysical,
stat.MemUsedMax*100/(stat.Info.Resources.MemPhysical+stat.Info.Resources.MemSwap))
}

return nil
},
}
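Based on the Printf formats above, `workers list` output should look roughly like the following; every value here is invented for illustration:

$ lotus-storage-miner workers list
Worker 0, host sealer-01
	CPU: 8 core(s) in use
	GPU: GeForce RTX 2080, not used
	Memory: System: Physical 128 GiB, Swap 8 GiB, Reserved 2 GiB (1% phys)
		Used: Physical 64 GiB (50% phys), Virtual 96 GiB (75% phys, 70% virt)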

2
extern/filecoin-ffi
vendored
@ -1 +1 @@
Subproject commit f20cfbe28d99beda69e5416c6829927945116428
Subproject commit 41b20ed16500eb5b4bacd07ec8aee386257e56da

7
go.mod
@ -12,6 +12,7 @@ require (
github.com/coreos/go-systemd/v22 v22.0.0
github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f // indirect
github.com/docker/go-units v0.4.0
github.com/elastic/go-sysinfo v1.3.0
github.com/filecoin-project/chain-validation v0.0.6-0.20200324185232-f581621b7fbf
github.com/filecoin-project/filecoin-ffi v0.0.0-20200304181354-4446ff8a1bb9
github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be
@ -23,11 +24,11 @@ require (
github.com/filecoin-project/go-fil-markets v0.0.0-20200318012938-6403a5bda668
github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663
github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200314022627-38af9db49ba2
github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200317221918-42574fc2aab9
github.com/filecoin-project/go-statemachine v0.0.0-20200226041606-2074af6d51d9
github.com/filecoin-project/go-statestore v0.1.0
github.com/filecoin-project/specs-actors v0.0.0-20200321055844-54fa2e8da1c2
github.com/filecoin-project/specs-storage v0.0.0-20200303233430-1a5a408f7513
github.com/filecoin-project/specs-storage v0.0.0-20200317133846-063ba163b217
github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1
github.com/go-ole/go-ole v1.2.4 // indirect
github.com/google/uuid v1.1.1
@ -108,7 +109,7 @@ require (
golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6 // indirect
golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
golang.org/x/net v0.0.0-20200301022130-244492dfa37a // indirect
golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d // indirect
golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566 // indirect
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543

27
go.sum
@ -10,6 +10,7 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/GeertJohan/go.incremental v1.0.0 h1:7AH+pY1XUgQE4Y1HcXYaMqAI0m9yrFqo/jt0CW30vsg=
github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0=
github.com/GeertJohan/go.rice v1.0.0 h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voiMLQ=
github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0=
@ -25,6 +26,7 @@ github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrU
github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo=
github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw=
github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@ -95,10 +97,12 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/elastic/go-sysinfo v1.3.0 h1:eb2XFGTMlSwG/yyU9Y8jVAYLIzU2sFzWXwo2gmetyrE=
github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0=
github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY=
github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU=
github.com/fatih/color v1.8.0 h1:5bzFgL+oy7JITMTxUPJ00n7VxmYd/PdMp5mHFX40/RY=
github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8=
github.com/filecoin-project/chain-validation v0.0.6-0.20200324001434-7c1ecd76e3eb h1:tynvU1AYRXYAzRrMX6VZGYgUg3+/lweulbAyeZqET/I=
github.com/filecoin-project/chain-validation v0.0.6-0.20200324001434-7c1ecd76e3eb/go.mod h1:YTLxUr6gOZpkUaXzLe7OZ4s1dpfJGp2FY/J2/K5DJqc=
github.com/filecoin-project/chain-validation v0.0.6-0.20200324185232-f581621b7fbf h1:GiCNQc9LuIrH2buA2T07FiM2WEMgUllJ/ET28cOQY7E=
github.com/filecoin-project/chain-validation v0.0.6-0.20200324185232-f581621b7fbf/go.mod h1:YTLxUr6gOZpkUaXzLe7OZ4s1dpfJGp2FY/J2/K5DJqc=
github.com/filecoin-project/go-address v0.0.0-20200107215422-da8eea2842b5/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0=
@ -123,8 +127,8 @@ github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6/go.m
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663 h1:eYxi6vI5CyeXD15X1bB3bledDXbqKxqf0wQzTLgwYwA=
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200311224656-7d83652bdbed/go.mod h1:xAd/X905Ncgj8kkHsP2pmQUf6MQT2qJTDcOEfkwCjYc=
github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200314022627-38af9db49ba2 h1:4RjDynwobd/UYlZUprRg/GMEsMP6fAfVRTXgFs4XNfo=
github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200314022627-38af9db49ba2/go.mod h1:NcE+iL0bbYnamGmYQgCPVGbSaf8VF2/CLra/61B3I3I=
github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200317221918-42574fc2aab9 h1:ROfxm5X9dMATYk6MvNe8WBcL2yaelpgGEvve5CnZ1+g=
github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200317221918-42574fc2aab9/go.mod h1:3c3MEU9GHLlau37+MmefFNunTo9sVEKfjaJuHBgksdY=
github.com/filecoin-project/go-statemachine v0.0.0-20200226041606-2074af6d51d9 h1:k9qVR9ItcziSB2rxtlkN/MDWNlbsI6yzec+zjUatLW0=
github.com/filecoin-project/go-statemachine v0.0.0-20200226041606-2074af6d51d9/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIiWBRilQjQ+5IiwdQ=
@ -135,8 +139,8 @@ github.com/filecoin-project/specs-actors v0.0.0-20200302223606-0eaf97b10aaf/go.m
github.com/filecoin-project/specs-actors v0.0.0-20200306000749-99e98e61e2a0/go.mod h1:0HAWYrvajFHDgRaKbF0rl+IybVLZL5z4gQ8koCMPhoU=
github.com/filecoin-project/specs-actors v0.0.0-20200321055844-54fa2e8da1c2 h1:6oyLnDQTUnqaVSy+GxiMsfS5EYZm6xtzXcylw29NtOk=
github.com/filecoin-project/specs-actors v0.0.0-20200321055844-54fa2e8da1c2/go.mod h1:5WngRgTN5Eo4+0SjCBqLzEr2l6Mj45DrP2606gBhqI0=
github.com/filecoin-project/specs-storage v0.0.0-20200303233430-1a5a408f7513 h1:okBx3lPomwDxlPmRvyP078BwivDfdxNUlpCDhDD0ia8=
github.com/filecoin-project/specs-storage v0.0.0-20200303233430-1a5a408f7513/go.mod h1:sC2Ck2l1G8hXI5Do/3sp0yxbMRMnukbFwP9KF1CRFLw=
github.com/filecoin-project/specs-storage v0.0.0-20200317133846-063ba163b217 h1:doPA79fSLg5TnY2rJhXs5dIZHP3IoCcIiCLKFGfgrY8=
github.com/filecoin-project/specs-storage v0.0.0-20200317133846-063ba163b217/go.mod h1:dUmzHS7izOD6HW3/JpzFrjxnptxbsHXBlO8puK2UzBk=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 h1:EzDjxMg43q1tA2c0MV3tNbaontnHLplHyFF6M5KiVP0=
@ -354,7 +358,10 @@ github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsj
github.com/jbenet/goprocess v0.1.3 h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10=
github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4=
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
@ -655,6 +662,7 @@ github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS
github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg=
github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg=
github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@ -703,6 +711,8 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0 h1:c8R11WC8m7KNMkTv/0+Be8vvwo4I3/Ut9AC2FW8fX3U=
github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
@ -750,7 +760,9 @@ github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli/v2 v2.0.0 h1:+HU9SCbu8GnEUFtIBfuUNXN39ofWViIEJIp6SURMpCg=
github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 h1:8kxMKmKzXXL4Ru1nyhvdms/JjWt+3YLpvRb/bAjO/y0=
@ -895,6 +907,7 @@ golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -971,5 +984,7 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54=
launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM=
@ -14,5 +14,7 @@ func SetupLogLevels() {
logging.SetLogLevel("bitswap", "WARN")
//logging.SetLogLevel("pubsub", "WARN")
logging.SetLogLevel("connmgr", "WARN")
logging.SetLogLevel("advmgr", "DEBUG")
logging.SetLogLevel("stores", "DEBUG")
}

116
lib/ulimit/ulimit.go
Normal file
@ -0,0 +1,116 @@
package ulimit

// from go-ipfs

import (
"fmt"
"os"
"strconv"
"syscall"

logging "github.com/ipfs/go-log"
)

var log = logging.Logger("ulimit")

var (
supportsFDManagement = false

// getLimit returns the soft and hard limits of the file descriptor count
getLimit func() (uint64, uint64, error)
// setLimit sets the soft and hard limits of the file descriptor count
setLimit func(uint64, uint64) error
)

// minimum file descriptor limit before we complain
const minFds = 2048

// default max file descriptor limit.
const maxFds = 16 << 10

// userMaxFDs returns the value of IPFS_FD_MAX
func userMaxFDs() uint64 {
// check if IPFS_FD_MAX is set and notify the user
// if it does not hold a valid fd count
if val := os.Getenv("IPFS_FD_MAX"); val != "" {
fds, err := strconv.ParseUint(val, 10, 64)
if err != nil {
log.Errorf("bad value for IPFS_FD_MAX: %s", err)
return 0
}
return fds
}
return 0
}

// ManageFdLimit raises the current max file descriptor count
// of the process based on the IPFS_FD_MAX value
func ManageFdLimit() (changed bool, newLimit uint64, err error) {
if !supportsFDManagement {
return false, 0, nil
}

targetLimit := uint64(maxFds)
userLimit := userMaxFDs()
if userLimit > 0 {
targetLimit = userLimit
}

soft, hard, err := getLimit()
if err != nil {
return false, 0, err
}

if targetLimit <= soft {
return false, 0, nil
}

// the soft limit is the value that the kernel enforces for the
// corresponding resource
// the hard limit acts as a ceiling for the soft limit
// an unprivileged process may only set its soft limit to a
// value in the range from 0 up to the hard limit
err = setLimit(targetLimit, targetLimit)
switch err {
case nil:
newLimit = targetLimit
case syscall.EPERM:
// lower limit if necessary.
if targetLimit > hard {
targetLimit = hard
}

// the process does not have permission so we should only
// set the soft value
err = setLimit(targetLimit, hard)
if err != nil {
err = fmt.Errorf("error setting ulimit without hard limit: %s", err)
break
}
newLimit = targetLimit

// Warn on lowered limit.

if newLimit < userLimit {
err = fmt.Errorf(
"failed to raise ulimit to IPFS_FD_MAX (%d): set to %d",
userLimit,
newLimit,
)
break
}

if userLimit == 0 && newLimit < minFds {
err = fmt.Errorf(
"failed to raise ulimit to minimum %d: set to %d",
minFds,
newLimit,
)
break
}
default:
err = fmt.Errorf("error setting ulimit: %s", err)
}

return newLimit > 0, newLimit, err
}
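ManageFdLimit keeps go-ipfs's IPFS_FD_MAX override, so an operator can request a specific ceiling through the environment; an illustrative run (the value is arbitrary):

IPFS_FD_MAX=16384 lotus-storage-miner run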

36
lib/ulimit/ulimit_freebsd.go
Normal file
@ -0,0 +1,36 @@
// +build freebsd

package ulimit

import (
"errors"
"math"

unix "golang.org/x/sys/unix"
)

func init() {
supportsFDManagement = true
getLimit = freebsdGetLimit
setLimit = freebsdSetLimit
}

func freebsdGetLimit() (uint64, uint64, error) {
rlimit := unix.Rlimit{}
err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlimit)
if (rlimit.Cur < 0) || (rlimit.Max < 0) {
return 0, 0, errors.New("invalid rlimits")
}
return uint64(rlimit.Cur), uint64(rlimit.Max), err
}

func freebsdSetLimit(soft uint64, max uint64) error {
if (soft > math.MaxInt64) || (max > math.MaxInt64) {
return errors.New("invalid rlimits")
}
rlimit := unix.Rlimit{
Cur: int64(soft),
Max: int64(max),
}
return unix.Setrlimit(unix.RLIMIT_NOFILE, &rlimit)
}

84
lib/ulimit/ulimit_test.go
Normal file
@ -0,0 +1,84 @@
// +build !windows

package ulimit

import (
"fmt"
"os"
"strings"
"syscall"
"testing"
)

func TestManageFdLimit(t *testing.T) {
t.Log("Testing file descriptor count")
if _, _, err := ManageFdLimit(); err != nil {
t.Errorf("Cannot manage file descriptors")
}

if maxFds != uint64(16<<10) {
t.Errorf("Maximum file descriptors default value changed")
}
}

func TestManageInvalidNFds(t *testing.T) {
t.Logf("Testing file descriptor invalidity")
var err error
if err = os.Unsetenv("IPFS_FD_MAX"); err != nil {
t.Fatal("Cannot unset the IPFS_FD_MAX env variable")
}

rlimit := syscall.Rlimit{}
if err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err != nil {
t.Fatal("Cannot get the file descriptor count")
}

value := rlimit.Max + rlimit.Cur
if err = os.Setenv("IPFS_FD_MAX", fmt.Sprintf("%d", value)); err != nil {
t.Fatal("Cannot set the IPFS_FD_MAX env variable")
}

t.Logf("setting ulimit to %d, max %d, cur %d", value, rlimit.Max, rlimit.Cur)

if changed, new, err := ManageFdLimit(); err == nil {
t.Errorf("ManageFdLimit should return an error: changed %t, new: %d", changed, new)
} else if err != nil {
flag := strings.Contains(err.Error(),
"failed to raise ulimit to IPFS_FD_MAX")
if !flag {
t.Error("ManageFdLimit returned unexpected error", err)
}
}

// unset all previous operations
if err = os.Unsetenv("IPFS_FD_MAX"); err != nil {
t.Fatal("Cannot unset the IPFS_FD_MAX env variable")
}
}

func TestManageFdLimitWithEnvSet(t *testing.T) {
t.Logf("Testing file descriptor manager with IPFS_FD_MAX set")
var err error
if err = os.Unsetenv("IPFS_FD_MAX"); err != nil {
t.Fatal("Cannot unset the IPFS_FD_MAX env variable")
}

rlimit := syscall.Rlimit{}
if err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err != nil {
t.Fatal("Cannot get the file descriptor count")
}

value := rlimit.Max - rlimit.Cur + 1
if err = os.Setenv("IPFS_FD_MAX", fmt.Sprintf("%d", value)); err != nil {
t.Fatal("Cannot set the IPFS_FD_MAX env variable")
}

if _, _, err = ManageFdLimit(); err != nil {
t.Errorf("Cannot manage file descriptor count")
}

// unset all previous operations
if err = os.Unsetenv("IPFS_FD_MAX"); err != nil {
t.Fatal("Cannot unset the IPFS_FD_MAX env variable")
}
}

27
lib/ulimit/ulimit_unix.go
Normal file
@ -0,0 +1,27 @@
// +build darwin linux netbsd openbsd

package ulimit

import (
unix "golang.org/x/sys/unix"
)

func init() {
supportsFDManagement = true
getLimit = unixGetLimit
setLimit = unixSetLimit
}

func unixGetLimit() (uint64, uint64, error) {
rlimit := unix.Rlimit{}
err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlimit)
return rlimit.Cur, rlimit.Max, err
}

func unixSetLimit(soft uint64, max uint64) error {
rlimit := unix.Rlimit{
Cur: soft,
Max: max,
}
return unix.Setrlimit(unix.RLIMIT_NOFILE, &rlimit)
}
@ -14,18 +14,18 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/storage"
"github.com/filecoin-project/lotus/storage/sealmgr"
"github.com/filecoin-project/lotus/storage/sectorstorage"
)

type retrievalProviderNode struct {
miner *storage.Miner
sealer sealmgr.Manager
sealer sectorstorage.SectorManager
full api.FullNode
}

// NewRetrievalProviderNode returns a new node adapter for a retrieval provider that talks to the
// Lotus Node
func NewRetrievalProviderNode(miner *storage.Miner, sealer sealmgr.Manager, full api.FullNode) retrievalmarket.RetrievalProviderNode {
func NewRetrievalProviderNode(miner *storage.Miner, sealer sectorstorage.SectorManager, full api.FullNode) retrievalmarket.RetrievalProviderNode {
return &retrievalProviderNode{miner, sealer, full}
}

@ -44,7 +44,17 @@ func (rpn *retrievalProviderNode) UnsealSector(ctx context.Context, sectorID uin
if err != nil {
return nil, err
}
return rpn.sealer.ReadPieceFromSealedSector(ctx, abi.SectorNumber(sectorID), sectorbuilder.UnpaddedByteIndex(offset), abi.UnpaddedPieceSize(length), si.Ticket.Value, *si.CommD)

mid, err := address.IDFromAddress(rpn.miner.Address())
if err != nil {
return nil, err
}

sid := abi.SectorID{
Miner: abi.ActorID(mid),
Number: abi.SectorNumber(sectorID),
}
return rpn.sealer.ReadPieceFromSealedSector(ctx, sid, sectorbuilder.UnpaddedByteIndex(offset), abi.UnpaddedPieceSize(length), si.Ticket.Value, *si.CommD)
}

func (rpn *retrievalProviderNode) SavePaymentVoucher(ctx context.Context, paymentChannel address.Address, voucher *paych.SignedVoucher, proof []byte, expectedAmount abi.TokenAmount, tok shared.TipSetToken) (abi.TokenAmount, error) {
@ -16,6 +16,7 @@ import (
pubsub "github.com/libp2p/go-libp2p-pubsub"
record "github.com/libp2p/go-libp2p-record"
"github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr-net"
"go.uber.org/fx"
"golang.org/x/xerrors"

@ -23,7 +24,9 @@ import (
"github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation"

sectorbuilder "github.com/filecoin-project/go-sectorbuilder"

"github.com/filecoin-project/specs-actors/actors/runtime"
storage2 "github.com/filecoin-project/specs-storage/storage"

@ -47,6 +50,7 @@ import (
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/hello"
"github.com/filecoin-project/lotus/node/impl"
"github.com/filecoin-project/lotus/node/impl/common"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/helpers"
@ -56,9 +60,9 @@ import (
"github.com/filecoin-project/lotus/paychmgr"
"github.com/filecoin-project/lotus/storage"
"github.com/filecoin-project/lotus/storage/sealing"
"github.com/filecoin-project/lotus/storage/sealmgr"
"github.com/filecoin-project/lotus/storage/sealmgr/advmgr"
"github.com/filecoin-project/lotus/storage/sectorblocks"
"github.com/filecoin-project/lotus/storage/sectorstorage"
"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

var log = logging.Logger("builder")
@ -259,13 +263,19 @@ func Online() Option {

// Storage miner
ApplyIf(func(s *Settings) bool { return s.nodeType == repo.StorageMiner },
Override(new(*sectorbuilder.Config), modules.SectorBuilderConfig),
Override(new(advmgr.LocalStorage), From(new(repo.LockedRepo))),
Override(new(advmgr.SectorIDCounter), modules.SectorIDCounter),
Override(new(*advmgr.Manager), advmgr.New),
Override(new(api.Common), From(new(common.CommonAPI))),

Override(new(sealmgr.Manager), From(new(*advmgr.Manager))),
Override(new(storage2.Prover), From(new(sealmgr.Manager))),
Override(new(*stores.Index), stores.NewIndex),
Override(new(stores.SectorIndex), From(new(*stores.Index))),
Override(new(dtypes.MinerID), modules.MinerID),
Override(new(dtypes.MinerAddress), modules.MinerAddress),
Override(new(*sectorbuilder.Config), modules.SectorBuilderConfig),
Override(new(stores.LocalStorage), From(new(repo.LockedRepo))),
Override(new(sealing.SectorIDCounter), modules.SectorIDCounter),
Override(new(*sectorstorage.Manager), modules.SectorStorage),

Override(new(sectorstorage.SectorManager), From(new(*sectorstorage.Manager))),
Override(new(storage2.Prover), From(new(sectorstorage.SectorManager))),

Override(new(*sectorblocks.SectorBlocks), sectorblocks.NewSectorBlocks),
Override(new(sealing.TicketFn), modules.SealTicketGen),
@ -318,15 +328,22 @@ func StorageMiner(out *api.StorageMiner) Option {
func ConfigCommon(cfg *config.Common) Option {
return Options(
func(s *Settings) error { s.Config = true; return nil },

Override(SetApiEndpointKey, func(lr repo.LockedRepo) error {
apima, err := multiaddr.NewMultiaddr(cfg.API.ListenAddress)
if err != nil {
return err
}
return lr.SetAPIEndpoint(apima)
Override(new(dtypes.APIEndpoint), func() (dtypes.APIEndpoint, error) {
return multiaddr.NewMultiaddr(cfg.API.ListenAddress)
}),
Override(SetApiEndpointKey, func(lr repo.LockedRepo, e dtypes.APIEndpoint) error {
return lr.SetAPIEndpoint(e)
}),
Override(new(sectorstorage.URLs), func(e dtypes.APIEndpoint) (sectorstorage.URLs, error) {
_, ip, err := manet.DialArgs(e)
if err != nil {
return nil, xerrors.Errorf("getting api endpoint dial args: %w", err)
}

var urls sectorstorage.URLs
urls = append(urls, "http://"+ip+"/remote") // TODO: This makes assumptions, and probably bad ones too
return urls, nil
}),
ApplyIf(func(s *Settings) bool { return s.Online },
Override(StartListeningKey, lp2p.StartListening(cfg.Libp2p.ListenAddresses)),
Override(ConnectionManagerKey, lp2p.ConnectionManager(
@ -365,7 +382,11 @@ func ConfigStorageMiner(c interface{}) Option {
return Error(xerrors.Errorf("invalid config from repo, got: %T", c))
}

return Options(ConfigCommon(&cfg.Common))
return Options(
ConfigCommon(&cfg.Common),

Override(new(config.Storage), cfg.Storage),
)
}

func Repo(r repo.Repo) Option {
@ -53,6 +53,10 @@ type Metrics struct {

// // Storage Miner
type Storage struct {
// Local worker config
AllowPreCommit1 bool
AllowPreCommit2 bool
AllowCommit bool
}

func defCommon() Common {
@ -86,7 +90,11 @@ func DefaultStorageMiner() *StorageMiner {
cfg := &StorageMiner{
Common: defCommon(),

Storage: Storage{},
Storage: Storage{
AllowPreCommit1: true,
AllowPreCommit2: true,
AllowCommit: true,
},
}
cfg.Common.API.ListenAddress = "/ip4/127.0.0.1/tcp/2345/http"
return cfg
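Lotus repo configs are serialized as TOML, so the new defaults should presumably round-trip to a section along these lines (an illustrative rendering, not taken from this diff):

[Storage]
AllowPreCommit1 = true
AllowPreCommit2 = true
AllowCommit = true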
@ -18,15 +18,6 @@ type StorageConfig struct {
StoragePaths []LocalPath
}

// [path]/metadata.json
type StorageMeta struct {
ID string
Weight uint64 // 0 = readonly

CanSeal bool
CanStore bool
}

func StorageFromFile(path string, def *StorageConfig) (*StorageConfig, error) {
file, err := os.Open(path)
switch {
@ -1,4 +1,4 @@
package impl
package common

import (
"context"
@ -3,18 +3,18 @@ package impl
import (
logging "github.com/ipfs/go-log/v2"

"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/node/impl/client"
"github.com/filecoin-project/lotus/node/impl/common"
"github.com/filecoin-project/lotus/node/impl/full"
"github.com/filecoin-project/lotus/node/impl/market"
"github.com/filecoin-project/lotus/node/impl/paych"

"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/node/impl/full"
)

var log = logging.Logger("node")

type FullNodeAPI struct {
CommonAPI
common.CommonAPI
full.ChainAPI
client.API
full.MpoolAPI
@ -7,7 +7,6 @@ import (
"os"
"strconv"

"github.com/gorilla/mux"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"

@ -20,13 +19,15 @@ import (
"github.com/filecoin-project/lotus/api/apistruct"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node/impl/common"
"github.com/filecoin-project/lotus/storage"
"github.com/filecoin-project/lotus/storage/sealmgr/advmgr"
"github.com/filecoin-project/lotus/storage/sectorblocks"
"github.com/filecoin-project/lotus/storage/sectorstorage"
"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

type StorageMinerAPI struct {
CommonAPI
common.CommonAPI

SectorBuilderConfig *sectorbuilder.Config
//SectorBuilder sectorbuilder.Interface
@ -36,7 +37,8 @@ type StorageMinerAPI struct {
Miner *storage.Miner
BlockMiner *miner.Miner
Full api.FullNode
StorageMgr *advmgr.Manager `optional:"true"`
StorageMgr *sectorstorage.Manager `optional:"true"`
*stores.Index
}

func (sm *StorageMinerAPI) ServeRemote(w http.ResponseWriter, r *http.Request) {
@ -46,132 +48,15 @@ func (sm *StorageMinerAPI) ServeRemote(w http.ResponseWriter, r *http.Request) {
return
}

mux := mux.NewRouter()

mux.HandleFunc("/remote/{type}/{id}", sm.remoteGetSector).Methods("GET")
mux.HandleFunc("/remote/{type}/{id}", sm.remotePutSector).Methods("PUT")

log.Infof("SERVEGETREMOTE %s", r.URL)

mux.ServeHTTP(w, r)
sm.StorageMgr.ServeHTTP(w, r)
}

func (sm *StorageMinerAPI) remoteGetSector(w http.ResponseWriter, r *http.Request) {
panic("todo")
/* vars := mux.Vars(r)

id, err := strconv.ParseUint(vars["id"], 10, 64)
if err != nil {
log.Error("parsing sector id: ", err)
w.WriteHeader(500)
return
}

path, err := sm.SectorBuilder.SectorPath(fs.DataType(vars["type"]), abi.SectorNumber(id))
if err != nil {
log.Error(err)
w.WriteHeader(500)
return
}

stat, err := os.Stat(string(path))
if err != nil {
log.Error(err)
w.WriteHeader(500)
return
}

var rd io.Reader
if stat.IsDir() {
rd, err = tarutil.TarDirectory(string(path))
w.Header().Set("Content-Type", "application/x-tar")
} else {
rd, err = os.OpenFile(string(path), os.O_RDONLY, 0644)
w.Header().Set("Content-Type", "application/octet-stream")
}
if err != nil {
log.Error(err)
w.WriteHeader(500)
return
}

w.WriteHeader(200)
if _, err := io.Copy(w, rd); err != nil {
log.Error(err)
return
}*/
func (sm *StorageMinerAPI) WorkerStats(context.Context) (map[uint64]api.WorkerStats, error) {
return sm.StorageMgr.WorkerStats(), nil
}

func (sm *StorageMinerAPI) remotePutSector(w http.ResponseWriter, r *http.Request) {
panic("todo")
/* vars := mux.Vars(r)

id, err := strconv.ParseUint(vars["id"], 10, 64)
if err != nil {
log.Error("parsing sector id: ", err)
w.WriteHeader(500)
return
}

// This is going to get better with worker-to-worker transfers

path, err := sm.SectorBuilder.SectorPath(fs.DataType(vars["type"]), abi.SectorNumber(id))
if err != nil {
if err != fs.ErrNotFound {
log.Error(err)
w.WriteHeader(500)
return
}

path, err = sm.SectorBuilder.AllocSectorPath(fs.DataType(vars["type"]), abi.SectorNumber(id), true)
if err != nil {
log.Error(err)
w.WriteHeader(500)
return
}
}

mediatype, _, err := mime.ParseMediaType(r.Header.Get("Content-Type"))
if err != nil {
log.Error(err)
w.WriteHeader(500)
return
}

if err := os.RemoveAll(string(path)); err != nil {
log.Error(err)
w.WriteHeader(500)
return
}

switch mediatype {
case "application/x-tar":
if err := tarutil.ExtractTar(r.Body, string(path)); err != nil {
log.Error(err)
w.WriteHeader(500)
return
}
default:
if err := files.WriteTo(files.NewReaderFile(r.Body), string(path)); err != nil {
log.Error(err)
w.WriteHeader(500)
return
}
}

w.WriteHeader(200)

log.Infof("received %s sector (%s): %d bytes", vars["type"], vars["sname"], r.ContentLength)*/
}

/*
func (sm *StorageMinerAPI) WorkerStats(context.Context) (sectorbuilder.WorkerStats, error) {
stat := sm.SectorBuilder.WorkerStats()
return stat, nil
}*/

func (sm *StorageMinerAPI) ActorAddress(context.Context) (address.Address, error) {
return sm.SectorBuilderConfig.Miner, nil
return sm.Miner.Address(), nil
}

func (sm *StorageMinerAPI) ActorSectorSize(ctx context.Context, addr address.Address) (abi.SectorSize, error) {
@ -236,6 +121,10 @@ func (sm *StorageMinerAPI) SectorsList(context.Context) ([]abi.SectorNumber, err
return out, nil
}

func (sm *StorageMinerAPI) StorageLocal(ctx context.Context) (map[stores.ID]string, error) {
return sm.StorageMgr.StorageLocal(ctx)
}

func (sm *StorageMinerAPI) SectorsRefs(context.Context) (map[string][]api.SealedRef, error) {
// json can't handle cids as map keys
out := map[string][]api.SealedRef{}
@ -252,19 +141,24 @@ func (sm *StorageMinerAPI) SectorsRefs(context.Context) (map[string][]api.Sealed
return out, nil
}

func (sm *StorageMinerAPI) StorageStat(ctx context.Context, id stores.ID) (stores.FsStat, error) {
return sm.StorageMgr.FsStat(ctx, id)
}

func (sm *StorageMinerAPI) SectorsUpdate(ctx context.Context, id abi.SectorNumber, state api.SectorState) error {
return sm.Miner.ForceSectorState(ctx, id, state)
}

/*
func (sm *StorageMinerAPI) WorkerQueue(ctx context.Context, cfg sectorbuilder.WorkerCfg) (<-chan sectorbuilder.WorkerTask, error) {
return sm.SectorBuilder.AddWorker(ctx, cfg)
}
func (sm *StorageMinerAPI) WorkerConnect(ctx context.Context, url string) error {
w, err := sectorstorage.ConnectRemote(ctx, sm, url)
if err != nil {
return xerrors.Errorf("connecting remote storage failed: %w", err)
}

func (sm *StorageMinerAPI) WorkerDone(ctx context.Context, task uint64, res sectorbuilder.SealRes) error {
return sm.SectorBuilder.TaskDone(ctx, task, res)
log.Infof("Connected to a remote worker at %s", url)

return sm.StorageMgr.AddWorker(ctx, w)
}
*/

func (sm *StorageMinerAPI) MarketImportDealData(ctx context.Context, propCid cid.Cid, path string) error {
fi, err := os.Open(path)
@ -284,7 +178,7 @@ func (sm *StorageMinerAPI) MarketListIncompleteDeals(ctx context.Context) ([]sto
return sm.StorageProvider.ListIncompleteDeals()
}

func (sm *StorageMinerAPI) SetPrice(ctx context.Context, p types.BigInt) error {
func (sm *StorageMinerAPI) MarketSetPrice(ctx context.Context, p types.BigInt) error {
return sm.StorageProvider.AddAsk(abi.TokenAmount(p), 60*60*24*100) // lasts for 100 days?
}

@ -307,7 +201,7 @@ func (sm *StorageMinerAPI) StorageAddLocal(ctx context.Context, path string) err
return xerrors.Errorf("no storage manager")
}

return sm.StorageMgr.AddLocalStorage(path)
return sm.StorageMgr.AddLocalStorage(ctx, path)
}

var _ api.StorageMiner = &StorageMinerAPI{}
@ -1,5 +1,10 @@
package dtypes

import "github.com/gbrlsnchs/jwt/v3"
import (
"github.com/gbrlsnchs/jwt/v3"
"github.com/multiformats/go-multiaddr"
)

type APIAlg jwt.HMACSHA

type APIEndpoint multiaddr.Multiaddr

9
node/modules/dtypes/miner.go
Normal file
@ -0,0 +1,9 @@
package dtypes

import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/abi"
)

type MinerAddress address.Address
type MinerID abi.ActorID
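These named types exist so dependency injection can distinguish the miner's own address and actor ID from any other address.Address or abi.ActorID in the graph; consumers unwrap them with a plain type conversion. A minimal sketch of that pattern (the helper name is invented for illustration):

func minerActorID(ma dtypes.MinerAddress) (abi.ActorID, error) {
	id, err := address.IDFromAddress(address.Address(ma)) // unwrap, then resolve the ID
	return abi.ActorID(id), err
}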
@ -44,13 +44,14 @@ import (
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/markets/retrievaladapter"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/helpers"
"github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/lotus/storage"
"github.com/filecoin-project/lotus/storage/sealing"
"github.com/filecoin-project/lotus/storage/sealmgr"
"github.com/filecoin-project/lotus/storage/sealmgr/advmgr"
"github.com/filecoin-project/lotus/storage/sectorstorage"
"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

func minerAddrFromDS(ds dtypes.MetadataDS) (address.Address, error) {
@ -75,13 +76,18 @@ func GetParams(sbc *sectorbuilder.Config) error {
return nil
}

func SectorBuilderConfig(ds dtypes.MetadataDS, fnapi lapi.FullNode) (*sectorbuilder.Config, error) {
minerAddr, err := minerAddrFromDS(ds)
if err != nil {
return nil, err
}
func MinerAddress(ds dtypes.MetadataDS) (dtypes.MinerAddress, error) {
ma, err := minerAddrFromDS(ds)
return dtypes.MinerAddress(ma), err
}

ssize, err := fnapi.StateMinerSectorSize(context.TODO(), minerAddr, types.EmptyTSK)
func MinerID(ma dtypes.MinerAddress) (dtypes.MinerID, error) {
id, err := address.IDFromAddress(address.Address(ma))
return dtypes.MinerID(id), err
}

func SectorBuilderConfig(maddr dtypes.MinerAddress, fnapi lapi.FullNode) (*sectorbuilder.Config, error) {
ssize, err := fnapi.StateMinerSectorSize(context.TODO(), address.Address(maddr), types.EmptyTSK)
if err != nil {
return nil, err
}
@ -92,7 +98,6 @@ func SectorBuilderConfig(ds dtypes.MetadataDS, fnapi lapi.FullNode) (*sectorbuil
}

sb := &sectorbuilder.Config{
Miner: minerAddr,
SealProofType: spt,
PoStProofType: ppt,
}
@ -109,12 +114,12 @@ func (s *sidsc) Next() (abi.SectorNumber, error) {
return abi.SectorNumber(i), err
}

func SectorIDCounter(ds dtypes.MetadataDS) advmgr.SectorIDCounter {
func SectorIDCounter(ds dtypes.MetadataDS) sealing.SectorIDCounter {
sc := storedcounter.New(ds, datastore.NewKey("/storage/nextid"))
return &sidsc{sc}
}

func StorageMiner(mctx helpers.MetricsCtx, lc fx.Lifecycle, api lapi.FullNode, h host.Host, ds dtypes.MetadataDS, sealer sealmgr.Manager, tktFn sealing.TicketFn) (*storage.Miner, error) {
func StorageMiner(mctx helpers.MetricsCtx, lc fx.Lifecycle, api lapi.FullNode, h host.Host, ds dtypes.MetadataDS, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, tktFn sealing.TicketFn) (*storage.Miner, error) {
maddr, err := minerAddrFromDS(ds)
if err != nil {
return nil, err
@ -134,7 +139,7 @@ func StorageMiner(mctx helpers.MetricsCtx, lc fx.Lifecycle, api lapi.FullNode, h

fps := storage.NewFPoStScheduler(api, sealer, maddr, worker, ppt)

sm, err := storage.NewMiner(api, maddr, worker, h, ds, sealer, tktFn)
sm, err := storage.NewMiner(api, maddr, worker, h, ds, sealer, sc, tktFn)
if err != nil {
return nil, err
}
@ -324,7 +329,7 @@ func StorageProvider(ctx helpers.MetricsCtx, fapi lapi.FullNode, h host.Host, ds
}

// RetrievalProvider creates a new retrieval provider attached to the provider blockstore
func RetrievalProvider(h host.Host, miner *storage.Miner, sealer sealmgr.Manager, full lapi.FullNode, ds dtypes.MetadataDS, pieceStore dtypes.ProviderPieceStore, ibs dtypes.StagingBlockstore) (retrievalmarket.RetrievalProvider, error) {
func RetrievalProvider(h host.Host, miner *storage.Miner, sealer sectorstorage.SectorManager, full lapi.FullNode, ds dtypes.MetadataDS, pieceStore dtypes.ProviderPieceStore, ibs dtypes.StagingBlockstore) (retrievalmarket.RetrievalProvider, error) {
adapter := retrievaladapter.NewRetrievalProviderNode(miner, sealer, full)
address, err := minerAddrFromDS(ds)
if err != nil {
@ -333,3 +338,24 @@ func RetrievalProvider(h host.Host, miner *storage.Miner, sealer sealmgr.Manager
network := rmnet.NewFromLibp2pHost(h)
return retrievalimpl.NewProvider(address, adapter, network, pieceStore, ibs, ds)
}

func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, cfg *sectorbuilder.Config, sc config.Storage, urls sectorstorage.URLs, ca lapi.Common) (*sectorstorage.Manager, error) {
ctx := helpers.LifecycleCtx(mctx, lc)

sst, err := sectorstorage.New(ctx, ls, si, cfg, sc, urls, ca)
if err != nil {
return nil, err
}

lc.Append(fx.Hook{
OnStop: func(_ context.Context) error {
if err := sst.Close(); err != nil {
log.Errorf("%+v", err)
}

return nil
},
})

return sst, nil
}

@ -41,9 +41,8 @@ import (
	"github.com/filecoin-project/lotus/node/modules"
	modtest "github.com/filecoin-project/lotus/node/modules/testing"
	"github.com/filecoin-project/lotus/node/repo"
	"github.com/filecoin-project/lotus/storage/sbmock"
	"github.com/filecoin-project/lotus/storage/sealmgr"
	"github.com/filecoin-project/lotus/storage/sealmgr/advmgr"
	"github.com/filecoin-project/lotus/storage/sectorstorage"
	"github.com/filecoin-project/lotus/storage/sectorstorage/mock"
)

func init() {
@ -80,6 +79,7 @@ func testStorageNode(ctx context.Context, t *testing.T, waddr address.Address, a
	for i := 0; i < nPreseal; i++ {
		nic.Next()
	}
	nic.Next()

	err = lr.Close()
	require.NoError(t, err)
@ -256,7 +256,7 @@ func builder(t *testing.T, nFull int, storage []int) ([]test.TestNode, []test.Te

	storers[i] = testStorageNode(ctx, t, wa, genMiner, pk, f, mn, node.Options())
	if err := storers[i].StorageAddLocal(ctx, presealDirs[i]); err != nil {
		t.Fatal(err)
		t.Fatalf("%+v", err)
	}
	/*
		sma := storers[i].StorageMiner.(*impl.StorageMinerAPI)
@ -309,7 +309,7 @@ func mockSbBuilder(t *testing.T, nFull int, storage []int) ([]test.TestNode, []t
	if err != nil {
		t.Fatal(err)
	}
	genm, k, err := sbmock.PreSeal(2048, maddr, nPreseal)
	genm, k, err := mock.PreSeal(2048, maddr, nPreseal)
	if err != nil {
		t.Fatal(err)
	}
@ -355,7 +355,7 @@ func mockSbBuilder(t *testing.T, nFull int, storage []int) ([]test.TestNode, []t
		node.MockHost(mn),
		node.Test(),

		node.Override(new(sectorbuilder.Verifier), sbmock.MockVerifier),
		node.Override(new(sectorbuilder.Verifier), mock.MockVerifier),

		genesis,
	)
@ -385,10 +385,10 @@ func mockSbBuilder(t *testing.T, nFull int, storage []int) ([]test.TestNode, []t
	wa := genms[i].Worker

	storers[i] = testStorageNode(ctx, t, wa, genMiner, pk, f, mn, node.Options(
		node.Override(new(sealmgr.Manager), func() (sealmgr.Manager, error) {
			return sealmgr.NewSimpleManager(storedcounter.New(datastore.NewMapDatastore(), datastore.NewKey("/potato")), genMiner, sbmock.NewMockSectorBuilder(5, build.SectorSizes[0]))
		node.Override(new(sectorstorage.SectorManager), func() (sectorstorage.SectorManager, error) {
			return mock.NewMockSectorMgr(5, build.SectorSizes[0]), nil
		}),
		node.Unset(new(*advmgr.Manager)),
		node.Unset(new(*sectorstorage.Manager)),
	))
}
@ -138,8 +138,17 @@ func as(in interface{}, as interface{}) interface{} {

	return reflect.MakeFunc(ctype, func(args []reflect.Value) (results []reflect.Value) {
		outs := reflect.ValueOf(in).Call(args)

		out := reflect.New(outType.Elem())
		out.Elem().Set(outs[0])
		if outs[0].Type().AssignableTo(outType.Elem()) {
			// Out: Iface = In: *Struct; Out: Iface = In: OtherIface
			out.Elem().Set(outs[0])
		} else {
			// Out: Iface = &(In: Struct)
			t := reflect.New(outs[0].Type())
			t.Elem().Set(outs[0])
			out.Elem().Set(t)
		}
		outs[0] = out.Elem()

		return outs
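
The new branch distinguishes values that already satisfy the output interface from ones that only do so through a pointer. A self-contained illustration of the same distinction outside of reflect (types here are hypothetical):

package main

import "fmt"

type iface interface{ M() string }

type impl struct{}

// Pointer receiver: only *impl satisfies iface, not impl itself.
func (*impl) M() string { return "ok" }

func main() {
	var a iface = &impl{} // first branch: the value is directly assignable
	v := impl{}
	var b iface = &v // second branch: take a pointer first, as the reflect code does
	fmt.Println(a.M(), b.M())
}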

@ -40,6 +40,7 @@ const (
	_ = iota // Default is invalid
	FullNode RepoType = iota
	StorageMiner
	Worker
)

func defConfForType(t RepoType) interface{} {
@ -48,6 +49,8 @@ func defConfForType(t RepoType) interface{} {
		return config.DefaultFullNode()
	case StorageMiner:
		return config.DefaultStorageMiner()
	case Worker:
		return &struct{}{}
	default:
		panic(fmt.Sprintf("unknown RepoType(%d)", int(t)))
	}
@ -16,6 +16,7 @@ import (

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

type MemRepo struct {
@ -81,8 +82,8 @@ func (lmem *lockedMemRepo) Path() string {
		panic(err)
	}

	b, err := json.MarshalIndent(&config.StorageMeta{
		ID: uuid.New().String(),
	b, err := json.MarshalIndent(&stores.LocalStorageMeta{
		ID:       stores.ID(uuid.New().String()),
		Weight:   10,
		CanSeal:  true,
		CanStore: true,
23
scripts/miner-mon.sh
Executable file
@ -0,0 +1,23 @@
#!/usr/bin/env bash

SESSION=$(cat /proc/sys/kernel/random/uuid)

tmux -2 new-session -d -s $SESSION

tmux new-window -t $SESSION:1 -n 'Storage Miner'

tmux split-window -h

tmux select-pane -t 0
tmux send-keys "watch -n1 './lotus-storage-miner info'" C-m

tmux split-window -v

tmux select-pane -t 1
tmux send-keys "watch -n1 './lotus-storage-miner workers list'" C-m

tmux select-pane -t 2
tmux send-keys "watch -n1 './lotus-storage-miner storage list'" C-m

tmux -2 attach-session -t $SESSION
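
Presumably this script is meant to be run from the directory holding the lotus-storage-miner binary, since the watch commands reference it by a relative ./ path; the three panes then refresh miner info, the worker list, and the storage list every second. Detaching with the standard tmux prefix (Ctrl-b d) leaves the watchers running, and tmux attach-session re-attaches.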

@ -3,6 +3,7 @@ package storage
import (
	"bytes"
	"context"
	"github.com/filecoin-project/go-address"
	"time"

	"github.com/filecoin-project/specs-actors/actors/crypto"
@ -187,19 +188,24 @@ func (s *FPoStScheduler) runPost(ctx context.Context, eps abi.ChainEpoch, ts *ty
		"sectors", len(ssi),
		"faults", len(faults))

	scandidates, proof, err := s.sb.GenerateFallbackPoSt(ssi, abi.PoStRandomness(rand), faults)
	mid, err := address.IDFromAddress(s.actor)
	if err != nil {
		return nil, err
	}

	postOut, err := s.sb.GenerateFallbackPoSt(ctx, abi.ActorID(mid), ssi, abi.PoStRandomness(rand), faults)
	if err != nil {
		return nil, xerrors.Errorf("running post failed: %w", err)
	}

	if len(scandidates) == 0 {
	if len(postOut.PoStInputs) == 0 {
		return nil, xerrors.Errorf("received zero candidates back from generate fallback post")
	}

	// TODO: until we figure out how fallback post is really supposed to work,
	// let's just pass a single candidate...
	scandidates = scandidates[:1]
	proof = proof[:1]
	scandidates := postOut.PoStInputs[:1]
	proof := postOut.Proof[:1]

	elapsed := time.Since(tsStart)
	log.Infow("submitting PoSt", "pLen", len(proof), "elapsed", elapsed)
@ -3,6 +3,7 @@ package storage
import (
	"context"
	"errors"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
	"time"

	"github.com/filecoin-project/go-address"
@ -13,7 +14,7 @@ import (
	"github.com/libp2p/go-libp2p-core/host"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/storage/sealmgr"
	"github.com/filecoin-project/lotus/storage/sectorstorage"

	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
@ -33,9 +34,10 @@ var log = logging.Logger("storageminer")
type Miner struct {
	api    storageMinerApi
	h      host.Host
	sealer sealmgr.Manager
	sealer sectorstorage.SectorManager
	ds     datastore.Batching
	tktFn  sealing.TicketFn
	sc     sealing.SectorIDCounter

	maddr  address.Address
	worker address.Address
@ -72,13 +74,14 @@ type storageMinerApi interface {
	WalletHas(context.Context, address.Address) (bool, error)
}

func NewMiner(api storageMinerApi, maddr, worker address.Address, h host.Host, ds datastore.Batching, sealer sealmgr.Manager, tktFn sealing.TicketFn) (*Miner, error) {
func NewMiner(api storageMinerApi, maddr, worker address.Address, h host.Host, ds datastore.Batching, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, tktFn sealing.TicketFn) (*Miner, error) {
	m := &Miner{
		api:    api,
		h:      h,
		sealer: sealer,
		ds:     ds,
		tktFn:  tktFn,
		sc:     sc,

		maddr:  maddr,
		worker: worker,
@ -93,7 +96,7 @@ func (m *Miner) Run(ctx context.Context) error {
	}

	evts := events.NewEvents(ctx, m.api)
	m.sealing = sealing.New(m.api, evts, m.maddr, m.worker, m.ds, m.sealer, m.tktFn)
	m.sealing = sealing.New(m.api, evts, m.maddr, m.worker, m.ds, m.sealer, m.sc, m.tktFn)

	go m.sealing.Run(ctx)
@ -121,10 +124,11 @@ func (m *Miner) runPreflightChecks(ctx context.Context) error {

type SectorBuilderEpp struct {
	prover storage.Prover
	miner  abi.ActorID
}

func NewElectionPoStProver(sb storage.Prover) *SectorBuilderEpp {
	return &SectorBuilderEpp{sb}
func NewElectionPoStProver(sb storage.Prover, miner dtypes.MinerID) *SectorBuilderEpp {
	return &SectorBuilderEpp{sb, abi.ActorID(miner)}
}

var _ gen.ElectionPoStProver = (*SectorBuilderEpp)(nil)
@ -133,7 +137,7 @@ func (epp *SectorBuilderEpp) GenerateCandidates(ctx context.Context, ssi []abi.S
	start := time.Now()
	var faults []abi.SectorNumber // TODO

	cds, err := epp.prover.GenerateEPostCandidates(ssi, rand, faults)
	cds, err := epp.prover.GenerateEPostCandidates(ctx, epp.miner, ssi, rand, faults)
	if err != nil {
		return nil, xerrors.Errorf("failed to generate candidates: %w", err)
	}
@ -153,7 +157,7 @@ func (epp *SectorBuilderEpp) ComputeProof(ctx context.Context, ssi []abi.SectorI
	}

	start := time.Now()
	proof, err := epp.prover.ComputeElectionPoSt(ssi, rand, owins)
	proof, err := epp.prover.ComputeElectionPoSt(ctx, epp.miner, ssi, rand, owins)
	if err != nil {
		return nil, err
	}
@ -2,6 +2,7 @@ package storage

import (
	"context"
	"github.com/filecoin-project/go-address"
	"io"

	"github.com/filecoin-project/specs-actors/actors/abi"
@ -12,6 +13,10 @@ import (

// TODO: refactor this to be direct somehow

func (m *Miner) Address() address.Address {
	return m.sealing.Address()
}

func (m *Miner) AllocatePiece(size abi.UnpaddedPieceSize) (sectorID abi.SectorNumber, offset uint64, err error) {
	return m.sealing.AllocatePiece(size)
}
@ -2,6 +2,7 @@ package sealing

import (
	"context"
	"encoding/json"
	"fmt"
	"reflect"
	"time"
@ -83,9 +84,15 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
	// First process all events

	for _, event := range events {
		e, err := json.Marshal(event)
		if err != nil {
			log.Errorf("marshaling event for logging: %+v", err)
			continue
		}

		l := Log{
			Timestamp: uint64(time.Now().Unix()),
			Message:   fmt.Sprintf("%+v", event),
			Message:   string(e),
			Kind:      fmt.Sprintf("event;%T", event.User),
		}

@ -201,7 +208,7 @@ func planCommitting(events []statemachine.Event, state *SectorInfo) error {
			e.apply(state)
			state.State = api.CommitWait
		case SectorSeedReady: // seed changed :/
			if e.seed.Equals(&state.Seed) {
			if e.Seed.Equals(&state.Seed) {
				log.Warnf("planCommitting: got SectorSeedReady, but the seed didn't change")
				continue // or it didn't!
			}
@ -3,6 +3,7 @@ package sealing
import (
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/api"
)
@ -26,6 +27,8 @@ func (evt SectorRestart) applyGlobal(*SectorInfo) bool { return false }

type SectorFatalError struct{ error }

func (evt SectorFatalError) FormatError(xerrors.Printer) (next error) { return evt.error }

func (evt SectorFatalError) applyGlobal(state *SectorInfo) bool {
	log.Errorf("Fatal error on sector %d: %+v", state.SectorID, evt.error)
	// TODO: Do we want to mark the state as unrecoverable?
@ -35,32 +38,32 @@ func (evt SectorFatalError) applyGlobal(state *SectorInfo) bool {
}

type SectorForceState struct {
	state api.SectorState
	State api.SectorState
}

func (evt SectorForceState) applyGlobal(state *SectorInfo) bool {
	state.State = evt.state
	state.State = evt.State
	return true
}

// Normal path

type SectorStart struct {
	id         abi.SectorNumber
	sectorType abi.RegisteredProof
	pieces     []Piece
	ID         abi.SectorNumber
	SectorType abi.RegisteredProof
	Pieces     []Piece
}

func (evt SectorStart) apply(state *SectorInfo) {
	state.SectorID = evt.id
	state.Pieces = evt.pieces
	state.SectorType = evt.sectorType
	state.SectorID = evt.ID
	state.Pieces = evt.Pieces
	state.SectorType = evt.SectorType
}

type SectorPacked struct{ pieces []Piece }
type SectorPacked struct{ Pieces []Piece }

func (evt SectorPacked) apply(state *SectorInfo) {
	state.Pieces = append(state.Pieces, evt.pieces...)
	state.Pieces = append(state.Pieces, evt.Pieces...)
}

type SectorPackingFailed struct{ error }
@ -68,57 +71,63 @@ type SectorPackingFailed struct{ error }
func (evt SectorPackingFailed) apply(*SectorInfo) {}

type SectorSealed struct {
	commR  cid.Cid
	commD  cid.Cid
	ticket api.SealTicket
	Sealed   cid.Cid
	Unsealed cid.Cid
	Ticket   api.SealTicket
}

func (evt SectorSealed) apply(state *SectorInfo) {
	commd := evt.commD
	commd := evt.Unsealed
	state.CommD = &commd
	commr := evt.commR
	commr := evt.Sealed
	state.CommR = &commr
	state.Ticket = evt.ticket
	state.Ticket = evt.Ticket
}

type SectorSealFailed struct{ error }

func (evt SectorSealFailed) apply(*SectorInfo) {}
func (evt SectorSealFailed) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorSealFailed) apply(*SectorInfo)                        {}

type SectorPreCommitFailed struct{ error }

func (evt SectorPreCommitFailed) apply(*SectorInfo) {}
func (evt SectorPreCommitFailed) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorPreCommitFailed) apply(*SectorInfo)                        {}

type SectorPreCommitted struct {
	message cid.Cid
	Message cid.Cid
}

func (evt SectorPreCommitted) apply(state *SectorInfo) {
	state.PreCommitMessage = &evt.message
	state.PreCommitMessage = &evt.Message
}

type SectorSeedReady struct {
	seed api.SealSeed
	Seed api.SealSeed
}

func (evt SectorSeedReady) apply(state *SectorInfo) {
	state.Seed = evt.seed
	state.Seed = evt.Seed
}

type SectorComputeProofFailed struct{ error }

func (evt SectorComputeProofFailed) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorComputeProofFailed) apply(*SectorInfo)                        {}

type SectorCommitFailed struct{ error }

func (evt SectorCommitFailed) apply(*SectorInfo) {}
func (evt SectorCommitFailed) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorCommitFailed) apply(*SectorInfo)                        {}

type SectorCommitted struct {
	message cid.Cid
	proof   []byte
	Message cid.Cid
	Proof   []byte
}

func (evt SectorCommitted) apply(state *SectorInfo) {
	state.Proof = evt.proof
	state.CommitMessage = &evt.message
	state.Proof = evt.Proof
	state.CommitMessage = &evt.Message
}

type SectorProving struct{}
@ -131,7 +140,8 @@ func (evt SectorFinalized) apply(*SectorInfo) {}

type SectorFinalizeFailed struct{ error }

func (evt SectorFinalizeFailed) apply(*SectorInfo) {}
func (evt SectorFinalizeFailed) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorFinalizeFailed) apply(*SectorInfo)                        {}

// Failed state recovery
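
A plausible reading of these field renames, given the json.Marshal call added to (*Sealing).plan earlier in this change: encoding/json only serializes exported struct fields, so the event payloads must be exported to survive the new JSON-based log format. A minimal illustration (types here are hypothetical):

type private struct{ seed int }
type public struct{ Seed int }

// json.Marshal(private{5}) yields {} — the unexported field is dropped.
// json.Marshal(public{5}) yields {"Seed":5}.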
@ -76,12 +76,12 @@ func TestSeedRevert(t *testing.T) {
	m.planSingle(SectorSeedReady{})
	require.Equal(m.t, m.state.State, api.Committing)

	_, err := m.s.plan([]statemachine.Event{{SectorSeedReady{seed: api.SealSeed{Epoch: 5}}}, {SectorCommitted{}}}, m.state)
	_, err := m.s.plan([]statemachine.Event{{SectorSeedReady{Seed: api.SealSeed{Epoch: 5}}}, {SectorCommitted{}}}, m.state)
	require.NoError(t, err)
	require.Equal(m.t, m.state.State, api.Committing)

	// not changing the seed this time
	_, err = m.s.plan([]statemachine.Event{{SectorSeedReady{Seed: api.SealSeed{Epoch: 5}}}, {SectorCommitted{}}}, m.state)
	require.Equal(m.t, m.state.State, api.CommitWait)

	m.planSingle(SectorProving{})
@ -16,7 +16,7 @@ func (m *Sealing) pledgeReader(size abi.UnpaddedPieceSize) io.Reader {
	return io.LimitReader(&nullreader.Reader{}, int64(size))
}

func (m *Sealing) pledgeSector(ctx context.Context, sectorID abi.SectorNumber, existingPieceSizes []abi.UnpaddedPieceSize, sizes ...abi.UnpaddedPieceSize) ([]Piece, error) {
func (m *Sealing) pledgeSector(ctx context.Context, sectorID abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, sizes ...abi.UnpaddedPieceSize) ([]Piece, error) {
	if len(sizes) == 0 {
		return nil, nil
	}
@ -55,13 +55,18 @@ func (m *Sealing) PledgeSector() error {
			return
		}

		sid, err := m.sealer.NewSector()
		sid, err := m.sc.Next()
		if err != nil {
			log.Errorf("%+v", err)
			return
		}
		err = m.sealer.NewSector(ctx, m.minerSector(sid))
		if err != nil {
			log.Errorf("%+v", err)
			return
		}

		pieces, err := m.pledgeSector(ctx, sid, []abi.UnpaddedPieceSize{}, size)
		pieces, err := m.pledgeSector(ctx, m.minerSector(sid), []abi.UnpaddedPieceSize{}, size)
		if err != nil {
			log.Errorf("%+v", err)
			return
@ -21,7 +21,7 @@ import (
	"github.com/filecoin-project/lotus/chain/events"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/storage/sealmgr"
	"github.com/filecoin-project/lotus/storage/sectorstorage"
)

const SectorStorePrefix = "/sectors"
@ -30,6 +30,10 @@ var log = logging.Logger("sectors")

type TicketFn func(context.Context) (*api.SealTicket, error)

type SectorIDCounter interface {
	Next() (abi.SectorNumber, error)
}

type sealingApi interface { // TODO: trim down
	// Call a read only method on actors (no interaction with the chain required)
	StateCall(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error)
@ -65,12 +69,13 @@ type Sealing struct {
	maddr  address.Address
	worker address.Address

	sealer  sealmgr.Manager
	sealer  sectorstorage.SectorManager
	sectors *statemachine.StateGroup
	tktFn   TicketFn
	sc      SectorIDCounter
}

func New(api sealingApi, events *events.Events, maddr address.Address, worker address.Address, ds datastore.Batching, sealer sealmgr.Manager, tktFn TicketFn) *Sealing {
func New(api sealingApi, events *events.Events, maddr address.Address, worker address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, tktFn TicketFn) *Sealing {
	s := &Sealing{
		api:    api,
		events: events,
@ -79,6 +84,7 @@ func New(api sealingApi, events *events.Events, maddr address.Address, worker ad
		worker: worker,
		sealer: sealer,
		tktFn:  tktFn,
		sc:     sc,
	}

	s.sectors = statemachine.New(namespace.Wrap(ds, datastore.NewKey(SectorStorePrefix)), s, SectorInfo{})
@ -104,9 +110,14 @@ func (m *Sealing) AllocatePiece(size abi.UnpaddedPieceSize) (sectorID abi.Sector
		return 0, 0, xerrors.Errorf("cannot allocate unpadded piece")
	}

	sid, err := m.sealer.NewSector() // TODO: Put more than one thing in a sector
	sid, err := m.sc.Next()
	if err != nil {
		return 0, 0, xerrors.Errorf("acquiring sector ID: %w", err)
		return 0, 0, xerrors.Errorf("getting sector number: %w", err)
	}

	err = m.sealer.NewSector(context.TODO(), m.minerSector(sid)) // TODO: Put more than one thing in a sector
	if err != nil {
		return 0, 0, xerrors.Errorf("initializing sector: %w", err)
	}

	// offset hard-coded to 0 since we only put one thing in a sector for now
@ -116,7 +127,7 @@ func (m *Sealing) AllocatePiece(size abi.UnpaddedPieceSize) (sectorID abi.Sector
func (m *Sealing) SealPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, sectorID abi.SectorNumber, dealID abi.DealID) error {
	log.Infof("Seal piece for deal %d", dealID)

	ppi, err := m.sealer.AddPiece(ctx, sectorID, []abi.UnpaddedPieceSize{}, size, r)
	ppi, err := m.sealer.AddPiece(ctx, m.minerSector(sectorID), []abi.UnpaddedPieceSize{}, size, r)
	if err != nil {
		return xerrors.Errorf("adding piece to sector: %w", err)
	}
@ -139,8 +150,24 @@ func (m *Sealing) SealPiece(ctx context.Context, size abi.UnpaddedPieceSize, r i
func (m *Sealing) newSector(sid abi.SectorNumber, rt abi.RegisteredProof, pieces []Piece) error {
	log.Infof("Start sealing %d", sid)
	return m.sectors.Send(uint64(sid), SectorStart{
		id:         sid,
		pieces:     pieces,
		sectorType: rt,
		ID:         sid,
		Pieces:     pieces,
		SectorType: rt,
	})
}

func (m *Sealing) minerSector(num abi.SectorNumber) abi.SectorID {
	mid, err := address.IDFromAddress(m.maddr)
	if err != nil {
		panic(err)
	}

	return abi.SectorID{
		Number: num,
		Miner:  abi.ActorID(mid),
	}
}

func (m *Sealing) Address() address.Address {
	return m.maddr
}
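
minerSector pairs the sector number with the miner's actor ID because the new storage layer keys everything by abi.SectorID; the on-disk names used by the storage paths in this change follow the same pairing. A sketch of that mapping, mirroring the s-t0%d-%d format used by the advmgr storage code removed below (the helper name here is hypothetical):

func sectorName(id abi.SectorID) string {
	// e.g. abi.SectorID{Miner: 1000, Number: 42} -> "s-t01000-42"
	return fmt.Sprintf("s-t0%d-%d", id.Miner, id.Number)
}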

@ -2,6 +2,7 @@ package sealing

import (
	"context"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/specs-actors/actors/crypto"

@ -40,12 +41,12 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err
		log.Warnf("Creating %d filler pieces for sector %d", len(fillerSizes), sector.SectorID)
	}

	pieces, err := m.pledgeSector(ctx.Context(), sector.SectorID, sector.existingPieces(), fillerSizes...)
	pieces, err := m.pledgeSector(ctx.Context(), m.minerSector(sector.SectorID), sector.existingPieces(), fillerSizes...)
	if err != nil {
		return xerrors.Errorf("filling up the sector (%v): %w", fillerSizes, err)
	}

	return ctx.Send(SectorPacked{pieces: pieces})
	return ctx.Send(SectorPacked{Pieces: pieces})
}

func (m *Sealing) handleUnsealed(ctx statemachine.Context, sector SectorInfo) error {
@ -69,20 +70,20 @@ func (m *Sealing) handleUnsealed(ctx statemachine.Context, sector SectorInfo) er
		return ctx.Send(SectorSealFailed{xerrors.Errorf("getting ticket failed: %w", err)})
	}

	pc1o, err := m.sealer.SealPreCommit1(ctx.Context(), sector.SectorID, ticket.Value, sector.pieceInfos())
	pc1o, err := m.sealer.SealPreCommit1(ctx.Context(), m.minerSector(sector.SectorID), ticket.Value, sector.pieceInfos())
	if err != nil {
		return ctx.Send(SectorSealFailed{xerrors.Errorf("seal pre commit failed: %w", err)})
		return ctx.Send(SectorSealFailed{xerrors.Errorf("seal pre commit(1) failed: %w", err)})
	}

	sealed, unsealed, err := m.sealer.SealPreCommit2(ctx.Context(), sector.SectorID, pc1o)
	cids, err := m.sealer.SealPreCommit2(ctx.Context(), m.minerSector(sector.SectorID), pc1o)
	if err != nil {
		return ctx.Send(SectorSealFailed{xerrors.Errorf("seal pre commit failed: %w", err)})
		return ctx.Send(SectorSealFailed{xerrors.Errorf("seal pre commit(2) failed: %w", err)})
	}

	return ctx.Send(SectorSealed{
		commD:  unsealed,
		commR:  sealed,
		ticket: *ticket,
		Unsealed: cids.Unsealed,
		Sealed:   cids.Sealed,
		Ticket:   *ticket,
	})
}

@ -131,7 +132,7 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf
		return ctx.Send(SectorPreCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)})
	}

	return ctx.Send(SectorPreCommitted{message: smsg.Cid()})
	return ctx.Send(SectorPreCommitted{Message: smsg.Cid()})
}

func (m *Sealing) handleWaitSeed(ctx statemachine.Context, sector SectorInfo) error {
@ -161,7 +162,7 @@ func (m *Sealing) handleWaitSeed(ctx statemachine.Context, sector SectorInfo) er
		return err
	}

	ctx.Send(SectorSeedReady{seed: api.SealSeed{
	ctx.Send(SectorSeedReady{Seed: api.SealSeed{
		Epoch: randHeight,
		Value: abi.InteractiveSealRandomness(rand),
	}})
@ -184,12 +185,16 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo)

	log.Infof("COMMIT %d %x(%d); %x(%d); %v; r:%x; d:%x", sector.SectorID, sector.Ticket.Value, sector.Ticket.Epoch, sector.Seed.Value, sector.Seed.Epoch, sector.pieceInfos(), sector.CommR, sector.CommD)

	c2in, err := m.sealer.SealCommit1(ctx.Context(), sector.SectorID, sector.Ticket.Value, sector.Seed.Value, sector.pieceInfos(), *sector.CommR, *sector.CommD)
	cids := storage.SectorCids{
		Unsealed: *sector.CommD,
		Sealed:   *sector.CommR,
	}
	c2in, err := m.sealer.SealCommit1(ctx.Context(), m.minerSector(sector.SectorID), sector.Ticket.Value, sector.Seed.Value, sector.pieceInfos(), cids)
	if err != nil {
		return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed: %w", err)})
	}

	proof, err := m.sealer.SealCommit2(ctx.Context(), sector.SectorID, c2in)
	proof, err := m.sealer.SealCommit2(ctx.Context(), m.minerSector(sector.SectorID), c2in)
	if err != nil {
		return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed: %w", err)})
	}
@ -224,8 +229,8 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo)
	}

	return ctx.Send(SectorCommitted{
		proof:   proof,
		message: smsg.Cid(),
		Proof:   proof,
		Message: smsg.Cid(),
	})
}

@ -250,7 +255,7 @@ func (m *Sealing) handleCommitWait(ctx statemachine.Context, sector SectorInfo)
func (m *Sealing) handleFinalizeSector(ctx statemachine.Context, sector SectorInfo) error {
	// TODO: Maybe wait for some finality

	if err := m.sealer.FinalizeSector(ctx.Context(), sector.SectorID); err != nil {
	if err := m.sealer.FinalizeSector(ctx.Context(), m.minerSector(sector.SectorID)); err != nil {
		return ctx.Send(SectorFinalizeFailed{xerrors.Errorf("finalize sector: %w", err)})
	}
@ -1,107 +0,0 @@
package advmgr

import (
	"context"
	"io"

	storage2 "github.com/filecoin-project/specs-storage/storage"
	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/specs-actors/actors/abi"

	"github.com/filecoin-project/lotus/storage/sealmgr"
)

type localWorker struct {
	scfg    *sectorbuilder.Config
	storage *storage
}

type localWorkerPathProvider struct {
	w *localWorker
}

func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, id abi.SectorNumber, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (sectorbuilder.SectorPaths, func(), error) {
	mid, err := address.IDFromAddress(l.w.scfg.Miner)
	if err != nil {
		return sectorbuilder.SectorPaths{}, nil, xerrors.Errorf("get miner ID: %w", err)
	}

	return l.w.storage.acquireSector(abi.ActorID(mid), id, existing, allocate, sealing)
}

func (l *localWorker) sb() (sectorbuilder.Basic, error) {
	return sectorbuilder.New(&localWorkerPathProvider{w: l}, l.scfg)
}

func (l *localWorker) AddPiece(ctx context.Context, sn abi.SectorNumber, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
	sb, err := l.sb()
	if err != nil {
		return abi.PieceInfo{}, err
	}

	return sb.AddPiece(ctx, sn, epcs, sz, r)
}

func (l *localWorker) SealPreCommit1(ctx context.Context, sectorNum abi.SectorNumber, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage2.PreCommit1Out, err error) {
	sb, err := l.sb()
	if err != nil {
		return nil, err
	}

	return sb.SealPreCommit1(ctx, sectorNum, ticket, pieces)
}

func (l *localWorker) SealPreCommit2(ctx context.Context, sectorNum abi.SectorNumber, phase1Out storage2.PreCommit1Out) (sealedCID cid.Cid, unsealedCID cid.Cid, err error) {
	sb, err := l.sb()
	if err != nil {
		return cid.Undef, cid.Undef, err
	}

	return sb.SealPreCommit2(ctx, sectorNum, phase1Out)
}

func (l *localWorker) SealCommit1(ctx context.Context, sectorNum abi.SectorNumber, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, sealedCID cid.Cid, unsealedCID cid.Cid) (output storage2.Commit1Out, err error) {
	sb, err := l.sb()
	if err != nil {
		return nil, err
	}

	return sb.SealCommit1(ctx, sectorNum, ticket, seed, pieces, sealedCID, unsealedCID)
}

func (l *localWorker) SealCommit2(ctx context.Context, sectorNum abi.SectorNumber, phase1Out storage2.Commit1Out) (proof storage2.Proof, err error) {
	sb, err := l.sb()
	if err != nil {
		return nil, err
	}

	return sb.SealCommit2(ctx, sectorNum, phase1Out)
}

func (l *localWorker) FinalizeSector(ctx context.Context, sectorNum abi.SectorNumber) error {
	sb, err := l.sb()
	if err != nil {
		return err
	}

	return sb.FinalizeSector(ctx, sectorNum)
}

func (l *localWorker) TaskTypes() map[sealmgr.TaskType]struct{} {
	return map[sealmgr.TaskType]struct{}{
		sealmgr.TTAddPiece:   {},
		sealmgr.TTPreCommit1: {},
		sealmgr.TTPreCommit2: {},
		sealmgr.TTCommit2:    {},
	}
}

func (l *localWorker) Paths() []Path {
	return l.storage.local()
}

var _ Worker = &localWorker{}
@ -1,254 +0,0 @@
package advmgr

import (
	"context"
	"io"

	"github.com/ipfs/go-cid"
	"github.com/mitchellh/go-homedir"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/specs-actors/actors/abi"
	storage2 "github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/storage/sealmgr"
)

type SectorIDCounter interface {
	Next() (abi.SectorNumber, error)
}

type LocalStorage interface {
	GetStorage() (config.StorageConfig, error)
	SetStorage(func(*config.StorageConfig)) error
}

type Path struct {
	ID     string
	Weight uint64

	LocalPath string

	CanSeal  bool
	CanStore bool
}

type Worker interface {
	sectorbuilder.Sealer

	TaskTypes() map[sealmgr.TaskType]struct{}
	Paths() []Path
}

type Manager struct {
	workers []Worker
	scfg    *sectorbuilder.Config
	sc      SectorIDCounter

	storage *storage

	storage2.Prover
}

func New(ls LocalStorage, cfg *sectorbuilder.Config, sc SectorIDCounter) (*Manager, error) {
	stor := &storage{
		localStorage: ls,
	}
	if err := stor.open(); err != nil {
		return nil, err
	}

	mid, err := address.IDFromAddress(cfg.Miner)
	if err != nil {
		return nil, xerrors.Errorf("getting miner id: %w", err)
	}

	prover, err := sectorbuilder.New(&readonlyProvider{stor: stor, miner: abi.ActorID(mid)}, cfg)
	if err != nil {
		return nil, xerrors.Errorf("creating prover instance: %w", err)
	}

	m := &Manager{
		workers: []Worker{
			&localWorker{scfg: cfg, storage: stor},
		},
		scfg: cfg,
		sc:   sc,

		storage: stor,

		Prover: prover,
	}

	return m, nil
}

func (m *Manager) AddLocalStorage(path string) error {
	path, err := homedir.Expand(path)
	if err != nil {
		return xerrors.Errorf("expanding local path: %w", err)
	}

	if err := m.storage.openPath(path); err != nil {
		return xerrors.Errorf("opening local path: %w", err)
	}

	if err := m.storage.localStorage.SetStorage(func(sc *config.StorageConfig) {
		sc.StoragePaths = append(sc.StoragePaths, config.LocalPath{Path: path})
	}); err != nil {
		return xerrors.Errorf("get storage config: %w", err)
	}
	return nil
}

func (m *Manager) SectorSize() abi.SectorSize {
	sz, _ := m.scfg.SealProofType.SectorSize()
	return sz
}

func (m *Manager) NewSector() (abi.SectorNumber, error) {
	return m.sc.Next()
}

func (m *Manager) ReadPieceFromSealedSector(context.Context, abi.SectorNumber, sectorbuilder.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error) {
	panic("implement me")
}

func (m *Manager) getWorkersByPaths(task sealmgr.TaskType, inPaths []config.StorageMeta) ([]Worker, map[int]config.StorageMeta) {
	var workers []Worker
	paths := map[int]config.StorageMeta{}

	for i, worker := range m.workers {
		if _, ok := worker.TaskTypes()[task]; !ok {
			continue
		}

		// check if the worker has access to the path we selected
		var st *config.StorageMeta
		for _, p := range worker.Paths() {
			for _, m := range inPaths {
				if p.ID == m.ID {
					if st != nil && st.Weight > p.Weight {
						continue
					}

					p := m // copy
					st = &p
				}
			}
		}
		if st == nil {
			continue
		}

		paths[i] = *st
		workers = append(workers, worker)
	}

	return workers, paths
}

func (m *Manager) AddPiece(ctx context.Context, sn abi.SectorNumber, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
	// TODO: consider multiple paths vs workers when initially allocating

	var best []config.StorageMeta
	var err error
	if len(existingPieces) == 0 { // new
		best, err = m.storage.findBestAllocStorage(sectorbuilder.FTUnsealed, true)
	} else { // append to existing
		best, err = m.storage.findSector(m.minerID(), sn, sectorbuilder.FTUnsealed)
	}
	if err != nil {
		return abi.PieceInfo{}, xerrors.Errorf("finding sector path: %w", err)
	}

	candidateWorkers, _ := m.getWorkersByPaths(sealmgr.TTAddPiece, best)

	if len(candidateWorkers) == 0 {
		return abi.PieceInfo{}, xerrors.New("no worker found")
	}

	// TODO: select(candidateWorkers, ...)
	// TODO: remove the sectorbuilder abstraction, pass path directly
	return candidateWorkers[0].AddPiece(ctx, sn, existingPieces, sz, r)
}

func (m *Manager) SealPreCommit1(ctx context.Context, sectorNum abi.SectorNumber, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage2.PreCommit1Out, err error) {
	// TODO: also consider where the unsealed data sits

	best, err := m.storage.findBestAllocStorage(sectorbuilder.FTCache|sectorbuilder.FTSealed, true)
	if err != nil {
		return nil, xerrors.Errorf("finding path for sector sealing: %w", err)
	}

	candidateWorkers, _ := m.getWorkersByPaths(sealmgr.TTPreCommit1, best)

	// TODO: select(candidateWorkers, ...)
	// TODO: remove the sectorbuilder abstraction, pass path directly
	return candidateWorkers[0].SealPreCommit1(ctx, sectorNum, ticket, pieces)
}

func (m *Manager) SealPreCommit2(ctx context.Context, sectorNum abi.SectorNumber, phase1Out storage2.PreCommit1Out) (sealedCID cid.Cid, unsealedCID cid.Cid, err error) {
	// TODO: allow workers to fetch the sectors

	best, err := m.storage.findSector(m.minerID(), sectorNum, sectorbuilder.FTCache|sectorbuilder.FTSealed)
	if err != nil {
		return cid.Undef, cid.Undef, xerrors.Errorf("finding path for sector sealing: %w", err)
	}

	candidateWorkers, _ := m.getWorkersByPaths(sealmgr.TTPreCommit2, best)

	// TODO: select(candidateWorkers, ...)
	// TODO: remove the sectorbuilder abstraction, pass path directly
	return candidateWorkers[0].SealPreCommit2(ctx, sectorNum, phase1Out)
}

func (m *Manager) SealCommit1(ctx context.Context, sectorNum abi.SectorNumber, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, sealedCID cid.Cid, unsealedCID cid.Cid) (output storage2.Commit1Out, err error) {
	best, err := m.storage.findSector(m.minerID(), sectorNum, sectorbuilder.FTCache|sectorbuilder.FTSealed)
	if err != nil {
		return nil, xerrors.Errorf("finding path for sector sealing: %w", err)
	}

	candidateWorkers, _ := m.getWorkersByPaths(sealmgr.TTPreCommit2, best)

	// TODO: select(candidateWorkers, ...)
	// TODO: remove the sectorbuilder abstraction, pass path directly
	return candidateWorkers[0].SealCommit1(ctx, sectorNum, ticket, seed, pieces, sealedCID, unsealedCID)
}

func (m *Manager) SealCommit2(ctx context.Context, sectorNum abi.SectorNumber, phase1Out storage2.Commit1Out) (proof storage2.Proof, err error) {
	for _, worker := range m.workers {
		if _, ok := worker.TaskTypes()[sealmgr.TTCommit2]; !ok {
			continue
		}

		return worker.SealCommit2(ctx, sectorNum, phase1Out)
	}

	return nil, xerrors.New("no worker found")
}

func (m *Manager) FinalizeSector(ctx context.Context, sectorNum abi.SectorNumber) error {
	best, err := m.storage.findSector(m.minerID(), sectorNum, sectorbuilder.FTCache|sectorbuilder.FTSealed|sectorbuilder.FTUnsealed)
	if err != nil {
		return xerrors.Errorf("finding sealed sector: %w", err)
	}

	candidateWorkers, _ := m.getWorkersByPaths(sealmgr.TTPreCommit2, best) // find last worker with the sector

	// TODO: Move the sector to long-term storage
	return candidateWorkers[0].FinalizeSector(ctx, sectorNum)
}

func (m *Manager) minerID() abi.ActorID {
	mid, err := address.IDFromAddress(m.scfg.Miner)
	if err != nil {
		panic(err)
	}
	return abi.ActorID(mid)
}

var _ sealmgr.Manager = &Manager{}
@ -1,22 +0,0 @@
package advmgr

import (
	"context"

	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/specs-actors/actors/abi"
	"golang.org/x/xerrors"
)

type readonlyProvider struct {
	miner abi.ActorID
	stor  *storage
}

func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorNumber, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (sectorbuilder.SectorPaths, func(), error) {
	if allocate != 0 {
		return sectorbuilder.SectorPaths{}, nil, xerrors.New("read-only storage")
	}

	return l.stor.acquireSector(l.miner, id, existing, allocate, sealing)
}
@ -1,277 +0,0 @@
package advmgr

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/specs-actors/actors/abi"

	"github.com/filecoin-project/lotus/node/config"
)

const metaFile = "sectorstore.json"

var pathTypes = []sectorbuilder.SectorFileType{sectorbuilder.FTUnsealed, sectorbuilder.FTSealed, sectorbuilder.FTCache}

type storage struct {
	localLk      sync.RWMutex
	localStorage LocalStorage

	paths []*path
}

type path struct {
	lk sync.Mutex

	meta  config.StorageMeta
	local string

	sectors map[abi.SectorID]sectorbuilder.SectorFileType
}

func (st *storage) openPath(p string) error {
	mb, err := ioutil.ReadFile(filepath.Join(p, metaFile))
	if err != nil {
		return xerrors.Errorf("reading storage metadata for %s: %w", p, err)
	}

	var meta config.StorageMeta
	if err := json.Unmarshal(mb, &meta); err != nil {
		return xerrors.Errorf("unmarshalling storage metadata for %s: %w", p, err)
	}

	// TODO: Check existing / dedupe

	out := &path{
		meta:    meta,
		local:   p,
		sectors: map[abi.SectorID]sectorbuilder.SectorFileType{},
	}

	for _, t := range pathTypes {
		ents, err := ioutil.ReadDir(filepath.Join(p, t.String()))
		if err != nil {
			if os.IsNotExist(err) {
				if err := os.MkdirAll(filepath.Join(p, t.String()), 0755); err != nil {
					return xerrors.Errorf("openPath mkdir '%s': %w", filepath.Join(p, t.String()), err)
				}

				continue
			}
			return xerrors.Errorf("listing %s: %w", filepath.Join(p, t.String()), err)
		}

		for _, ent := range ents {
			sid, err := parseSectorID(ent.Name())
			if err != nil {
				return xerrors.Errorf("parse sector id %s: %w", ent.Name(), err)
			}

			out.sectors[sid] |= t
		}
	}

	st.paths = append(st.paths, out)

	return nil
}

func (st *storage) open() error {
	st.localLk.Lock()
	defer st.localLk.Unlock()

	cfg, err := st.localStorage.GetStorage()
	if err != nil {
		return xerrors.Errorf("getting local storage config: %w", err)
	}

	for _, path := range cfg.StoragePaths {
		err := st.openPath(path.Path)
		if err != nil {
			return xerrors.Errorf("opening path %s: %w", path.Path, err)
		}
	}

	return nil
}

func (st *storage) acquireSector(mid abi.ActorID, id abi.SectorNumber, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (sectorbuilder.SectorPaths, func(), error) {
	if existing|allocate != existing^allocate {
		return sectorbuilder.SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector")
	}

	st.localLk.RLock()

	var out sectorbuilder.SectorPaths

	for _, fileType := range pathTypes {
		if fileType&existing == 0 {
			continue
		}

		for _, p := range st.paths {
			p.lk.Lock()
			s, ok := p.sectors[abi.SectorID{
				Miner:  mid,
				Number: id,
			}]
			p.lk.Unlock()
			if !ok {
				continue
			}
			if s&fileType == 0 {
				continue
			}

			spath := filepath.Join(p.local, fileType.String(), fmt.Sprintf("s-t0%d-%d", mid, id))

			switch fileType {
			case sectorbuilder.FTUnsealed:
				out.Unsealed = spath
			case sectorbuilder.FTSealed:
				out.Sealed = spath
			case sectorbuilder.FTCache:
				out.Cache = spath
			}

			existing ^= fileType
		}
	}

	for _, fileType := range pathTypes {
		if fileType&allocate == 0 {
			continue
		}

		var best string

		for _, p := range st.paths {
			if sealing && !p.meta.CanSeal {
				continue
			}
			if !sealing && !p.meta.CanStore {
				continue
			}

			p.lk.Lock()
			p.sectors[abi.SectorID{
				Miner:  mid,
				Number: id,
			}] |= fileType
			p.lk.Unlock()

			// TODO: Check free space
			// TODO: Calc weights

			best = filepath.Join(p.local, fileType.String(), fmt.Sprintf("s-t0%d-%d", mid, id))
			break // todo: the first path won't always be the best
		}

		if best == "" {
			st.localLk.RUnlock()
			return sectorbuilder.SectorPaths{}, nil, xerrors.Errorf("couldn't find a suitable path for a sector")
		}

		switch fileType {
		case sectorbuilder.FTUnsealed:
			out.Unsealed = best
		case sectorbuilder.FTSealed:
			out.Sealed = best
		case sectorbuilder.FTCache:
			out.Cache = best
		}

		allocate ^= fileType
	}

	return out, st.localLk.RUnlock, nil
}
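
The guard at the top of acquireSector is a disjointness test: for bit flags, existing|allocate equals existing^allocate exactly when no flag appears in both sets. A worked example with illustrative file-type flag values:

const (
	ftUnsealed = 1 << iota // 1
	ftSealed               // 2
	ftCache                // 4
)

// Disjoint sets: (ftUnsealed|ftSealed) == 3 and (ftUnsealed^ftSealed) == 3, so the request passes.
// Overlapping sets: (ftSealed|ftSealed) == 2 but (ftSealed^ftSealed) == 0, so the request is rejected.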

func (st *storage) findBestAllocStorage(allocate sectorbuilder.SectorFileType, sealing bool) ([]config.StorageMeta, error) {
	var out []config.StorageMeta

	for _, p := range st.paths {
		if sealing && !p.meta.CanSeal {
			continue
		}
		if !sealing && !p.meta.CanStore {
			continue
		}

		// TODO: filter out of space

		out = append(out, p.meta)
	}

	if len(out) == 0 {
		return nil, xerrors.New("no good path found")
	}

	// todo: sort by some kind of preference
	return out, nil
}

func (st *storage) findSector(mid abi.ActorID, sn abi.SectorNumber, typ sectorbuilder.SectorFileType) ([]config.StorageMeta, error) {
	var out []config.StorageMeta
	for _, p := range st.paths {
		p.lk.Lock()
		t := p.sectors[abi.SectorID{
			Miner:  mid,
			Number: sn,
		}]
		if t|typ == 0 {
			continue
		}
		p.lk.Unlock()
		out = append(out, p.meta)
	}
	if len(out) == 0 {
		return nil, xerrors.Errorf("sector %s/s-t0%d-%d not found", typ, mid, sn)
	}

	return out, nil
}

func (st *storage) local() []Path {
	var out []Path
	for _, p := range st.paths {
		if p.local == "" {
			continue
		}

		out = append(out, Path{
			ID:        p.meta.ID,
			Weight:    p.meta.Weight,
			LocalPath: p.local,
			CanSeal:   p.meta.CanSeal,
			CanStore:  p.meta.CanStore,
		})
	}

	return out
}

func parseSectorID(baseName string) (abi.SectorID, error) {
	var n abi.SectorNumber
	var mid abi.ActorID
	read, err := fmt.Sscanf(baseName, "s-t0%d-%d", &mid, &n)
	if err != nil {
		return abi.SectorID{}, xerrors.Errorf(": %w", err)
	}

	if read != 2 {
		return abi.SectorID{}, xerrors.Errorf("parseSectorID expected to scan 2 values, got %d", read)
	}

	return abi.SectorID{
		Miner:  mid,
		Number: n,
	}, nil
}
@ -1,121 +0,0 @@
package sealmgr

import (
	"context"
	"io"
	"sync"

	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-fil-markets/storedcounter"
	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-storage/storage"
)

type LocalWorker struct {
	sectorbuilder.Basic
}

var _ Worker = &LocalWorker{}

// Simple implements a very basic storage manager which has one local worker,
// running one thing locally
type Simple struct {
	sc    *storedcounter.StoredCounter
	maddr address.Address

	rateLimiter sync.Mutex
	worker      Worker
}

type sszgetter interface {
	SectorSize() abi.SectorSize
}

func (s *Simple) SectorSize() abi.SectorSize {
	return s.worker.(sszgetter).SectorSize()
}

func NewSimpleManager(sc *storedcounter.StoredCounter, maddr address.Address, sb sectorbuilder.Basic) (*Simple, error) {
	w := &LocalWorker{
		sb,
	}

	return &Simple{
		sc:     sc,
		maddr:  maddr,
		worker: w,
	}, nil
}

func (s *Simple) NewSector() (abi.SectorNumber, error) {
	n, err := s.sc.Next()
	if err != nil {
		return 0, xerrors.Errorf("acquire sector number: %w", err)
	}

	return abi.SectorNumber(n), nil
}

func (s *Simple) AddPiece(ctx context.Context, sectorNum abi.SectorNumber, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r storage.Data) (abi.PieceInfo, error) {
	s.rateLimiter.Lock()
	defer s.rateLimiter.Unlock()

	return s.worker.AddPiece(ctx, sectorNum, existingPieces, sz, r)
}

func (s *Simple) SealPreCommit1(ctx context.Context, sectorNum abi.SectorNumber, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
	s.rateLimiter.Lock()
	defer s.rateLimiter.Unlock()

	return s.worker.SealPreCommit1(ctx, sectorNum, ticket, pieces)
}

func (s *Simple) SealPreCommit2(ctx context.Context, sectorNum abi.SectorNumber, phase1Out storage.PreCommit1Out) (sealedCID cid.Cid, unsealedCID cid.Cid, err error) {
	s.rateLimiter.Lock()
	defer s.rateLimiter.Unlock()

	return s.worker.SealPreCommit2(ctx, sectorNum, phase1Out)
}

func (s *Simple) SealCommit1(ctx context.Context, sectorNum abi.SectorNumber, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, sealedCID cid.Cid, unsealedCID cid.Cid) (output storage.Commit1Out, err error) {
	s.rateLimiter.Lock()
	defer s.rateLimiter.Unlock()

	return s.worker.SealCommit1(ctx, sectorNum, ticket, seed, pieces, sealedCID, unsealedCID)
}

func (s *Simple) SealCommit2(ctx context.Context, sectorNum abi.SectorNumber, phase1Out storage.Commit1Out) (proof storage.Proof, err error) {
	s.rateLimiter.Lock()
	defer s.rateLimiter.Unlock()

	return s.worker.SealCommit2(ctx, sectorNum, phase1Out)
}

func (s *Simple) FinalizeSector(ctx context.Context, sectorNum abi.SectorNumber) error {
	s.rateLimiter.Lock()
	defer s.rateLimiter.Unlock()

	return s.worker.FinalizeSector(ctx, sectorNum)
}

func (s *Simple) GenerateEPostCandidates(sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, faults []abi.SectorNumber) ([]storage.PoStCandidateWithTicket, error) {
	return s.worker.GenerateEPostCandidates(sectorInfo, challengeSeed, faults)
}

func (s *Simple) GenerateFallbackPoSt(sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, faults []abi.SectorNumber) ([]storage.PoStCandidateWithTicket, []abi.PoStProof, error) {
	return s.worker.GenerateFallbackPoSt(sectorInfo, challengeSeed, faults)
}

func (s *Simple) ComputeElectionPoSt(sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, winners []abi.PoStCandidate) ([]abi.PoStProof, error) {
	return s.worker.ComputeElectionPoSt(sectorInfo, challengeSeed, winners)
}

func (s *Simple) ReadPieceFromSealedSector(context.Context, abi.SectorNumber, sectorbuilder.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error) {
	panic("todo")
}

var _ Manager = &Simple{}
@ -1,10 +0,0 @@
package sealmgr

type TaskType string

const (
	TTAddPiece   TaskType = "seal/v0/addpiece"
	TTPreCommit1 TaskType = "seal/v0/precommit/1"
	TTPreCommit2 TaskType = "seal/v0/precommit/2" // Commit1 is called here too
	TTCommit2    TaskType = "seal/v0/commit/2"
)
@ -1,34 +0,0 @@
package sealmgr

import (
	"context"
	"io"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-storage/storage"
)

type Worker interface {
	sectorbuilder.Sealer
	storage.Prover
}

type Manager interface {
	SectorSize() abi.SectorSize

	// NewSector allocates staging area for data
	// Storage manager forwards proof-related calls
	NewSector() (abi.SectorNumber, error)

	// TODO: Can[Pre]Commit[1,2]
	// TODO: Scrub() []Faults

	// TODO: Separate iface
	ReadPieceFromSealedSector(context.Context, abi.SectorNumber, sectorbuilder.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error)

	sectorbuilder.Sealer
	storage.Prover
}
435
storage/sectorstorage/manager.go
Normal file
@ -0,0 +1,435 @@
package sectorstorage

import (
	"container/list"
	"context"
	"errors"
	"io"
	"net/http"
	"sync"

	"github.com/ipfs/go-cid"
	logging "github.com/ipfs/go-log/v2"
	"github.com/mitchellh/go-homedir"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks"
	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

var log = logging.Logger("advmgr")

var ErrNoWorkers = errors.New("no suitable workers found")

type URLs []string

type Worker interface {
	sectorbuilder.Sealer

	TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error)

	// Returns paths accessible to the worker
	Paths(context.Context) ([]stores.StoragePath, error)

	Info(context.Context) (api.WorkerInfo, error)

	Close() error
}

type SectorManager interface {
	SectorSize() abi.SectorSize

	ReadPieceFromSealedSector(context.Context, abi.SectorID, sectorbuilder.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error)

	sectorbuilder.Sealer
	storage.Prover
}

type WorkerID uint64

type Manager struct {
	scfg *sectorbuilder.Config

	ls         stores.LocalStorage
	storage    *stores.Remote
	localStore *stores.Local
	remoteHnd  *stores.FetchHandler
	index      stores.SectorIndex

	storage.Prover

	workersLk  sync.Mutex
	nextWorker WorkerID
	workers    map[WorkerID]*workerHandle

	newWorkers chan *workerHandle
	schedule   chan *workerRequest
	workerFree chan WorkerID
	closing    chan struct{}

	schedQueue *list.List // List[*workerRequest]
}

func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg *sectorbuilder.Config, sc config.Storage, urls URLs, ca api.Common) (*Manager, error) {
	lstor, err := stores.NewLocal(ctx, ls, si, urls)
	if err != nil {
		return nil, err
	}

	prover, err := sectorbuilder.New(&readonlyProvider{stor: lstor}, cfg)
	if err != nil {
		return nil, xerrors.Errorf("creating prover instance: %w", err)
	}

	token, err := ca.AuthNew(ctx, []api.Permission{"admin"})
	if err != nil {
		return nil, xerrors.Errorf("creating auth token for remote fetches: %w", err)
	}
	headers := http.Header{}
	headers.Add("Authorization", "Bearer "+string(token))
	stor := stores.NewRemote(lstor, si, headers)

	m := &Manager{
		scfg: cfg,

		ls:         ls,
		storage:    stor,
		localStore: lstor,
		remoteHnd:  &stores.FetchHandler{Local: lstor},
		index:      si,

		nextWorker: 0,
		workers:    map[WorkerID]*workerHandle{},

		newWorkers: make(chan *workerHandle),
		schedule:   make(chan *workerRequest),
		workerFree: make(chan WorkerID),
		closing:    make(chan struct{}),

		schedQueue: list.New(),

		Prover: prover,
	}

	go m.runSched()

	localTasks := []sealtasks.TaskType{
		sealtasks.TTAddPiece, sealtasks.TTCommit1, sealtasks.TTFinalize,
	}
	if sc.AllowPreCommit1 {
		localTasks = append(localTasks, sealtasks.TTPreCommit1)
	}
	if sc.AllowPreCommit2 {
		localTasks = append(localTasks, sealtasks.TTPreCommit2)
	}
	if sc.AllowCommit {
		localTasks = append(localTasks, sealtasks.TTCommit2)
	}

	err = m.AddWorker(ctx, NewLocalWorker(WorkerConfig{
		SealProof: cfg.SealProofType,
		TaskTypes: localTasks,
	}, stor, lstor, si))
	if err != nil {
		return nil, xerrors.Errorf("adding local worker: %w", err)
	}

	return m, nil
}
|
||||
|
||||
func (m *Manager) AddLocalStorage(ctx context.Context, path string) error {
|
||||
path, err := homedir.Expand(path)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("expanding local path: %w", err)
|
||||
}
|
||||
|
||||
if err := m.localStore.OpenPath(ctx, path); err != nil {
|
||||
return xerrors.Errorf("opening local path: %w", err)
|
||||
}
|
||||
|
||||
if err := m.ls.SetStorage(func(sc *config.StorageConfig) {
|
||||
sc.StoragePaths = append(sc.StoragePaths, config.LocalPath{Path: path})
|
||||
}); err != nil {
|
||||
return xerrors.Errorf("get storage config: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Manager) AddWorker(ctx context.Context, w Worker) error {
|
||||
info, err := w.Info(ctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting worker info: %w", err)
|
||||
}
|
||||
|
||||
m.newWorkers <- &workerHandle{
|
||||
w: w,
|
||||
info: info,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Manager) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
m.remoteHnd.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
func (m *Manager) SectorSize() abi.SectorSize {
|
||||
sz, _ := m.scfg.SealProofType.SectorSize()
|
||||
return sz
|
||||
}
|
||||
|
||||
func (m *Manager) ReadPieceFromSealedSector(context.Context, abi.SectorID, sectorbuilder.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (m *Manager) getWorkersByPaths(task sealtasks.TaskType, inPaths []stores.StorageInfo) ([]WorkerID, map[WorkerID]stores.StorageInfo) {
|
||||
m.workersLk.Lock()
|
||||
defer m.workersLk.Unlock()
|
||||
|
||||
var workers []WorkerID
|
||||
paths := map[WorkerID]stores.StorageInfo{}
|
||||
|
||||
for i, worker := range m.workers {
|
||||
tt, err := worker.w.TaskTypes(context.TODO())
|
||||
if err != nil {
|
||||
log.Errorf("error getting supported worker task types: %+v", err)
|
||||
continue
|
||||
}
|
||||
if _, ok := tt[task]; !ok {
|
||||
log.Debugf("dropping worker %d; task %s not supported (supports %v)", i, task, tt)
|
||||
continue
|
||||
}
|
||||
|
||||
phs, err := worker.w.Paths(context.TODO())
|
||||
if err != nil {
|
||||
log.Errorf("error getting worker paths: %+v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// check if the worker has access to the path we selected
|
||||
var st *stores.StorageInfo
|
||||
for _, p := range phs {
|
||||
for _, meta := range inPaths {
|
||||
if p.ID == meta.ID {
|
||||
if st != nil && st.Weight > p.Weight {
|
||||
continue
|
||||
}
|
||||
|
||||
p := meta // copy
|
||||
st = &p
|
||||
}
|
||||
}
|
||||
}
|
||||
if st == nil {
|
||||
log.Debugf("skipping worker %d; doesn't have any of %v", i, inPaths)
|
||||
log.Debugf("skipping worker %d; only has %v", i, phs)
|
||||
continue
|
||||
}
|
||||
|
||||
paths[i] = *st
|
||||
workers = append(workers, i)
|
||||
}
|
||||
|
||||
return workers, paths
|
||||
}
|
||||
|
||||
func (m *Manager) getWorker(ctx context.Context, taskType sealtasks.TaskType, accept []WorkerID) (Worker, func(), error) {
|
||||
ret := make(chan workerResponse)
|
||||
|
||||
select {
|
||||
case m.schedule <- &workerRequest{
|
||||
taskType: taskType,
|
||||
accept: accept,
|
||||
|
||||
cancel: ctx.Done(),
|
||||
ret: ret,
|
||||
}:
|
||||
case <-m.closing:
|
||||
return nil, nil, xerrors.New("closing")
|
||||
case <-ctx.Done():
|
||||
return nil, nil, ctx.Err()
|
||||
}
|
||||
|
||||
select {
|
||||
case resp := <-ret:
|
||||
return resp.worker, resp.done, resp.err
|
||||
case <-m.closing:
|
||||
return nil, nil, xerrors.New("closing")
|
||||
case <-ctx.Done():
|
||||
return nil, nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Manager) NewSector(ctx context.Context, sector abi.SectorID) error {
|
||||
log.Warnf("stub NewSector")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
|
||||
// TODO: consider multiple paths vs workers when initially allocating
|
||||
|
||||
var best []stores.StorageInfo
|
||||
var err error
|
||||
if len(existingPieces) == 0 { // new
|
||||
best, err = m.index.StorageBestAlloc(ctx, sectorbuilder.FTUnsealed, true)
|
||||
} else { // append to existing
|
||||
best, err = m.index.StorageFindSector(ctx, sector, sectorbuilder.FTUnsealed, false)
|
||||
}
|
||||
if err != nil {
|
||||
return abi.PieceInfo{}, xerrors.Errorf("finding sector path: %w", err)
|
||||
}
|
||||
|
||||
log.Debugf("find workers for %v", best)
|
||||
candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTAddPiece, best)
|
||||
|
||||
if len(candidateWorkers) == 0 {
|
||||
return abi.PieceInfo{}, ErrNoWorkers
|
||||
}
|
||||
|
||||
worker, done, err := m.getWorker(ctx, sealtasks.TTAddPiece, candidateWorkers)
|
||||
if err != nil {
|
||||
return abi.PieceInfo{}, xerrors.Errorf("scheduling worker: %w", err)
|
||||
}
|
||||
defer done()
|
||||
|
||||
// TODO: select(candidateWorkers, ...)
|
||||
// TODO: remove the sectorbuilder abstraction, pass path directly
|
||||
return worker.AddPiece(ctx, sector, existingPieces, sz, r)
|
||||
}
|
||||
|
||||
func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
|
||||
// TODO: also consider where the unsealed data sits
|
||||
|
||||
best, err := m.index.StorageBestAlloc(ctx, sectorbuilder.FTCache|sectorbuilder.FTSealed, true)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("finding path for sector sealing: %w", err)
|
||||
}
|
||||
|
||||
candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTPreCommit1, best)
|
||||
if len(candidateWorkers) == 0 {
|
||||
return nil, ErrNoWorkers
|
||||
}
|
||||
|
||||
worker, done, err := m.getWorker(ctx, sealtasks.TTPreCommit1, candidateWorkers)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("scheduling worker: %w", err)
|
||||
}
|
||||
defer done()
|
||||
|
||||
// TODO: select(candidateWorkers, ...)
|
||||
// TODO: remove the sectorbuilder abstraction, pass path directly
|
||||
return worker.SealPreCommit1(ctx, sector, ticket, pieces)
|
||||
}
|
||||
|
||||
func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) {
|
||||
// TODO: allow workers to fetch the sectors
|
||||
|
||||
best, err := m.index.StorageFindSector(ctx, sector, sectorbuilder.FTCache|sectorbuilder.FTSealed, true)
|
||||
if err != nil {
|
||||
return storage.SectorCids{}, xerrors.Errorf("finding path for sector sealing: %w", err)
|
||||
}
|
||||
|
||||
candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTPreCommit2, best)
|
||||
if len(candidateWorkers) == 0 {
|
||||
return storage.SectorCids{}, ErrNoWorkers
|
||||
}
|
||||
|
||||
worker, done, err := m.getWorker(ctx, sealtasks.TTPreCommit2, candidateWorkers)
|
||||
if err != nil {
|
||||
return storage.SectorCids{}, xerrors.Errorf("scheduling worker: %w", err)
|
||||
}
|
||||
defer done()
|
||||
|
||||
// TODO: select(candidateWorkers, ...)
|
||||
// TODO: remove the sectorbuilder abstraction, pass path directly
|
||||
return worker.SealPreCommit2(ctx, sector, phase1Out)
|
||||
}
|
||||
|
||||
func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) {
|
||||
best, err := m.index.StorageFindSector(ctx, sector, sectorbuilder.FTCache|sectorbuilder.FTSealed, true)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("finding path for sector sealing: %w", err)
|
||||
}
|
||||
|
||||
candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTCommit1, best)
|
||||
if len(candidateWorkers) == 0 {
|
||||
return nil, ErrNoWorkers
|
||||
}
|
||||
|
||||
// TODO: Try very hard to execute on worker with access to the sectors
|
||||
worker, done, err := m.getWorker(ctx, sealtasks.TTCommit1, candidateWorkers)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("scheduling worker: %w", err)
|
||||
}
|
||||
defer done()
|
||||
|
||||
// TODO: select(candidateWorkers, ...)
|
||||
// TODO: remove the sectorbuilder abstraction, pass path directly
|
||||
return worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
|
||||
}
|
||||
|
||||
func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (proof storage.Proof, err error) {
|
||||
var candidateWorkers []WorkerID
|
||||
|
||||
m.workersLk.Lock()
|
||||
for id, worker := range m.workers {
|
||||
tt, err := worker.w.TaskTypes(ctx)
|
||||
if err != nil {
|
||||
log.Errorf("error getting supported worker task types: %+v", err)
|
||||
continue
|
||||
}
|
||||
if _, ok := tt[sealtasks.TTCommit2]; !ok {
|
||||
continue
|
||||
}
|
||||
candidateWorkers = append(candidateWorkers, id)
|
||||
}
|
||||
m.workersLk.Unlock()
|
||||
if len(candidateWorkers) == 0 {
|
||||
return nil, ErrNoWorkers
|
||||
}
|
||||
|
||||
worker, done, err := m.getWorker(ctx, sealtasks.TTCommit2, candidateWorkers)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("scheduling worker: %w", err)
|
||||
}
|
||||
defer done()
|
||||
|
||||
return worker.SealCommit2(ctx, sector, phase1Out)
|
||||
}
|
||||
|
||||
func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID) error {
|
||||
best, err := m.index.StorageFindSector(ctx, sector, sectorbuilder.FTCache|sectorbuilder.FTSealed|sectorbuilder.FTUnsealed, true)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("finding sealed sector: %w", err)
|
||||
}
|
||||
|
||||
candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTFinalize, best)
|
||||
if len(candidateWorkers) == 0 {
|
||||
return ErrNoWorkers
|
||||
}
|
||||
|
||||
// TODO: Remove sector from sealing stores
|
||||
// TODO: Move the sector to long-term storage
|
||||
return m.workers[candidateWorkers[0]].w.FinalizeSector(ctx, sector)
|
||||
}
|
||||
|
||||
func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error) {
|
||||
l, err := m.localStore.Local(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out := map[stores.ID]string{}
|
||||
for _, st := range l {
|
||||
out[st.ID] = st.LocalPath
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (m *Manager) FsStat(ctx context.Context, id stores.ID) (stores.FsStat, error) {
|
||||
return m.storage.FsStat(ctx, id)
|
||||
}
|
||||
|
||||
func (m *Manager) Close() error {
|
||||
close(m.closing)
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ SectorManager = &Manager{}
|
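For context, getWorker and runSched together implement a standard Go request/response-over-channels pattern: the scheduler goroutine owns all worker state, and callers hand it a request carrying a private reply channel plus a cancel channel. A stripped-down, self-contained illustration of the same shape (the names request, schedule and closing are ours, not the patch's):

package main

import (
	"context"
	"fmt"
)

type request struct {
	ret    chan<- string
	cancel <-chan struct{}
}

func main() {
	schedule := make(chan *request)
	closing := make(chan struct{})

	// Scheduler loop: owns all state, replies over the per-request
	// channel, mirroring how runSched responds via workerRequest.ret.
	go func() {
		for {
			select {
			case req := <-schedule:
				select {
				case req.ret <- "worker-0":
				case <-req.cancel: // caller gave up; drop the reply
				}
			case <-closing:
				return
			}
		}
	}()

	ctx := context.Background()
	ret := make(chan string)

	// Submit the request, honoring cancellation, like getWorker does.
	select {
	case schedule <- &request{ret: ret, cancel: ctx.Done()}:
	case <-ctx.Done():
		panic(ctx.Err())
	}

	// Wait for the response on the private channel.
	select {
	case resp := <-ret:
		fmt.Println("scheduled on", resp)
	case <-ctx.Done():
		panic(ctx.Err())
	}

	close(closing)
}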
@ -1,9 +1,10 @@
package sbmock
package mock

import (
	"bytes"
	"context"
	"fmt"
	"github.com/filecoin-project/lotus/storage/sectorstorage"
	"io"
	"io/ioutil"
	"math/big"
@ -23,8 +24,8 @@ import (

var log = logging.Logger("sbmock")

type SBMock struct {
	sectors map[abi.SectorNumber]*sectorState
type SectorMgr struct {
	sectors map[abi.SectorID]*sectorState
	sectorSize abi.SectorSize
	nextSectorID abi.SectorNumber
	rateLimit chan struct{}
@ -35,14 +36,14 @@ type SBMock struct {

type mockVerif struct{}

func NewMockSectorBuilder(threads int, ssize abi.SectorSize) *SBMock {
func NewMockSectorMgr(threads int, ssize abi.SectorSize) *SectorMgr {
	rt, _, err := api.ProofTypeFromSectorSize(ssize)
	if err != nil {
		panic(err)
	}

	return &SBMock{
		sectors: make(map[abi.SectorNumber]*sectorState),
	return &SectorMgr{
		sectors: make(map[abi.SectorID]*sectorState),
		sectorSize: ssize,
		nextSectorID: 5,
		rateLimit: make(chan struct{}, threads),
@ -65,7 +66,7 @@ type sectorState struct {
	lk sync.Mutex
}

func (sb *SBMock) RateLimit() func() {
func (sb *SectorMgr) RateLimit() func() {
	sb.rateLimit <- struct{}{}

	// TODO: probably want to copy over rate limit code
@ -74,7 +75,11 @@ func (sb *SBMock) RateLimit() func() {
	}
}

func (sb *SBMock) AddPiece(ctx context.Context, sectorId abi.SectorNumber, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
func (sb *SectorMgr) NewSector(ctx context.Context, sector abi.SectorID) error {
	return nil
}

func (sb *SectorMgr) AddPiece(ctx context.Context, sectorId abi.SectorID, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
	log.Warn("Add piece: ", sectorId, size, sb.proofType)
	sb.lk.Lock()
	ss, ok := sb.sectors[sectorId]
@ -102,11 +107,11 @@ func (sb *SBMock) AddPiece(ctx context.Context, sectorId abi.SectorNumber, exist
	}, nil
}

func (sb *SBMock) SectorSize() abi.SectorSize {
func (sb *SectorMgr) SectorSize() abi.SectorSize {
	return sb.sectorSize
}

func (sb *SBMock) AcquireSectorNumber() (abi.SectorNumber, error) {
func (sb *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) {
	sb.lk.Lock()
	defer sb.lk.Unlock()
	id := sb.nextSectorID
@ -114,11 +119,7 @@ func (sb *SBMock) AcquireSectorNumber() (abi.SectorNumber, error) {
	return id, nil
}

func (sb *SBMock) GenerateFallbackPoSt([]abi.SectorInfo, abi.PoStRandomness, []abi.SectorNumber) ([]storage.PoStCandidateWithTicket, []abi.PoStProof, error) {
	panic("NYI")
}

func (sb *SBMock) SealPreCommit1(ctx context.Context, sid abi.SectorNumber, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
func (sb *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
	sb.lk.Lock()
	ss, ok := sb.sectors[sid]
	sb.lk.Unlock()
@ -173,7 +174,7 @@ func (sb *SBMock) SealPreCommit1(ctx context.Context, sid abi.SectorNumber, tick
	return cc, nil
}

func (sb *SBMock) SealPreCommit2(ctx context.Context, sid abi.SectorNumber, phase1Out storage.PreCommit1Out) (sealedCID cid.Cid, unsealedCID cid.Cid, err error) {
func (sb *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) {
	db := []byte(string(phase1Out))
	db[0] ^= 'd'

@ -186,10 +187,13 @@ func (sb *SBMock) SealPreCommit2(ctx context.Context, sid abi.SectorNumber, phas

	commR := commcid.DataCommitmentV1ToCID(commr)

	return commR, d, nil
	return storage.SectorCids{
		Unsealed: d,
		Sealed:   commR,
	}, nil
}

func (sb *SBMock) SealCommit1(ctx context.Context, sid abi.SectorNumber, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, sealedCid cid.Cid, unsealed cid.Cid) (output storage.Commit1Out, err error) {
func (sb *SectorMgr) SealCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) {
	sb.lk.Lock()
	ss, ok := sb.sectors[sid]
	sb.lk.Unlock()
@ -211,16 +215,16 @@ func (sb *SBMock) SealCommit1(ctx context.Context, sid abi.SectorNumber, ticket

	var out [32]byte
	for i := range out {
		out[i] = unsealed.Bytes()[i] + sealedCid.Bytes()[31-i] - ticket[i]*seed[i] ^ byte(sid&0xff)
		out[i] = cids.Unsealed.Bytes()[i] + cids.Sealed.Bytes()[31-i] - ticket[i]*seed[i] ^ byte(sid.Number&0xff)
	}

	return out[:], nil
}

func (sb *SBMock) SealCommit2(ctx context.Context, sectorNum abi.SectorNumber, phase1Out storage.Commit1Out) (proof storage.Proof, err error) {
func (sb *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.Commit1Out) (proof storage.Proof, err error) {
	var out [32]byte
	for i := range out {
		out[i] = phase1Out[i] ^ byte(sectorNum&0xff)
		out[i] = phase1Out[i] ^ byte(sid.Number&0xff)
	}

	return out[:], nil
@ -228,7 +232,7 @@ func (sb *SBMock) SealCommit2(ctx context.Context, sectorNum abi.SectorNumber, p

// Test Instrumentation Methods

func (sb *SBMock) FailSector(sid abi.SectorNumber) error {
func (sb *SectorMgr) FailSector(sid abi.SectorID) error {
	sb.lk.Lock()
	defer sb.lk.Unlock()
	ss, ok := sb.sectors[sid]
@ -256,11 +260,15 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) {
	}
}

func (sb *SBMock) ComputeElectionPoSt(sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, winners []abi.PoStCandidate) ([]abi.PoStProof, error) {
func (sb *SectorMgr) GenerateFallbackPoSt(context.Context, abi.ActorID, []abi.SectorInfo, abi.PoStRandomness, []abi.SectorNumber) (storage.FallbackPostOut, error) {
	panic("implement me")
}

func (sb *SBMock) GenerateEPostCandidates(sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, faults []abi.SectorNumber) ([]storage.PoStCandidateWithTicket, error) {
func (sb *SectorMgr) ComputeElectionPoSt(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, winners []abi.PoStCandidate) ([]abi.PoStProof, error) {
	panic("implement me")
}

func (sb *SectorMgr) GenerateEPostCandidates(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, faults []abi.SectorNumber) ([]storage.PoStCandidateWithTicket, error) {
	if len(faults) > 0 {
		panic("todo")
	}
@ -280,7 +288,7 @ func (sb *SBMock) GenerateEPostCandidates(sectorInfo []abi.SectorInfo, challenge
		Candidate: abi.PoStCandidate{
			SectorID: abi.SectorID{
				Number: abi.SectorNumber((int(start) + i) % len(sectorInfo)),
				Miner: 1125125, //TODO
				Miner: mid,
			},
			PartialTicket: abi.PartialTicket(challengeSeed),
		},
@ -290,32 +298,37 @@ func (sb *SBMock) GenerateEPostCandidates(sectorInfo []abi.SectorInfo, challenge
	return out, nil
}

func (sb *SBMock) ReadPieceFromSealedSector(ctx context.Context, sectorID abi.SectorNumber, offset sectorbuilder.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, commD cid.Cid) (io.ReadCloser, error) {
func (sb *SectorMgr) ReadPieceFromSealedSector(ctx context.Context, sectorID abi.SectorID, offset sectorbuilder.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, commD cid.Cid) (io.ReadCloser, error) {
	if len(sb.sectors[sectorID].pieces) > 1 {
		panic("implme")
	}
	return ioutil.NopCloser(io.LimitReader(bytes.NewReader(sb.sectors[sectorID].pieces[0].Bytes()[offset:]), int64(size))), nil
}

func (sb *SBMock) StageFakeData() (abi.SectorNumber, []abi.PieceInfo, error) {
func (sb *SectorMgr) StageFakeData(mid abi.ActorID) (abi.SectorID, []abi.PieceInfo, error) {
	usize := abi.PaddedPieceSize(sb.sectorSize).Unpadded()
	sid, err := sb.AcquireSectorNumber()
	if err != nil {
		return 0, nil, err
		return abi.SectorID{}, nil, err
	}

	buf := make([]byte, usize)
	rand.Read(buf)

	pi, err := sb.AddPiece(context.TODO(), sid, nil, usize, bytes.NewReader(buf))
	if err != nil {
		return 0, nil, err
	id := abi.SectorID{
		Miner:  mid,
		Number: sid,
	}

	return sid, []abi.PieceInfo{pi}, nil
	pi, err := sb.AddPiece(context.TODO(), id, nil, usize, bytes.NewReader(buf))
	if err != nil {
		return abi.SectorID{}, nil, err
	}

	return id, []abi.PieceInfo{pi}, nil
}

func (sb *SBMock) FinalizeSector(context.Context, abi.SectorNumber) error {
func (sb *SectorMgr) FinalizeSector(context.Context, abi.SectorID) error {
	return nil
}

@ -355,4 +368,4 @@ func (m mockVerif) GenerateDataCommitment(ssize abi.PaddedPieceSize, pieces []ab
var MockVerifier = mockVerif{}

var _ sectorbuilder.Verifier = MockVerifier
var _ sectorbuilder.Basic = &SBMock{}
var _ sectorstorage.SectorManager = &SectorMgr{}
@ -1,4 +1,4 @@
package sbmock
package mock

import (
	"context"
@ -9,9 +9,9 @@ import (
)

func TestOpFinish(t *testing.T) {
	sb := NewMockSectorBuilder(1, 2048)
	sb := NewMockSectorMgr(1, 2048)

	sid, pieces, err := sb.StageFakeData()
	sid, pieces, err := sb.StageFakeData(123)
	if err != nil {
		t.Fatal(err)
	}
@ -1,4 +1,4 @@
package sbmock
package mock

import (
	"github.com/filecoin-project/go-address"
@ -1,4 +1,4 @@
package sbmock
package mock

import (
	"crypto/rand"
135
storage/sectorstorage/resources.go
Normal file
@ -0,0 +1,135 @@
package sectorstorage

import (
	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks"
	"github.com/filecoin-project/specs-actors/actors/abi"
)

var FSOverheadSeal = map[sectorbuilder.SectorFileType]int{ // 10x overheads
	sectorbuilder.FTUnsealed: 10,
	sectorbuilder.FTSealed:   10,
	sectorbuilder.FTCache:    70, // TODO: confirm for 32G
}

var FsOverheadFinalized = map[sectorbuilder.SectorFileType]int{
	sectorbuilder.FTUnsealed: 10,
	sectorbuilder.FTSealed:   10,
	sectorbuilder.FTCache:    2,
}

type Resources struct {
	MinMemory uint64 // What Must be in RAM for decent perf
	MaxMemory uint64 // Memory required (swap + ram)

	MultiThread bool
	CanGPU      bool

	BaseMinMemory uint64 // What Must be in RAM for decent perf (shared between threads)
}

const MaxCachingOverhead = 32 << 30

var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{
	sealtasks.TTAddPiece: {
		abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ // This is probably a bit conservative
			MaxMemory: 32 << 30,
			MinMemory: 32 << 30,

			MultiThread: false,

			BaseMinMemory: 1 << 30,
		},
		abi.RegisteredProof_StackedDRG512MiBSeal: Resources{
			MaxMemory: 1 << 30,
			MinMemory: 1 << 30,

			MultiThread: false,

			BaseMinMemory: 1 << 30,
		},
	},
	sealtasks.TTPreCommit1: {
		abi.RegisteredProof_StackedDRG32GiBSeal: Resources{
			MaxMemory: 64 << 30,
			MinMemory: 32 << 30,

			MultiThread: false,

			BaseMinMemory: 30 << 30,
		},
		abi.RegisteredProof_StackedDRG512MiBSeal: Resources{
			MaxMemory: 3 << 29, // 1.5G
			MinMemory: 1 << 30,

			MultiThread: false,

			BaseMinMemory: 1 << 30,
		},
	},
	sealtasks.TTPreCommit2: {
		abi.RegisteredProof_StackedDRG32GiBSeal: Resources{
			MaxMemory: 96 << 30,
			MinMemory: 64 << 30,

			MultiThread: true,

			BaseMinMemory: 30 << 30,
		},
		abi.RegisteredProof_StackedDRG512MiBSeal: Resources{
			MaxMemory: 3 << 29, // 1.5G
			MinMemory: 1 << 30,

			MultiThread: true,

			BaseMinMemory: 1 << 30,
		},
	},
	sealtasks.TTCommit1: { // Very short (~100ms), so params are very light
		abi.RegisteredProof_StackedDRG32GiBSeal: Resources{
			MaxMemory: 1 << 30,
			MinMemory: 1 << 30,

			MultiThread: false,

			BaseMinMemory: 1 << 30,
		},
		abi.RegisteredProof_StackedDRG512MiBSeal: Resources{
			MaxMemory: 1 << 30,
			MinMemory: 1 << 30,

			MultiThread: false,

			BaseMinMemory: 1 << 30,
		},
	},
	sealtasks.TTCommit2: { // TODO: Measure more accurately
		abi.RegisteredProof_StackedDRG32GiBSeal: Resources{
			MaxMemory: 110 << 30,
			MinMemory: 60 << 30,

			MultiThread: true,
			CanGPU:      true,

			BaseMinMemory: 64 << 30, // params
		},
		abi.RegisteredProof_StackedDRG512MiBSeal: Resources{
			MaxMemory: 3 << 29, // 1.5G
			MinMemory: 1 << 30,

			MultiThread: false, // This is fine
			CanGPU:      true,

			BaseMinMemory: 10 << 30,
		},
	},
}

func init() {
	// for now we just reuse params for 2kib and 8mib from 512mib

	for taskType := range ResourceTable {
		ResourceTable[taskType][abi.RegisteredProof_StackedDRG8MiBSeal] = ResourceTable[taskType][abi.RegisteredProof_StackedDRG512MiBSeal]
		ResourceTable[taskType][abi.RegisteredProof_StackedDRG2KiBSeal] = ResourceTable[taskType][abi.RegisteredProof_StackedDRG512MiBSeal]
	}
}
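The table is keyed by (task type, registered proof). A hypothetical helper, not part of this patch, showing the intended lookup shape; the scheduler's canHandleRequest does exactly this before admitting a task:

package sectorstorage

import (
	"github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks"
	"github.com/filecoin-project/specs-actors/actors/abi"
)

// resourceFor looks up the resource envelope for one (task, proof) pair.
// The second return reports whether the table has an entry at all.
func resourceFor(task sealtasks.TaskType, spt abi.RegisteredProof) (Resources, bool) {
	res, ok := ResourceTable[task][spt]
	return res, ok
}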
25
storage/sectorstorage/roprov.go
Normal file
@ -0,0 +1,25 @@
package sectorstorage

import (
	"context"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/specs-actors/actors/abi"

	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

type readonlyProvider struct {
	stor *stores.Local
}

func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (sectorbuilder.SectorPaths, func(), error) {
	if allocate != 0 { // 0 - don't allocate anything
		return sectorbuilder.SectorPaths{}, nil, xerrors.New("read-only storage")
	}

	p, _, done, err := l.stor.AcquireSector(ctx, id, existing, allocate, sealing)

	return p, done, err
}
256
storage/sectorstorage/sched.go
Normal file
@ -0,0 +1,256 @@
package sectorstorage

import (
	"golang.org/x/xerrors"

	"github.com/filecoin-project/specs-actors/actors/abi"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks"
)

const mib = 1 << 20

type workerRequest struct {
	taskType sealtasks.TaskType
	accept   []WorkerID // ordered by preference

	ret    chan<- workerResponse
	cancel <-chan struct{}
}

type workerResponse struct {
	err error

	worker Worker
	done   func()
}

func (r *workerRequest) respond(resp workerResponse) {
	select {
	case r.ret <- resp:
	case <-r.cancel:
		log.Warnf("request got cancelled before we could respond")
		if resp.done != nil {
			resp.done()
		}
	}
}

type workerHandle struct {
	w Worker

	info api.WorkerInfo

	memUsedMin uint64
	memUsedMax uint64
	gpuUsed    bool
	cpuUse     int // -1 - multicore thing; 0 - free; 1+ - singlecore things
}

func (m *Manager) runSched() {
	for {
		select {
		case w := <-m.newWorkers:
			m.schedNewWorker(w)
		case req := <-m.schedule:
			resp, err := m.maybeSchedRequest(req)
			if err != nil {
				req.respond(workerResponse{err: err})
				continue
			}

			if resp != nil {
				req.respond(*resp)
				continue
			}

			m.schedQueue.PushBack(req)
		case wid := <-m.workerFree:
			m.onWorkerFreed(wid)
		case <-m.closing:
			m.schedClose()
			return
		}
	}
}

func (m *Manager) onWorkerFreed(wid WorkerID) {
	for e := m.schedQueue.Front(); e != nil; e = e.Next() {
		req := e.Value.(*workerRequest)
		var ok bool
		for _, id := range req.accept {
			if id == wid {
				ok = true
				break
			}
		}
		if !ok {
			continue
		}

		resp, err := m.maybeSchedRequest(req)
		if err != nil {
			req.respond(workerResponse{err: err})
			continue
		}

		if resp != nil {
			req.respond(*resp)

			pe := e.Prev()
			m.schedQueue.Remove(e)
			if pe == nil {
				pe = m.schedQueue.Front()
			}
			if pe == nil {
				break
			}
			e = pe
			continue
		}
	}
}

func (m *Manager) maybeSchedRequest(req *workerRequest) (*workerResponse, error) {
	m.workersLk.Lock()
	defer m.workersLk.Unlock()

	tried := 0

	for _, id := range req.accept {
		w, ok := m.workers[id]
		if !ok {
			log.Warnf("requested worker %d is not in scheduler", id)
			continue
		}
		tried++

		canDo, err := m.canHandleRequest(id, w, req)
		if err != nil {
			return nil, err
		}

		if !canDo {
			continue
		}

		return m.makeResponse(id, w, req), nil
	}

	if tried == 0 {
		return nil, xerrors.New("maybeSchedRequest didn't find any good workers")
	}

	return nil, nil // put in waiting queue
}

func (m *Manager) makeResponse(wid WorkerID, w *workerHandle, req *workerRequest) *workerResponse {
	needRes := ResourceTable[req.taskType][m.scfg.SealProofType]

	w.gpuUsed = needRes.CanGPU
	if needRes.MultiThread {
		w.cpuUse = -1
	} else {
		if w.cpuUse != -1 {
			w.cpuUse++
		} else {
			log.Warnf("sched: makeResponse for worker %d: worker cpu is in multicore use, but a single core task was scheduled", wid)
		}
	}

	w.memUsedMin += needRes.MinMemory
	w.memUsedMax += needRes.MaxMemory

	return &workerResponse{
		err:    nil,
		worker: w.w,
		done: func() {
			m.workersLk.Lock()

			if needRes.CanGPU {
				w.gpuUsed = false
			}

			if needRes.MultiThread {
				w.cpuUse = 0
			} else if w.cpuUse != -1 {
				w.cpuUse--
			}

			w.memUsedMin -= needRes.MinMemory
			w.memUsedMax -= needRes.MaxMemory

			m.workersLk.Unlock()

			select {
			case m.workerFree <- wid:
			case <-m.closing:
			}
		},
	}
}

func (m *Manager) canHandleRequest(wid WorkerID, w *workerHandle, req *workerRequest) (bool, error) {
	needRes, ok := ResourceTable[req.taskType][m.scfg.SealProofType]
	if !ok {
		return false, xerrors.Errorf("canHandleRequest: missing ResourceTable entry for %s/%d", req.taskType, m.scfg.SealProofType)
	}

	res := w.info.Resources

	// TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running)
	minNeedMem := res.MemReserved + w.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory
	if minNeedMem > res.MemPhysical {
		log.Debugf("sched: not scheduling on worker %d; not enough physical memory - need: %dM, have %dM", wid, minNeedMem/mib, res.MemPhysical/mib)
		return false, nil
	}

	maxNeedMem := res.MemReserved + w.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory
	if m.scfg.SealProofType == abi.RegisteredProof_StackedDRG32GiBSeal {
		maxNeedMem += MaxCachingOverhead
	}
	if maxNeedMem > res.MemSwap+res.MemPhysical {
		log.Debugf("sched: not scheduling on worker %d; not enough virtual memory - need: %dM, have %dM", wid, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib)
		return false, nil
	}

	if needRes.MultiThread {
		if w.cpuUse != 0 {
			log.Debugf("sched: not scheduling on worker %d; multicore process needs free CPU", wid)
			return false, nil
		}
	} else {
		if w.cpuUse == -1 {
			log.Debugf("sched: not scheduling on worker %d; CPU in use by a multicore process", wid)
			return false, nil
		}
	}

	if len(res.GPUs) > 0 && needRes.CanGPU {
		if w.gpuUsed {
			log.Debugf("sched: not scheduling on worker %d; GPU in use", wid)
			return false, nil
		}
	}

	return true, nil
}

func (m *Manager) schedNewWorker(w *workerHandle) {
	m.workersLk.Lock()
	defer m.workersLk.Unlock()

	id := m.nextWorker
	m.workers[id] = w
	m.nextWorker++
}

func (m *Manager) schedClose() {
	m.workersLk.Lock()
	defer m.workersLk.Unlock()

	for i, w := range m.workers {
		if err := w.w.Close(); err != nil {
			log.Errorf("closing worker %d: %+v", i, err)
		}
	}
}
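To make the admission arithmetic in canHandleRequest concrete, a small self-contained sketch of the two memory checks for a 32GiB PreCommit1 task on a hypothetical 128GiB/64GiB-swap worker (the numbers are illustrative, taken from the ResourceTable above, not measurements):

package main

import "fmt"

func main() {
	const gib = uint64(1) << 30

	// Hypothetical worker, mirroring the api.WorkerResources fields.
	memPhysical := 128 * gib
	memSwap := 64 * gib
	memReserved := 2 * gib // system / other processes

	// Currently-committed usage on that worker (workerHandle counters).
	memUsedMin := uint64(0)
	memUsedMax := uint64(0)

	// ResourceTable[TTPreCommit1][StackedDRG32GiBSeal] from this patch.
	minMemory := 32 * gib
	maxMemory := 64 * gib
	baseMinMemory := 30 * gib

	// Check 1: the minimum working set must fit in physical RAM.
	minNeed := memReserved + memUsedMin + minMemory + baseMinMemory

	// Check 2: the worst case (plus MaxCachingOverhead for the 32GiB
	// proof) must fit in RAM + swap.
	maxNeed := memReserved + memUsedMax + maxMemory + baseMinMemory
	maxNeed += 32 * gib // MaxCachingOverhead

	fmt.Printf("min: need %d GiB, have %d GiB -> ok=%t\n",
		minNeed/gib, memPhysical/gib, minNeed <= memPhysical)
	fmt.Printf("max: need %d GiB, have %d GiB -> ok=%t\n",
		maxNeed/gib, (memPhysical+memSwap)/gib, maxNeed <= memPhysical+memSwap)
}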
13
storage/sectorstorage/sealtasks/task.go
Normal file
@ -0,0 +1,13 @@
package sealtasks

type TaskType string

const (
	TTAddPiece   TaskType = "seal/v0/addpiece"
	TTPreCommit1 TaskType = "seal/v0/precommit/1"
	TTPreCommit2 TaskType = "seal/v0/precommit/2"
	TTCommit1    TaskType = "seal/v0/commit/1" // NOTE: We use this to transfer the sector into miner-local storage for now; Don't use on workers!
	TTCommit2    TaskType = "seal/v0/commit/2"

	TTFinalize TaskType = "seal/v0/finalize"
)
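Workers advertise a subset of these as a set (the TaskTypes map in the Worker interface), and the scheduler routes by membership. An illustrative sketch of a PreCommit-only worker's set, not part of the patch:

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks"
)

func main() {
	// A PreCommit-only worker would advertise just these task types.
	supported := map[sealtasks.TaskType]struct{}{
		sealtasks.TTPreCommit1: {},
		sealtasks.TTPreCommit2: {},
	}

	// The scheduler checks membership with a plain map lookup.
	_, ok := supported[sealtasks.TTCommit2]
	fmt.Println(ok) // false: Commit2 requests won't be routed here
}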
56
storage/sectorstorage/sectorutil/utils.go
Normal file
@ -0,0 +1,56 @@
package sectorutil

import (
	"fmt"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/specs-actors/actors/abi"
)

func ParseSectorID(baseName string) (abi.SectorID, error) {
	var n abi.SectorNumber
	var mid abi.ActorID
	read, err := fmt.Sscanf(baseName, "s-t0%d-%d", &mid, &n)
	if err != nil {
		return abi.SectorID{}, xerrors.Errorf("sscanf sector name ('%s'): %w", baseName, err)
	}

	if read != 2 {
		return abi.SectorID{}, xerrors.Errorf("parseSectorID expected to scan 2 values, got %d", read)
	}

	return abi.SectorID{
		Miner:  mid,
		Number: n,
	}, nil
}

func SectorName(sid abi.SectorID) string {
	return fmt.Sprintf("s-t0%d-%d", sid.Miner, sid.Number)
}

func PathByType(sps sectorbuilder.SectorPaths, fileType sectorbuilder.SectorFileType) string {
	switch fileType {
	case sectorbuilder.FTUnsealed:
		return sps.Unsealed
	case sectorbuilder.FTSealed:
		return sps.Sealed
	case sectorbuilder.FTCache:
		return sps.Cache
	}

	panic("requested unknown path type")
}

func SetPathByType(sps *sectorbuilder.SectorPaths, fileType sectorbuilder.SectorFileType, p string) {
	switch fileType {
	case sectorbuilder.FTUnsealed:
		sps.Unsealed = p
	case sectorbuilder.FTSealed:
		sps.Sealed = p
	case sectorbuilder.FTCache:
		sps.Cache = p
	}
}
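A quick round-trip of the naming scheme above (the miner/sector values are illustrative; the s-t0... form matches the on-disk layout this patch expects):

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/storage/sectorstorage/sectorutil"
	"github.com/filecoin-project/specs-actors/actors/abi"
)

func main() {
	// Format a sector ID into its on-disk name...
	name := sectorutil.SectorName(abi.SectorID{Miner: 1000, Number: 42})
	fmt.Println(name) // s-t01000-42

	// ...and parse it back.
	sid, err := sectorutil.ParseSectorID(name)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", sid) // {Miner:1000 Number:42}
}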
22
storage/sectorstorage/stats.go
Normal file
@ -0,0 +1,22 @@
package sectorstorage

import "github.com/filecoin-project/lotus/api"

func (m *Manager) WorkerStats() map[uint64]api.WorkerStats {
	m.workersLk.Lock()
	defer m.workersLk.Unlock()

	out := map[uint64]api.WorkerStats{}

	for id, handle := range m.workers {
		out[uint64(id)] = api.WorkerStats{
			Info:       handle.info,
			MemUsedMin: handle.memUsedMin,
			MemUsedMax: handle.memUsedMax,
			GpuUsed:    handle.gpuUsed,
			CpuUse:     handle.cpuUse,
		}
	}

	return out
}
153
storage/sectorstorage/stores/http_handler.go
Normal file
@ -0,0 +1,153 @@
package stores

import (
	"encoding/json"
	"io"
	"net/http"
	"os"

	"github.com/gorilla/mux"
	logging "github.com/ipfs/go-log/v2"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/lotus/lib/tarutil"
	"github.com/filecoin-project/lotus/storage/sectorstorage/sectorutil"
)

var log = logging.Logger("stores")

type FetchHandler struct {
	*Local
}

func (handler *FetchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // /remote/
	mux := mux.NewRouter()

	mux.HandleFunc("/remote/stat/{id}", handler.remoteStatFs).Methods("GET")
	mux.HandleFunc("/remote/{type}/{id}", handler.remoteGetSector).Methods("GET")
	mux.HandleFunc("/remote/{type}/{id}", handler.remoteDeleteSector).Methods("DELETE")

	mux.ServeHTTP(w, r)
}

func (handler *FetchHandler) remoteStatFs(w http.ResponseWriter, r *http.Request) {
	log.Debugf("SERVE STAT %s", r.URL)
	vars := mux.Vars(r)
	id := ID(vars["id"])

	st, err := handler.Local.FsStat(r.Context(), id)
	switch err {
	case errPathNotFound:
		w.WriteHeader(404)
		return
	case nil:
		break
	default:
		w.WriteHeader(500)
		log.Errorf("%+v", err)
		return
	}

	if err := json.NewEncoder(w).Encode(&st); err != nil {
		log.Warnf("error writing stat response: %+v", err)
	}
}

func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Request) {
	log.Infof("SERVE GET %s", r.URL)
	vars := mux.Vars(r)

	id, err := sectorutil.ParseSectorID(vars["id"])
	if err != nil {
		log.Errorf("%+v", err)
		w.WriteHeader(500)
		return
	}

	ft, err := ftFromString(vars["type"])
	if err != nil {
		log.Errorf("%+v", err)
		w.WriteHeader(500)
		return
	}
	paths, _, done, err := handler.Local.AcquireSector(r.Context(), id, ft, 0, false)
	if err != nil {
		log.Errorf("%+v", err)
		w.WriteHeader(500)
		return
	}
	defer done()

	path := sectorutil.PathByType(paths, ft)
	if path == "" {
		log.Error("acquired path was empty")
		w.WriteHeader(500)
		return
	}

	stat, err := os.Stat(path)
	if err != nil {
		log.Errorf("%+v", err)
		w.WriteHeader(500)
		return
	}

	var rd io.Reader
	if stat.IsDir() {
		rd, err = tarutil.TarDirectory(path)
		w.Header().Set("Content-Type", "application/x-tar")
	} else {
		rd, err = os.OpenFile(path, os.O_RDONLY, 0644)
		w.Header().Set("Content-Type", "application/octet-stream")
	}
	if err != nil {
		log.Errorf("%+v", err)
		w.WriteHeader(500)
		return
	}

	w.WriteHeader(200)
	if _, err := io.Copy(w, rd); err != nil { // TODO: default 32k buf may be too small
		log.Errorf("%+v", err)
		return
	}
}

func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.Request) {
	log.Infof("SERVE DELETE %s", r.URL)
	vars := mux.Vars(r)

	id, err := sectorutil.ParseSectorID(vars["id"])
	if err != nil {
		log.Errorf("%+v", err)
		w.WriteHeader(500)
		return
	}

	ft, err := ftFromString(vars["type"])
	if err != nil {
		log.Errorf("%+v", err)
		w.WriteHeader(500)
		return
	}

	if err := handler.Remove(r.Context(), id, ft); err != nil {
		log.Errorf("%+v", err)
		w.WriteHeader(500)
		return
	}
}

func ftFromString(t string) (sectorbuilder.SectorFileType, error) {
	switch t {
	case sectorbuilder.FTUnsealed.String():
		return sectorbuilder.FTUnsealed, nil
	case sectorbuilder.FTSealed.String():
		return sectorbuilder.FTSealed, nil
	case sectorbuilder.FTCache.String():
		return sectorbuilder.FTCache, nil
	default:
		return 0, xerrors.Errorf("unknown sector file type: '%s'", t)
	}
}
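The routes above imply a simple pull protocol: GET /remote/<file type>/<sector name>, with the bearer token the Manager constructs in New. A minimal client sketch; the host, port, token and sector name are hypothetical, and real callers go through stores.Remote rather than raw HTTP:

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Hypothetical miner address; path segments are <type>/<name>
	// as served by FetchHandler.
	req, err := http.NewRequest("GET", "http://127.0.0.1:2345/remote/sealed/s-t01000-42", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer <token>") // hypothetical token

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		panic(fmt.Sprintf("fetch failed: %d", resp.StatusCode))
	}

	// Directories come back as tar (application/x-tar), plain files as
	// raw bytes (application/octet-stream).
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		panic(err)
	}
}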
322
storage/sectorstorage/stores/index.go
Normal file
@ -0,0 +1,322 @@
package stores

import (
	"context"
	"net/url"
	gopath "path"
	"sort"
	"sync"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-actors/actors/abi/big"

	"github.com/filecoin-project/lotus/storage/sectorstorage/sectorutil"
)

// ID identifies sector storage by UUID. One sector storage should map to one
// filesystem, local or networked / shared by multiple machines
type ID string

type StorageInfo struct {
	ID     ID
	URLs   []string // TODO: Support non-http transports
	Weight uint64

	CanSeal  bool
	CanStore bool
}

type SectorIndex interface { // part of storage-miner api
	StorageAttach(context.Context, StorageInfo, FsStat) error
	StorageInfo(context.Context, ID) (StorageInfo, error)
	// TODO: StorageUpdateStats(FsStat)

	StorageDeclareSector(ctx context.Context, storageId ID, s abi.SectorID, ft sectorbuilder.SectorFileType) error
	StorageDropSector(ctx context.Context, storageId ID, s abi.SectorID, ft sectorbuilder.SectorFileType) error
	StorageFindSector(ctx context.Context, sector abi.SectorID, ft sectorbuilder.SectorFileType, allowFetch bool) ([]StorageInfo, error)

	StorageBestAlloc(ctx context.Context, allocate sectorbuilder.SectorFileType, sealing bool) ([]StorageInfo, error)
}

type Decl struct {
	abi.SectorID
	sectorbuilder.SectorFileType
}

type storageEntry struct {
	info *StorageInfo
	fsi  FsStat
}

type Index struct {
	lk sync.RWMutex

	sectors map[Decl][]ID
	stores  map[ID]*storageEntry
}

func NewIndex() *Index {
	return &Index{
		sectors: map[Decl][]ID{},
		stores:  map[ID]*storageEntry{},
	}
}

func (i *Index) StorageList(ctx context.Context) (map[ID][]Decl, error) {
	i.lk.RLock()
	defer i.lk.RUnlock()

	byID := map[ID]map[abi.SectorID]sectorbuilder.SectorFileType{}

	for id := range i.stores {
		byID[id] = map[abi.SectorID]sectorbuilder.SectorFileType{}
	}
	for decl, ids := range i.sectors {
		for _, id := range ids {
			byID[id][decl.SectorID] |= decl.SectorFileType
		}
	}

	out := map[ID][]Decl{}
	for id, m := range byID {
		out[id] = []Decl{}
		for sectorID, fileType := range m {
			out[id] = append(out[id], Decl{
				SectorID:       sectorID,
				SectorFileType: fileType,
			})
		}
	}

	return out, nil
}

func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st FsStat) error {
	i.lk.Lock()
	defer i.lk.Unlock()

	log.Infof("New sector storage: %s", si.ID)

	if _, ok := i.stores[si.ID]; ok {
		for _, u := range si.URLs {
			if _, err := url.Parse(u); err != nil {
				return xerrors.Errorf("failed to parse url %s: %w", si.URLs, err)
			}
		}

	uloop:
		for _, u := range si.URLs {
			for _, l := range i.stores[si.ID].info.URLs {
				if u == l {
					continue uloop
				}
			}

			i.stores[si.ID].info.URLs = append(i.stores[si.ID].info.URLs, u)
		}

		return nil
	}
	i.stores[si.ID] = &storageEntry{
		info: &si,
		fsi:  st,
	}
	return nil
}

func (i *Index) StorageDeclareSector(ctx context.Context, storageId ID, s abi.SectorID, ft sectorbuilder.SectorFileType) error {
	i.lk.Lock()
	defer i.lk.Unlock()

	for _, fileType := range pathTypes {
		if fileType&ft == 0 {
			continue
		}

		d := Decl{s, fileType}

		for _, sid := range i.sectors[d] {
			if sid == storageId {
				log.Warnf("sector %v redeclared in %s", s, storageId)
				return nil
			}
		}

		i.sectors[d] = append(i.sectors[d], storageId)
	}

	return nil
}

func (i *Index) StorageDropSector(ctx context.Context, storageId ID, s abi.SectorID, ft sectorbuilder.SectorFileType) error {
	i.lk.Lock()
	defer i.lk.Unlock()

	for _, fileType := range pathTypes {
		if fileType&ft == 0 {
			continue
		}

		d := Decl{s, fileType}

		if len(i.sectors[d]) == 0 {
			return nil
		}

		rewritten := make([]ID, 0, len(i.sectors[d])-1)
		for _, sid := range i.sectors[d] {
			if sid == storageId {
				continue
			}

			rewritten = append(rewritten, sid)
		}
		if len(rewritten) == 0 {
			delete(i.sectors, d)
			return nil
		}

		i.sectors[d] = rewritten
	}

	return nil
}

func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft sectorbuilder.SectorFileType, allowFetch bool) ([]StorageInfo, error) {
	i.lk.RLock()
	defer i.lk.RUnlock()

	storageIDs := map[ID]uint64{}

	for _, pathType := range pathTypes {
		if ft&pathType == 0 {
			continue
		}

		for _, id := range i.sectors[Decl{s, pathType}] {
			storageIDs[id]++
		}
	}

	out := make([]StorageInfo, 0, len(storageIDs))

	for id, n := range storageIDs {
		st, ok := i.stores[id]
		if !ok {
			log.Warnf("storage %s is not present in sector index (referenced by sector %v)", id, s)
			continue
		}

		urls := make([]string, len(st.info.URLs))
		for k, u := range st.info.URLs {
			rl, err := url.Parse(u)
			if err != nil {
				return nil, xerrors.Errorf("failed to parse url: %w", err)
			}

			rl.Path = gopath.Join(rl.Path, ft.String(), sectorutil.SectorName(s))
			urls[k] = rl.String()
		}

		out = append(out, StorageInfo{
			ID:       id,
			URLs:     urls,
			Weight:   st.info.Weight * n, // storage with more sector types is better
			CanSeal:  st.info.CanSeal,
			CanStore: st.info.CanStore,
		})
	}

	if allowFetch {
		for id, st := range i.stores {
			if _, ok := storageIDs[id]; ok {
				continue
			}

			urls := make([]string, len(st.info.URLs))
			for k, u := range st.info.URLs {
				rl, err := url.Parse(u)
				if err != nil {
					return nil, xerrors.Errorf("failed to parse url: %w", err)
				}

				rl.Path = gopath.Join(rl.Path, ft.String(), sectorutil.SectorName(s))
				urls[k] = rl.String()
			}

			out = append(out, StorageInfo{
				ID:       id,
				URLs:     urls,
				Weight:   st.info.Weight * 0, // TODO: something better than just '0'
				CanSeal:  st.info.CanSeal,
				CanStore: st.info.CanStore,
			})
		}
	}

	return out, nil
}

func (i *Index) StorageInfo(ctx context.Context, id ID) (StorageInfo, error) {
	i.lk.RLock()
	defer i.lk.RUnlock()

	si, found := i.stores[id]
	if !found {
		return StorageInfo{}, xerrors.Errorf("sector store not found")
	}

	return *si.info, nil
}

func (i *Index) StorageBestAlloc(ctx context.Context, allocate sectorbuilder.SectorFileType, sealing bool) ([]StorageInfo, error) {
	i.lk.RLock()
	defer i.lk.RUnlock()

	var candidates []storageEntry

	for _, p := range i.stores {
		if sealing && !p.info.CanSeal {
			log.Debugf("alloc: not considering %s; can't seal", p.info.ID)
			continue
		}
		if !sealing && !p.info.CanStore {
			log.Debugf("alloc: not considering %s; can't store", p.info.ID)
			continue
		}

		// TODO: filter out of space

		candidates = append(candidates, *p)
	}

	if len(candidates) == 0 {
		return nil, xerrors.New("no good path found")
	}

	sort.Slice(candidates, func(i, j int) bool {
		iw := big.Mul(big.NewInt(int64(candidates[i].fsi.Available)), big.NewInt(int64(candidates[i].info.Weight)))
		jw := big.Mul(big.NewInt(int64(candidates[j].fsi.Available)), big.NewInt(int64(candidates[j].info.Weight)))

		return iw.GreaterThan(jw)
	})

	out := make([]StorageInfo, len(candidates))
	for i, candidate := range candidates {
		out[i] = *candidate.info
	}

	return out, nil
}

func (i *Index) FindSector(id abi.SectorID, typ sectorbuilder.SectorFileType) ([]ID, error) {
	i.lk.RLock()
	defer i.lk.RUnlock()

	return i.sectors[Decl{
		SectorID:       id,
		SectorFileType: typ,
	}], nil
}

var _ SectorIndex = &Index{}
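A short attach/declare/find round-trip against the in-memory index; the storage ID, URL, weight and sector values are illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
	"github.com/filecoin-project/specs-actors/actors/abi"
)

func main() {
	ctx := context.Background()
	idx := stores.NewIndex()

	// Attach a hypothetical sealing store.
	err := idx.StorageAttach(ctx, stores.StorageInfo{
		ID:      "store-1",
		URLs:    []string{"http://127.0.0.1:2345/remote"},
		Weight:  10,
		CanSeal: true,
	}, stores.FsStat{Capacity: 1 << 40, Available: 1 << 40})
	if err != nil {
		panic(err)
	}

	// Declare that it holds a sealed copy of one sector...
	sid := abi.SectorID{Miner: 1000, Number: 42}
	if err := idx.StorageDeclareSector(ctx, "store-1", sid, sectorbuilder.FTSealed); err != nil {
		panic(err)
	}

	// ...then look it up, without falling back to fetchable stores.
	found, err := idx.StorageFindSector(ctx, sid, sectorbuilder.FTSealed, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(found)) // 1: store-1 holds the sealed file
}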
35
storage/sectorstorage/stores/interface.go
Normal file
@ -0,0 +1,35 @@
package stores

import (
	"context"
	"syscall"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/specs-actors/actors/abi"
)

type Store interface {
	AcquireSector(ctx context.Context, s abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (paths sectorbuilder.SectorPaths, stores sectorbuilder.SectorPaths, done func(), err error)
	Remove(ctx context.Context, s abi.SectorID, types sectorbuilder.SectorFileType) error
	FsStat(ctx context.Context, id ID) (FsStat, error)
}

func Stat(path string) (FsStat, error) {
	var stat syscall.Statfs_t
	if err := syscall.Statfs(path, &stat); err != nil {
		return FsStat{}, xerrors.Errorf("statfs: %w", err)
	}

	return FsStat{
		Capacity:  stat.Blocks * uint64(stat.Bsize),
		Available: stat.Bavail * uint64(stat.Bsize),
	}, nil
}

type FsStat struct {
	Capacity  uint64
	Available uint64 // Available to use for sector storage
	Used      uint64
}
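Stat wraps statfs(2), deriving byte counts from block counts times block size. A quick check of an arbitrary path (the path is illustrative; this only works on Unix, where Statfs_t is defined):

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

func main() {
	st, err := stores.Stat("/tmp")
	if err != nil {
		panic(err)
	}
	// Capacity/Available are in bytes.
	fmt.Printf("%d GiB free of %d GiB\n", st.Available>>30, st.Capacity>>30)
}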
326
storage/sectorstorage/stores/local.go
Normal file
@ -0,0 +1,326 @@
package stores

import (
	"context"
	"encoding/json"
	"io/ioutil"
	"math/bits"
	"os"
	"path/filepath"
	"sync"

	"github.com/filecoin-project/specs-actors/actors/abi"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/storage/sectorstorage/sectorutil"
)

type StoragePath struct {
	ID     ID
	Weight uint64

	LocalPath string

	CanSeal  bool
	CanStore bool
}

// [path]/sectorstore.json
type LocalStorageMeta struct {
	ID     ID
	Weight uint64 // 0 = readonly

	CanSeal  bool
	CanStore bool
}
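For reference, a sectorstore.json matching the struct above might look like this (the UUID and flags are illustrative; the struct has no json tags, so Go's default field-name encoding applies):

{
	"ID": "7b0f2c42-2ac2-4a3d-b6c6-0c21d4b1a1f2",
	"Weight": 10,
	"CanSeal": true,
	"CanStore": false
}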
||||
|
||||
type LocalStorage interface {
|
||||
GetStorage() (config.StorageConfig, error)
|
||||
SetStorage(func(*config.StorageConfig)) error
|
||||
}
|
||||
|
||||
const MetaFile = "sectorstore.json"
|
||||
|
||||
var pathTypes = []sectorbuilder.SectorFileType{sectorbuilder.FTUnsealed, sectorbuilder.FTSealed, sectorbuilder.FTCache}
|
||||
|
type Local struct {
	localStorage LocalStorage
	index        SectorIndex
	urls         []string

	paths map[ID]*path

	localLk sync.RWMutex
}

type path struct {
	local string // absolute local path
}

func NewLocal(ctx context.Context, ls LocalStorage, index SectorIndex, urls []string) (*Local, error) {
	l := &Local{
		localStorage: ls,
		index:        index,
		urls:         urls,

		paths: map[ID]*path{},
	}
	return l, l.open(ctx)
}

func (st *Local) OpenPath(ctx context.Context, p string) error {
	st.localLk.Lock()
	defer st.localLk.Unlock()

	mb, err := ioutil.ReadFile(filepath.Join(p, MetaFile))
	if err != nil {
		return xerrors.Errorf("reading storage metadata for %s: %w", p, err)
	}

	var meta LocalStorageMeta
	if err := json.Unmarshal(mb, &meta); err != nil {
		return xerrors.Errorf("unmarshalling storage metadata for %s: %w", p, err)
	}

	// TODO: Check existing / dedupe

	out := &path{
		local: p,
	}

	fst, err := Stat(p)
	if err != nil {
		return err
	}

	err = st.index.StorageAttach(ctx, StorageInfo{
		ID:       meta.ID,
		URLs:     st.urls,
		Weight:   meta.Weight,
		CanSeal:  meta.CanSeal,
		CanStore: meta.CanStore,
	}, fst)
	if err != nil {
		return xerrors.Errorf("declaring storage in index: %w", err)
	}

	for _, t := range pathTypes {
		ents, err := ioutil.ReadDir(filepath.Join(p, t.String()))
		if err != nil {
			if os.IsNotExist(err) {
				if err := os.MkdirAll(filepath.Join(p, t.String()), 0755); err != nil {
					return xerrors.Errorf("openPath mkdir '%s': %w", filepath.Join(p, t.String()), err)
				}

				continue
			}
			return xerrors.Errorf("listing %s: %w", filepath.Join(p, t.String()), err)
		}

		for _, ent := range ents {
			sid, err := sectorutil.ParseSectorID(ent.Name())
			if err != nil {
				return xerrors.Errorf("parse sector id %s: %w", ent.Name(), err)
			}

			if err := st.index.StorageDeclareSector(ctx, meta.ID, sid, t); err != nil {
				return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", sid, t, meta.ID, err)
			}
		}
	}

	st.paths[meta.ID] = out

	return nil
}

func (st *Local) open(ctx context.Context) error {
	cfg, err := st.localStorage.GetStorage()
	if err != nil {
		return xerrors.Errorf("getting local storage config: %w", err)
	}

	for _, path := range cfg.StoragePaths {
		err := st.OpenPath(ctx, path.Path)
		if err != nil {
			return xerrors.Errorf("opening path %s: %w", path.Path, err)
		}
	}

	return nil
}
func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (sectorbuilder.SectorPaths, sectorbuilder.SectorPaths, func(), error) {
	// existing and allocate are bitfields; a file type can't be both found and
	// allocated, so the two must be disjoint (OR equals XOR only when no bits overlap)
	if existing|allocate != existing^allocate {
		return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector")
	}

	st.localLk.RLock()

	var out sectorbuilder.SectorPaths
	var storageIDs sectorbuilder.SectorPaths

	for _, fileType := range pathTypes {
		if fileType&existing == 0 {
			continue
		}

		si, err := st.index.StorageFindSector(ctx, sid, fileType, false)
		if err != nil {
			log.Warnf("finding existing sector %d(t:%d) failed: %+v", sid, fileType, err)
			continue
		}

		for _, info := range si {
			p, ok := st.paths[info.ID]
			if !ok {
				continue
			}

			if p.local == "" { // TODO: can that even be the case?
				continue
			}

			spath := filepath.Join(p.local, fileType.String(), sectorutil.SectorName(sid))
			sectorutil.SetPathByType(&out, fileType, spath)
			sectorutil.SetPathByType(&storageIDs, fileType, string(info.ID))

			existing ^= fileType
			break
		}
	}

	for _, fileType := range pathTypes {
		if fileType&allocate == 0 {
			continue
		}

		sis, err := st.index.StorageBestAlloc(ctx, fileType, sealing)
		if err != nil {
			st.localLk.RUnlock()
			return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, xerrors.Errorf("finding best storage for allocating: %w", err)
		}

		var best string
		var bestID ID

		for _, si := range sis {
			p, ok := st.paths[si.ID]
			if !ok {
				continue
			}

			if p.local == "" { // TODO: can that even be the case?
				continue
			}

			if sealing && !si.CanSeal {
				continue
			}

			if !sealing && !si.CanStore {
				continue
			}

			// TODO: Check free space

			best = filepath.Join(p.local, fileType.String(), sectorutil.SectorName(sid))
			bestID = si.ID
		}

		if best == "" {
			st.localLk.RUnlock()
			return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, xerrors.Errorf("couldn't find a suitable path for a sector")
		}

		sectorutil.SetPathByType(&out, fileType, best)
		sectorutil.SetPathByType(&storageIDs, fileType, string(bestID))
		allocate ^= fileType
	}

	return out, storageIDs, st.localLk.RUnlock, nil
}
func (st *Local) Local(ctx context.Context) ([]StoragePath, error) {
	st.localLk.RLock()
	defer st.localLk.RUnlock()

	var out []StoragePath
	for id, p := range st.paths {
		if p.local == "" {
			continue
		}

		si, err := st.index.StorageInfo(ctx, id)
		if err != nil {
			return nil, xerrors.Errorf("get storage info for %s: %w", id, err)
		}

		out = append(out, StoragePath{
			ID:        id,
			Weight:    si.Weight,
			LocalPath: p.local,
			CanSeal:   si.CanSeal,
			CanStore:  si.CanStore,
		})
	}

	return out, nil
}

func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ sectorbuilder.SectorFileType) error {
	if bits.OnesCount(uint(typ)) != 1 {
		return xerrors.New("delete expects one file type")
	}

	si, err := st.index.StorageFindSector(ctx, sid, typ, false)
	if err != nil {
		return xerrors.Errorf("finding existing sector %d(t:%d) failed: %w", sid, typ, err)
	}

	if len(si) == 0 {
		return xerrors.Errorf("can't delete sector %v(%d), not found", sid, typ)
	}

	for _, info := range si {
		p, ok := st.paths[info.ID]
		if !ok {
			continue
		}

		if p.local == "" { // TODO: can that even be the case?
			continue
		}

		if err := st.index.StorageDropSector(ctx, info.ID, sid, typ); err != nil {
			return xerrors.Errorf("dropping sector from index: %w", err)
		}

		spath := filepath.Join(p.local, typ.String(), sectorutil.SectorName(sid))
		log.Infof("remove %s", spath)

		if err := os.RemoveAll(spath); err != nil {
			log.Errorf("removing sector (%v) from %s: %+v", sid, spath, err)
		}
	}

	return nil
}

var errPathNotFound = xerrors.Errorf("fsstat: path not found")

func (st *Local) FsStat(ctx context.Context, id ID) (FsStat, error) {
	st.localLk.RLock()
	defer st.localLk.RUnlock()

	p, ok := st.paths[id]
	if !ok {
		return FsStat{}, errPathNotFound
	}

	return Stat(p.local)
}

var _ Store = &Local{}
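The Local store only attaches directories that already carry a sectorstore.json matching LocalStorageMeta. Below is a minimal sketch of preparing such a directory so that OpenPath will accept it; the helper name, the mirrored struct, and the metadata values are illustrative assumptions, not part of this PR.

package example

import (
	"encoding/json"
	"io/ioutil"
	"path/filepath"
)

// exampleMeta mirrors the LocalStorageMeta shape from stores/local.go,
// with the ID as a plain string (as the string casts in the store suggest).
type exampleMeta struct {
	ID       string
	Weight   uint64
	CanSeal  bool
	CanStore bool
}

// prepareStoragePath writes a sectorstore.json into dir so that
// (*Local).OpenPath can later read it and attach the path to the index.
func prepareStoragePath(dir string) error {
	meta := exampleMeta{
		ID:       "example-storage-id", // in practice a unique identifier, e.g. a UUID
		Weight:   10,                   // 0 would mark the path read-only
		CanSeal:  true,
		CanStore: false,
	}

	b, err := json.MarshalIndent(&meta, "", "  ")
	if err != nil {
		return err
	}

	return ioutil.WriteFile(filepath.Join(dir, "sectorstore.json"), b, 0644)
}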
296
storage/sectorstorage/stores/remote.go
Normal file
@ -0,0 +1,296 @@
package stores

import (
	"context"
	"encoding/json"
	"io/ioutil"
	"math/bits"
	"mime"
	"net/http"
	"net/url"
	"os"
	gopath "path"
	"sort"
	"sync"

	"github.com/hashicorp/go-multierror"
	files "github.com/ipfs/go-ipfs-files"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/specs-actors/actors/abi"

	"github.com/filecoin-project/lotus/lib/tarutil"
	"github.com/filecoin-project/lotus/storage/sectorstorage/sectorutil"
)

type Remote struct {
	local *Local
	index SectorIndex
	auth  http.Header

	fetchLk sync.Mutex // TODO: this can be much smarter
	// TODO: allow multiple parallel fetches
	//       (make sure to not fetch the same sector data twice)
}

func NewRemote(local *Local, index SectorIndex, auth http.Header) *Remote {
	return &Remote{
		local: local,
		index: index,
		auth:  auth,
	}
}
func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (sectorbuilder.SectorPaths, sectorbuilder.SectorPaths, func(), error) {
	if existing|allocate != existing^allocate {
		return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector")
	}

	r.fetchLk.Lock()
	defer r.fetchLk.Unlock()

	paths, stores, done, err := r.local.AcquireSector(ctx, s, existing, allocate, sealing)
	if err != nil {
		return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, xerrors.Errorf("local acquire error: %w", err)
	}

	for _, fileType := range pathTypes {
		if fileType&existing == 0 {
			continue
		}

		if sectorutil.PathByType(paths, fileType) != "" {
			continue
		}

		ap, storageID, url, rdone, err := r.acquireFromRemote(ctx, s, fileType, sealing)
		if err != nil {
			done()
			return sectorbuilder.SectorPaths{}, sectorbuilder.SectorPaths{}, nil, err
		}

		done = mergeDone(done, rdone)
		sectorutil.SetPathByType(&paths, fileType, ap)
		sectorutil.SetPathByType(&stores, fileType, string(storageID))

		if err := r.index.StorageDeclareSector(ctx, storageID, s, fileType); err != nil {
			log.Warnf("declaring sector %v in %s failed: %+v", s, storageID, err)
			continue
		}

		// TODO: some way to allow having duplicated sectors in the system for perf
		if err := r.deleteFromRemote(ctx, url); err != nil {
			log.Warnf("deleting sector %v from %s (delete %s): %+v", s, storageID, url, err)
		}
	}

	return paths, stores, done, nil
}
func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType sectorbuilder.SectorFileType, sealing bool) (string, ID, string, func(), error) {
	si, err := r.index.StorageFindSector(ctx, s, fileType, false)
	if err != nil {
		return "", "", "", nil, err
	}

	sort.Slice(si, func(i, j int) bool {
		return si[i].Weight < si[j].Weight
	})

	apaths, ids, done, err := r.local.AcquireSector(ctx, s, 0, fileType, sealing)
	if err != nil {
		return "", "", "", nil, xerrors.Errorf("allocate local sector for fetching: %w", err)
	}
	dest := sectorutil.PathByType(apaths, fileType)
	storageID := sectorutil.PathByType(ids, fileType)

	var merr error
	for _, info := range si {
		for _, url := range info.URLs {
			err := r.fetch(ctx, url, dest)
			if err != nil {
				merr = multierror.Append(merr, xerrors.Errorf("fetch error %s (storage %s) -> %s: %w", url, info.ID, dest, err))
				continue
			}

			// surface errors from earlier failed attempts before returning the successful fetch
			if merr != nil {
				log.Warnw("acquireFromRemote encountered errors when fetching sector from remote", "errors", merr)
			}
			return dest, ID(storageID), url, done, nil
		}
	}

	done()
	return "", "", "", nil, xerrors.Errorf("failed to acquire sector %v from remote (tried %v): %w", s, si, merr)
}
func (r *Remote) fetch(ctx context.Context, url, outname string) error {
	log.Infof("Fetch %s -> %s", url, outname)

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return xerrors.Errorf("request: %w", err)
	}
	req.Header = r.auth
	req = req.WithContext(ctx)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return xerrors.Errorf("do request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		return xerrors.Errorf("non-200 code: %d", resp.StatusCode)
	}

	/*bar := pb.New64(w.sizeForType(typ))
	bar.ShowPercent = true
	bar.ShowSpeed = true
	bar.Units = pb.U_BYTES

	barreader := bar.NewProxyReader(resp.Body)

	bar.Start()
	defer bar.Finish()*/

	mediatype, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
	if err != nil {
		return xerrors.Errorf("parse media type: %w", err)
	}

	if err := os.RemoveAll(outname); err != nil {
		return xerrors.Errorf("removing dest: %w", err)
	}

	switch mediatype {
	case "application/x-tar":
		return tarutil.ExtractTar(resp.Body, outname)
	case "application/octet-stream":
		return files.WriteTo(files.NewReaderFile(resp.Body), outname)
	default:
		return xerrors.Errorf("unknown content type: '%s'", mediatype)
	}
}
func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ sectorbuilder.SectorFileType) error {
	if bits.OnesCount(uint(typ)) != 1 {
		return xerrors.New("delete expects one file type")
	}

	if err := r.local.Remove(ctx, sid, typ); err != nil {
		return xerrors.Errorf("remove from local: %w", err)
	}

	si, err := r.index.StorageFindSector(ctx, sid, typ, false)
	if err != nil {
		return xerrors.Errorf("finding existing sector %d(t:%d) failed: %w", sid, typ, err)
	}

	for _, info := range si {
		for _, url := range info.URLs {
			if err := r.deleteFromRemote(ctx, url); err != nil {
				log.Warnf("remove %s: %+v", url, err)
				continue
			}
			break
		}
	}

	return nil
}

func (r *Remote) deleteFromRemote(ctx context.Context, url string) error {
	log.Infof("Delete %s", url)

	req, err := http.NewRequest("DELETE", url, nil)
	if err != nil {
		return xerrors.Errorf("request: %w", err)
	}
	req.Header = r.auth
	req = req.WithContext(ctx)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return xerrors.Errorf("do request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		return xerrors.Errorf("non-200 code: %d", resp.StatusCode)
	}

	return nil
}
func (r *Remote) FsStat(ctx context.Context, id ID) (FsStat, error) {
	st, err := r.local.FsStat(ctx, id)
	switch err {
	case nil:
		return st, nil
	case errPathNotFound:
		break
	default:
		return FsStat{}, xerrors.Errorf("local stat: %w", err)
	}

	si, err := r.index.StorageInfo(ctx, id)
	if err != nil {
		return FsStat{}, xerrors.Errorf("getting remote storage info: %w", err)
	}

	if len(si.URLs) == 0 {
		return FsStat{}, xerrors.Errorf("no known URLs for remote storage %s", id)
	}

	rl, err := url.Parse(si.URLs[0])
	if err != nil {
		return FsStat{}, xerrors.Errorf("failed to parse url: %w", err)
	}

	rl.Path = gopath.Join(rl.Path, "stat", string(id))

	req, err := http.NewRequest("GET", rl.String(), nil)
	if err != nil {
		return FsStat{}, xerrors.Errorf("request: %w", err)
	}
	req.Header = r.auth
	req = req.WithContext(ctx)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return FsStat{}, xerrors.Errorf("do request: %w", err)
	}
	// close the body on every return path below
	defer resp.Body.Close()

	switch resp.StatusCode {
	case 200:
		break
	case 404:
		return FsStat{}, errPathNotFound
	case 500:
		b, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return FsStat{}, xerrors.Errorf("fsstat: got http 500, then failed to read the error: %w", err)
		}

		return FsStat{}, xerrors.Errorf("fsstat: got http 500: %s", string(b))
	}

	var out FsStat
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return FsStat{}, xerrors.Errorf("decoding fsstat: %w", err)
	}

	return out, nil
}

func mergeDone(a func(), b func()) func() {
	return func() {
		a()
		b()
	}
}

var _ Store = &Remote{}
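Remote reuses one auth header for every fetch, delete, and stat request it issues. A rough sketch of how a caller might construct it on top of an already-opened Local store follows; the helper and the token plumbing are assumptions for illustration, not part of this PR.

package example

import (
	"net/http"

	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

// newAuthedRemote wires a Remote store on top of an existing Local store.
// The bearer token is whatever the storage endpoints expect; both arguments
// are assumed to come from existing node setup code.
func newAuthedRemote(local *stores.Local, index stores.SectorIndex, token string) *stores.Remote {
	auth := http.Header{}
	auth.Add("Authorization", "Bearer "+token)

	// Every request issued by the Remote store reuses this header, matching
	// how r.auth is attached to requests in fetch/deleteFromRemote/FsStat.
	return stores.NewRemote(local, index, auth)
}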
210
storage/sectorstorage/worker_local.go
Normal file
@ -0,0 +1,210 @@
package sectorstorage

import (
	"context"
	"io"
	"os"

	"github.com/elastic/go-sysinfo"
	"golang.org/x/xerrors"

	ffi "github.com/filecoin-project/filecoin-ffi"
	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/specs-actors/actors/abi"
	storage2 "github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks"
	"github.com/filecoin-project/lotus/storage/sectorstorage/sectorutil"
	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

var pathTypes = []sectorbuilder.SectorFileType{sectorbuilder.FTUnsealed, sectorbuilder.FTSealed, sectorbuilder.FTCache}

type WorkerConfig struct {
	SealProof abi.RegisteredProof
	TaskTypes []sealtasks.TaskType
}

type LocalWorker struct {
	scfg       *sectorbuilder.Config
	storage    stores.Store
	localStore *stores.Local
	sindex     stores.SectorIndex

	acceptTasks map[sealtasks.TaskType]struct{}
}

func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex) *LocalWorker {
	ppt, err := wcfg.SealProof.RegisteredPoStProof()
	if err != nil {
		panic(err)
	}

	acceptTasks := map[sealtasks.TaskType]struct{}{}
	for _, taskType := range wcfg.TaskTypes {
		acceptTasks[taskType] = struct{}{}
	}

	return &LocalWorker{
		scfg: &sectorbuilder.Config{
			SealProofType: wcfg.SealProof,
			PoStProofType: ppt,
		},
		storage:    store,
		localStore: local,
		sindex:     sindex,

		acceptTasks: acceptTasks,
	}
}
type localWorkerPathProvider struct {
	w *LocalWorker
}

func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing sectorbuilder.SectorFileType, allocate sectorbuilder.SectorFileType, sealing bool) (sectorbuilder.SectorPaths, func(), error) {
	paths, storageIDs, done, err := l.w.storage.AcquireSector(ctx, sector, existing, allocate, sealing)
	if err != nil {
		return sectorbuilder.SectorPaths{}, nil, err
	}

	log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, paths)

	return paths, func() {
		done()

		for _, fileType := range pathTypes {
			if fileType&allocate == 0 {
				continue
			}

			sid := sectorutil.PathByType(storageIDs, fileType)

			if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector, fileType); err != nil {
				log.Errorf("declare sector error: %+v", err)
			}
		}
	}, nil
}

func (l *LocalWorker) sb() (sectorbuilder.Basic, error) {
	return sectorbuilder.New(&localWorkerPathProvider{w: l}, l.scfg)
}
func (l *LocalWorker) NewSector(ctx context.Context, sector abi.SectorID) error {
	sb, err := l.sb()
	if err != nil {
		return err
	}

	return sb.NewSector(ctx, sector)
}

func (l *LocalWorker) AddPiece(ctx context.Context, sector abi.SectorID, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
	sb, err := l.sb()
	if err != nil {
		return abi.PieceInfo{}, err
	}

	return sb.AddPiece(ctx, sector, epcs, sz, r)
}

func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage2.PreCommit1Out, err error) {
	sb, err := l.sb()
	if err != nil {
		return nil, err
	}

	return sb.SealPreCommit1(ctx, sector, ticket, pieces)
}

func (l *LocalWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.PreCommit1Out) (cids storage2.SectorCids, err error) {
	sb, err := l.sb()
	if err != nil {
		return storage2.SectorCids{}, err
	}

	return sb.SealPreCommit2(ctx, sector, phase1Out)
}

func (l *LocalWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage2.SectorCids) (output storage2.Commit1Out, err error) {
	sb, err := l.sb()
	if err != nil {
		return nil, err
	}

	return sb.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
}

func (l *LocalWorker) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.Commit1Out) (proof storage2.Proof, err error) {
	sb, err := l.sb()
	if err != nil {
		return nil, err
	}

	return sb.SealCommit2(ctx, sector, phase1Out)
}

func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID) error {
	sb, err := l.sb()
	if err != nil {
		return err
	}

	if err := sb.FinalizeSector(ctx, sector); err != nil {
		return xerrors.Errorf("finalizing sector: %w", err)
	}

	if err := l.storage.Remove(ctx, sector, sectorbuilder.FTUnsealed); err != nil {
		return xerrors.Errorf("removing unsealed data: %w", err)
	}

	return nil
}
func (l *LocalWorker) TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) {
	return l.acceptTasks, nil
}

func (l *LocalWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) {
	return l.localStore.Local(ctx)
}

func (l *LocalWorker) Info(context.Context) (api.WorkerInfo, error) {
	hostname, err := os.Hostname() // TODO: allow overriding from config
	if err != nil {
		panic(err)
	}

	gpus, err := ffi.GetGPUDevices()
	if err != nil {
		log.Errorf("getting gpu devices failed: %+v", err)
	}

	h, err := sysinfo.Host()
	if err != nil {
		return api.WorkerInfo{}, xerrors.Errorf("getting host info: %w", err)
	}

	mem, err := h.Memory()
	if err != nil {
		return api.WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err)
	}

	return api.WorkerInfo{
		Hostname: hostname,
		Resources: api.WorkerResources{
			MemPhysical: mem.Total,
			MemSwap:     mem.VirtualTotal,
			MemReserved: mem.VirtualUsed + mem.Total - mem.Available, // TODO: sub this process
			GPUs:        gpus,
		},
	}, nil
}

func (l *LocalWorker) Close() error {
	return nil
}

var _ Worker = &LocalWorker{}
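NewLocalWorker takes the accepted task types from its config, so a worker process can be restricted to specific pipeline stages. A hedged sketch, assuming the sealtasks package exposes per-stage constants such as TTPreCommit1 (not shown in this diff) and using the 32GiB proof constant as an example value:

package example

import (
	"github.com/filecoin-project/specs-actors/actors/abi"

	"github.com/filecoin-project/lotus/storage/sectorstorage"
	"github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks"
	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

// newSealWorker builds a LocalWorker that only accepts the compute-heavy
// precommit/commit tasks, leaving other stages to different workers. The
// store and index arguments are assumed to come from existing node setup.
func newSealWorker(store stores.Store, local *stores.Local, idx stores.SectorIndex) *sectorstorage.LocalWorker {
	return sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{
		SealProof: abi.RegisteredProof_StackedDRG32GiBSeal,
		TaskTypes: []sealtasks.TaskType{
			sealtasks.TTPreCommit1,
			sealtasks.TTPreCommit2,
			sealtasks.TTCommit1,
			sealtasks.TTCommit2,
		},
	}, store, local, idx)
}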
51
storage/sectorstorage/worker_remote.go
Normal file
@ -0,0 +1,51 @@
package sectorstorage

import (
	"context"
	"net/http"

	"github.com/filecoin-project/specs-actors/actors/abi"
	storage2 "github.com/filecoin-project/specs-storage/storage"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/client"
	"github.com/filecoin-project/lotus/lib/jsonrpc"
)

type remote struct {
	api.WorkerApi
	closer jsonrpc.ClientCloser
}

func (r *remote) NewSector(ctx context.Context, sector abi.SectorID) error {
	return xerrors.New("unsupported")
}

func (r *remote) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage2.Data) (abi.PieceInfo, error) {
	return abi.PieceInfo{}, xerrors.New("unsupported")
}

func ConnectRemote(ctx context.Context, fa api.Common, url string) (*remote, error) {
	token, err := fa.AuthNew(ctx, []api.Permission{"admin"})
	if err != nil {
		return nil, xerrors.Errorf("creating auth token for remote connection: %w", err)
	}

	headers := http.Header{}
	headers.Add("Authorization", "Bearer "+string(token))

	wapi, closer, err := client.NewWorkerRPC(url, headers)
	if err != nil {
		return nil, xerrors.Errorf("creating jsonrpc client: %w", err)
	}

	return &remote{wapi, closer}, nil
}

func (r *remote) Close() error {
	r.closer()
	return nil
}

var _ Worker = &remote{}
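Putting it together, a caller holding an api.Common handle could attach a remote worker roughly as sketched below; the node handle and endpoint URL are assumed to come from existing setup code.

package example

import (
	"context"
	"log"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/storage/sectorstorage"
)

// attachWorker dials a remote worker's RPC endpoint and logs what it offers.
func attachWorker(ctx context.Context, node api.Common, url string) error {
	w, err := sectorstorage.ConnectRemote(ctx, node, url)
	if err != nil {
		return err
	}
	defer w.Close()

	// remote embeds api.WorkerApi, so scheduler-facing queries work directly.
	tasks, err := w.TaskTypes(ctx)
	if err != nil {
		return err
	}
	log.Printf("remote worker accepts %d task types", len(tasks))

	return nil
}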