Merge pull request #3618 from filecoin-project/feat/async-restartable-workers
Track sealing processes across lotus-miner restarts
commit 32ea060e99
@@ -4,6 +4,8 @@ import (
     "context"
     "fmt"
 
+    "github.com/google/uuid"
+
     "github.com/filecoin-project/go-jsonrpc/auth"
     metrics "github.com/libp2p/go-libp2p-core/metrics"
     "github.com/libp2p/go-libp2p-core/network"
@@ -58,6 +60,9 @@ type Common interface {
     // trigger graceful shutdown
     Shutdown(context.Context) error
 
+    // Session returns a random UUID of api provider session
+    Session(context.Context) (uuid.UUID, error)
+
     Closing(context.Context) (<-chan struct{}, error)
 }
 
@@ -374,6 +374,8 @@ type FullNode interface {
     StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error)
     // StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent
     StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
+    // StateMinerSectorAllocated checks if a sector is allocated
+    StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error)
     // StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector
     StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error)
     // StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns null in case the sector info isn't found
@@ -6,6 +6,7 @@ import (
     "time"
 
     datatransfer "github.com/filecoin-project/go-data-transfer"
+    "github.com/google/uuid"
     "github.com/ipfs/go-cid"
     "github.com/libp2p/go-libp2p-core/peer"
 
@@ -64,8 +65,9 @@ type StorageMiner interface {
 
     // WorkerConnect tells the node to connect to workers RPC
     WorkerConnect(context.Context, string) error
-    WorkerStats(context.Context) (map[uint64]storiface.WorkerStats, error)
-    WorkerJobs(context.Context) (map[uint64][]storiface.WorkerJob, error)
+    WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error)
+    WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error)
+    storiface.WorkerReturn
 
     // SealingSchedDiag dumps internal sealing scheduler state
     SealingSchedDiag(context.Context) (interface{}, error)
@@ -2,15 +2,13 @@ package api
 
 import (
     "context"
-    "io"
 
-    "github.com/ipfs/go-cid"
+    "github.com/google/uuid"
 
     "github.com/filecoin-project/go-state-types/abi"
     "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
     "github.com/filecoin-project/lotus/extern/sector-storage/stores"
     "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
-    "github.com/filecoin-project/specs-storage/storage"
 
     "github.com/filecoin-project/lotus/build"
 )
@@ -23,18 +21,12 @@ type WorkerAPI interface {
     Paths(context.Context) ([]stores.StoragePath, error)
     Info(context.Context) (storiface.WorkerInfo, error)
 
-    AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error)
+    storiface.WorkerCalls
 
-    storage.Sealer
-    MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error
+    // Storage / Other
+    Remove(ctx context.Context, sector abi.SectorID) error
 
-    UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error
-    ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (bool, error)
-
     StorageAddLocal(ctx context.Context, path string) error
 
-    Fetch(context.Context, abi.SectorID, stores.SectorFileType, stores.PathType, stores.AcquireMode) error
-
-    Closing(context.Context) (<-chan struct{}, error)
+    Session(context.Context) (uuid.UUID, error)
 }
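The sealing methods that used to sit directly on WorkerAPI (via storage.Sealer) are replaced by the embedded storiface.WorkerCalls / storiface.WorkerReturn pair: each sealing call returns a storiface.CallID immediately, and the worker later pushes the result back to the miner through a matching Return* method, which is what lets in-flight work be re-attached after either process restarts. A rough, simplified sketch of that split (not the verbatim lotus definitions; CallID here is a stand-in type):

    package sketch

    import (
        "context"

        "github.com/filecoin-project/go-state-types/abi"
        "github.com/filecoin-project/specs-storage/storage"
        "github.com/google/uuid"
    )

    // CallID is a stand-in for storiface.CallID: just enough identity to find the call again after a restart.
    type CallID struct {
        Sector abi.SectorID
        ID     uuid.UUID
    }

    // WorkerCalls: asynchronous versions of the sealing steps (subset shown).
    type WorkerCalls interface {
        AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (CallID, error)
        SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (CallID, error)
    }

    // WorkerReturn: miner-side callbacks the worker invokes once a call finishes (subset shown).
    type WorkerReturn interface {
        ReturnAddPiece(ctx context.Context, callID CallID, pi abi.PieceInfo, err string) error
        ReturnSealPreCommit1(ctx context.Context, callID CallID, p1o storage.PreCommit1Out, err string) error
    }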
@@ -5,8 +5,7 @@ import (
     "io"
     "time"
 
-    stnetwork "github.com/filecoin-project/go-state-types/network"
-
+    "github.com/google/uuid"
     "github.com/ipfs/go-cid"
     metrics "github.com/libp2p/go-libp2p-core/metrics"
     "github.com/libp2p/go-libp2p-core/network"
@@ -25,6 +24,7 @@ import (
     "github.com/filecoin-project/go-state-types/big"
     "github.com/filecoin-project/go-state-types/crypto"
     "github.com/filecoin-project/go-state-types/dline"
+    stnetwork "github.com/filecoin-project/go-state-types/network"
     "github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
     "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
     "github.com/filecoin-project/lotus/extern/sector-storage/stores"
@@ -68,6 +68,7 @@ type CommonStruct struct {
         LogSetLevel func(context.Context, string, string) error `perm:"write"`
 
         Shutdown func(context.Context) error `perm:"admin"`
+        Session func(context.Context) (uuid.UUID, error) `perm:"read"`
         Closing func(context.Context) (<-chan struct{}, error) `perm:"read"`
     }
 }
@@ -190,6 +191,7 @@ type FullNodeStruct struct {
         StateMinerPreCommitDepositForPower func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"`
         StateMinerInitialPledgeCollateral func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"`
         StateMinerAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"`
+        StateMinerSectorAllocated func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error) `perm:"read"`
         StateSectorPreCommitInfo func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) `perm:"read"`
         StateSectorGetInfo func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) `perm:"read"`
         StateSectorExpiration func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) `perm:"read"`
@@ -303,24 +305,36 @@ type StorageMinerStruct struct {
         SectorRemove func(context.Context, abi.SectorNumber) error `perm:"admin"`
         SectorMarkForUpgrade func(ctx context.Context, id abi.SectorNumber) error `perm:"admin"`
 
-        WorkerConnect func(context.Context, string) error `perm:"admin"` // TODO: worker perm
-        WorkerStats func(context.Context) (map[uint64]storiface.WorkerStats, error) `perm:"admin"`
-        WorkerJobs func(context.Context) (map[uint64][]storiface.WorkerJob, error) `perm:"admin"`
+        WorkerConnect func(context.Context, string) error `perm:"admin" retry:"true"` // TODO: worker perm
+        WorkerStats func(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) `perm:"admin"`
+        WorkerJobs func(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) `perm:"admin"`
 
+        ReturnAddPiece func(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error `perm:"admin" retry:"true"`
+        ReturnSealPreCommit1 func(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error `perm:"admin" retry:"true"`
+        ReturnSealPreCommit2 func(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error `perm:"admin" retry:"true"`
+        ReturnSealCommit1 func(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error `perm:"admin" retry:"true"`
+        ReturnSealCommit2 func(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error `perm:"admin" retry:"true"`
+        ReturnFinalizeSector func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
+        ReturnReleaseUnsealed func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
+        ReturnMoveStorage func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
+        ReturnUnsealPiece func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
+        ReturnReadPiece func(ctx context.Context, callID storiface.CallID, ok bool, err string) error `perm:"admin" retry:"true"`
+        ReturnFetch func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
+
         SealingSchedDiag func(context.Context) (interface{}, error) `perm:"admin"`
 
         StorageList func(context.Context) (map[stores.ID][]stores.Decl, error) `perm:"admin"`
         StorageLocal func(context.Context) (map[stores.ID]string, error) `perm:"admin"`
         StorageStat func(context.Context, stores.ID) (fsutil.FsStat, error) `perm:"admin"`
         StorageAttach func(context.Context, stores.StorageInfo, fsutil.FsStat) error `perm:"admin"`
-        StorageDeclareSector func(context.Context, stores.ID, abi.SectorID, stores.SectorFileType, bool) error `perm:"admin"`
-        StorageDropSector func(context.Context, stores.ID, abi.SectorID, stores.SectorFileType) error `perm:"admin"`
-        StorageFindSector func(context.Context, abi.SectorID, stores.SectorFileType, abi.SectorSize, bool) ([]stores.SectorStorageInfo, error) `perm:"admin"`
+        StorageDeclareSector func(context.Context, stores.ID, abi.SectorID, storiface.SectorFileType, bool) error `perm:"admin"`
+        StorageDropSector func(context.Context, stores.ID, abi.SectorID, storiface.SectorFileType) error `perm:"admin"`
+        StorageFindSector func(context.Context, abi.SectorID, storiface.SectorFileType, abi.SectorSize, bool) ([]stores.SectorStorageInfo, error) `perm:"admin"`
         StorageInfo func(context.Context, stores.ID) (stores.StorageInfo, error) `perm:"admin"`
-        StorageBestAlloc func(ctx context.Context, allocate stores.SectorFileType, ssize abi.SectorSize, sealing stores.PathType) ([]stores.StorageInfo, error) `perm:"admin"`
+        StorageBestAlloc func(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, sealing storiface.PathType) ([]stores.StorageInfo, error) `perm:"admin"`
         StorageReportHealth func(ctx context.Context, id stores.ID, report stores.HealthReport) error `perm:"admin"`
-        StorageLock func(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) error `perm:"admin"`
-        StorageTryLock func(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) (bool, error) `perm:"admin"`
+        StorageLock func(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error `perm:"admin"`
+        StorageTryLock func(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) `perm:"admin"`
 
         DealsImportData func(ctx context.Context, dealPropCid cid.Cid, file string) error `perm:"write"`
         DealsList func(ctx context.Context) ([]api.MarketDeal, error) `perm:"read"`
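Two details of this block carry the restartability: there is one Return* callback per asynchronous worker call, and the worker-facing entries now carry a retry:"true" struct tag alongside perm:"admin", which the RPC client picks up so these calls can be resent after a dropped connection. A minimal, self-contained illustration of reading such a tag via reflection (not the go-jsonrpc implementation itself; field and tags are illustrative):

    package main

    import (
        "fmt"
        "reflect"
    )

    // A field shaped like the entries above.
    type clientStruct struct {
        WorkerConnect func(string) error `perm:"admin" retry:"true"`
    }

    func main() {
        f, _ := reflect.TypeOf(clientStruct{}).FieldByName("WorkerConnect")
        fmt.Println("perm:", f.Tag.Get("perm"))                           // perm: admin
        fmt.Println("retry on reconnect:", f.Tag.Get("retry") == "true")  // retry on reconnect: true
    }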
@@ -356,23 +370,22 @@ type WorkerStruct struct {
         Paths func(context.Context) ([]stores.StoragePath, error) `perm:"admin"`
         Info func(context.Context) (storiface.WorkerInfo, error) `perm:"admin"`
 
-        AddPiece func(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) `perm:"admin"`
-        SealPreCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) `perm:"admin"`
-        SealPreCommit2 func(context.Context, abi.SectorID, storage.PreCommit1Out) (cids storage.SectorCids, err error) `perm:"admin"`
-        SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) `perm:"admin"`
-        SealCommit2 func(context.Context, abi.SectorID, storage.Commit1Out) (storage.Proof, error) `perm:"admin"`
-        FinalizeSector func(context.Context, abi.SectorID, []storage.Range) error `perm:"admin"`
-        ReleaseUnsealed func(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error `perm:"admin"`
-        Remove func(ctx context.Context, sector abi.SectorID) error `perm:"admin"`
-        MoveStorage func(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error `perm:"admin"`
-        StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"`
-
-        UnsealPiece func(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error `perm:"admin"`
-        ReadPiece func(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (bool, error) `perm:"admin"`
-
-        Fetch func(context.Context, abi.SectorID, stores.SectorFileType, stores.PathType, stores.AcquireMode) error `perm:"admin"`
-
-        Closing func(context.Context) (<-chan struct{}, error) `perm:"admin"`
+        AddPiece func(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) `perm:"admin"`
+        SealPreCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"`
+        SealPreCommit2 func(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) `perm:"admin"`
+        SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) `perm:"admin"`
+        SealCommit2 func(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) `perm:"admin"`
+        FinalizeSector func(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) `perm:"admin"`
+        ReleaseUnsealed func(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) `perm:"admin"`
+        MoveStorage func(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"`
+        UnsealPiece func(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) `perm:"admin"`
+        ReadPiece func(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (storiface.CallID, error) `perm:"admin"`
+        Fetch func(context.Context, abi.SectorID, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"`
+
+        Remove func(ctx context.Context, sector abi.SectorID) error `perm:"admin"`
+        StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"`
+
+        Session func(context.Context) (uuid.UUID, error) `perm:"admin"`
     }
 }
 
@@ -499,6 +512,10 @@ func (c *CommonStruct) Shutdown(ctx context.Context) error {
     return c.Internal.Shutdown(ctx)
 }
 
+func (c *CommonStruct) Session(ctx context.Context) (uuid.UUID, error) {
+    return c.Internal.Session(ctx)
+}
+
 func (c *CommonStruct) Closing(ctx context.Context) (<-chan struct{}, error) {
     return c.Internal.Closing(ctx)
 }
@@ -905,6 +922,10 @@ func (c *FullNodeStruct) StateMinerAvailableBalance(ctx context.Context, maddr a
     return c.Internal.StateMinerAvailableBalance(ctx, maddr, tsk)
 }
 
+func (c *FullNodeStruct) StateMinerSectorAllocated(ctx context.Context, maddr address.Address, s abi.SectorNumber, tsk types.TipSetKey) (bool, error) {
+    return c.Internal.StateMinerSectorAllocated(ctx, maddr, s, tsk)
+}
+
 func (c *FullNodeStruct) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) {
     return c.Internal.StateSectorPreCommitInfo(ctx, maddr, n, tsk)
 }
@@ -1225,14 +1246,58 @@ func (c *StorageMinerStruct) WorkerConnect(ctx context.Context, url string) erro
     return c.Internal.WorkerConnect(ctx, url)
 }
 
-func (c *StorageMinerStruct) WorkerStats(ctx context.Context) (map[uint64]storiface.WorkerStats, error) {
+func (c *StorageMinerStruct) WorkerStats(ctx context.Context) (map[uuid.UUID]storiface.WorkerStats, error) {
     return c.Internal.WorkerStats(ctx)
 }
 
-func (c *StorageMinerStruct) WorkerJobs(ctx context.Context) (map[uint64][]storiface.WorkerJob, error) {
+func (c *StorageMinerStruct) WorkerJobs(ctx context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) {
     return c.Internal.WorkerJobs(ctx)
 }
 
+func (c *StorageMinerStruct) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error {
+    return c.Internal.ReturnAddPiece(ctx, callID, pi, err)
+}
+
+func (c *StorageMinerStruct) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error {
+    return c.Internal.ReturnSealPreCommit1(ctx, callID, p1o, err)
+}
+
+func (c *StorageMinerStruct) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error {
+    return c.Internal.ReturnSealPreCommit2(ctx, callID, sealed, err)
+}
+
+func (c *StorageMinerStruct) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error {
+    return c.Internal.ReturnSealCommit1(ctx, callID, out, err)
+}
+
+func (c *StorageMinerStruct) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error {
+    return c.Internal.ReturnSealCommit2(ctx, callID, proof, err)
+}
+
+func (c *StorageMinerStruct) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err string) error {
+    return c.Internal.ReturnFinalizeSector(ctx, callID, err)
+}
+
+func (c *StorageMinerStruct) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err string) error {
+    return c.Internal.ReturnReleaseUnsealed(ctx, callID, err)
+}
+
+func (c *StorageMinerStruct) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err string) error {
+    return c.Internal.ReturnMoveStorage(ctx, callID, err)
+}
+
+func (c *StorageMinerStruct) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err string) error {
+    return c.Internal.ReturnUnsealPiece(ctx, callID, err)
+}
+
+func (c *StorageMinerStruct) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err string) error {
+    return c.Internal.ReturnReadPiece(ctx, callID, ok, err)
+}
+
+func (c *StorageMinerStruct) ReturnFetch(ctx context.Context, callID storiface.CallID, err string) error {
+    return c.Internal.ReturnFetch(ctx, callID, err)
+}
+
 func (c *StorageMinerStruct) SealingSchedDiag(ctx context.Context) (interface{}, error) {
     return c.Internal.SealingSchedDiag(ctx)
 }
@@ -1241,15 +1306,15 @@ func (c *StorageMinerStruct) StorageAttach(ctx context.Context, si stores.Storag
     return c.Internal.StorageAttach(ctx, si, st)
 }
 
-func (c *StorageMinerStruct) StorageDeclareSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft stores.SectorFileType, primary bool) error {
+func (c *StorageMinerStruct) StorageDeclareSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error {
     return c.Internal.StorageDeclareSector(ctx, storageId, s, ft, primary)
 }
 
-func (c *StorageMinerStruct) StorageDropSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft stores.SectorFileType) error {
+func (c *StorageMinerStruct) StorageDropSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft storiface.SectorFileType) error {
     return c.Internal.StorageDropSector(ctx, storageId, s, ft)
 }
 
-func (c *StorageMinerStruct) StorageFindSector(ctx context.Context, si abi.SectorID, types stores.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]stores.SectorStorageInfo, error) {
+func (c *StorageMinerStruct) StorageFindSector(ctx context.Context, si abi.SectorID, types storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]stores.SectorStorageInfo, error) {
     return c.Internal.StorageFindSector(ctx, si, types, ssize, allowFetch)
 }
 
@@ -1269,7 +1334,7 @@ func (c *StorageMinerStruct) StorageInfo(ctx context.Context, id stores.ID) (sto
     return c.Internal.StorageInfo(ctx, id)
 }
 
-func (c *StorageMinerStruct) StorageBestAlloc(ctx context.Context, allocate stores.SectorFileType, ssize abi.SectorSize, pt stores.PathType) ([]stores.StorageInfo, error) {
+func (c *StorageMinerStruct) StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pt storiface.PathType) ([]stores.StorageInfo, error) {
     return c.Internal.StorageBestAlloc(ctx, allocate, ssize, pt)
 }
 
@@ -1277,11 +1342,11 @@ func (c *StorageMinerStruct) StorageReportHealth(ctx context.Context, id stores.
     return c.Internal.StorageReportHealth(ctx, id, report)
 }
 
-func (c *StorageMinerStruct) StorageLock(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) error {
+func (c *StorageMinerStruct) StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error {
     return c.Internal.StorageLock(ctx, sector, read, write)
 }
 
-func (c *StorageMinerStruct) StorageTryLock(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) (bool, error) {
+func (c *StorageMinerStruct) StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) {
     return c.Internal.StorageTryLock(ctx, sector, read, write)
 }
 
@@ -1427,60 +1492,60 @@ func (w *WorkerStruct) Info(ctx context.Context) (storiface.WorkerInfo, error) {
     return w.Internal.Info(ctx)
 }
 
-func (w *WorkerStruct) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
+func (w *WorkerStruct) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
     return w.Internal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)
 }
 
-func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
+func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
     return w.Internal.SealPreCommit1(ctx, sector, ticket, pieces)
 }
 
-func (w *WorkerStruct) SealPreCommit2(ctx context.Context, sector abi.SectorID, p1o storage.PreCommit1Out) (storage.SectorCids, error) {
-    return w.Internal.SealPreCommit2(ctx, sector, p1o)
+func (w *WorkerStruct) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
+    return w.Internal.SealPreCommit2(ctx, sector, pc1o)
 }
 
-func (w *WorkerStruct) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
+func (w *WorkerStruct) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
     return w.Internal.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
 }
 
-func (w *WorkerStruct) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) {
+func (w *WorkerStruct) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) {
     return w.Internal.SealCommit2(ctx, sector, c1o)
 }
 
-func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
+func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) {
     return w.Internal.FinalizeSector(ctx, sector, keepUnsealed)
 }
 
-func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
+func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) {
     return w.Internal.ReleaseUnsealed(ctx, sector, safeToFree)
 }
 
+func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) {
+    return w.Internal.MoveStorage(ctx, sector, types)
+}
+
+func (w *WorkerStruct) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, c cid.Cid) (storiface.CallID, error) {
+    return w.Internal.UnsealPiece(ctx, sector, offset, size, ticket, c)
+}
+
+func (w *WorkerStruct) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
+    return w.Internal.ReadPiece(ctx, sink, sector, offset, size)
+}
+
+func (w *WorkerStruct) Fetch(ctx context.Context, id abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
+    return w.Internal.Fetch(ctx, id, fileType, ptype, am)
+}
+
 func (w *WorkerStruct) Remove(ctx context.Context, sector abi.SectorID) error {
     return w.Internal.Remove(ctx, sector)
 }
 
-func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error {
-    return w.Internal.MoveStorage(ctx, sector, types)
-}
-
 func (w *WorkerStruct) StorageAddLocal(ctx context.Context, path string) error {
     return w.Internal.StorageAddLocal(ctx, path)
 }
 
-func (w *WorkerStruct) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error {
-    return w.Internal.UnsealPiece(ctx, id, index, size, randomness, c)
-}
-
-func (w *WorkerStruct) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
-    return w.Internal.ReadPiece(ctx, writer, id, index, size)
-}
-
-func (w *WorkerStruct) Fetch(ctx context.Context, id abi.SectorID, fileType stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error {
-    return w.Internal.Fetch(ctx, id, fileType, ptype, am)
-}
-
-func (w *WorkerStruct) Closing(ctx context.Context) (<-chan struct{}, error) {
-    return w.Internal.Closing(ctx)
+func (w *WorkerStruct) Session(ctx context.Context) (uuid.UUID, error) {
+    return w.Internal.Session(ctx)
 }
 
 func (g GatewayStruct) ChainGetBlockMessages(ctx context.Context, c cid.Cid) (*api.BlockMessages, error) {
@@ -84,8 +84,8 @@ func VersionForType(nodeType NodeType) (Version, error) {
 // semver versions of the rpc api exposed
 var (
     FullAPIVersion = newVer(0, 17, 0)
-    MinerAPIVersion = newVer(0, 16, 0)
-    WorkerAPIVersion = newVer(0, 15, 0)
+    MinerAPIVersion = newVer(0, 17, 0)
+    WorkerAPIVersion = newVer(0, 16, 0)
 )
 
 //nolint:varcheck,deadcode
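The miner and worker API versions are bumped together because the worker/miner RPC surface changed shape (asynchronous calls, UUID worker identities). For context, lotus API versions are small packed integers; a sketch of the packing assumed to sit behind newVer shows why a minor bump is enough for mismatched peers to reject each other:

    package sketch

    // Assumed to mirror build.newVer: major/minor/patch packed into one integer,
    // so any difference in the packed value marks an incompatible API.
    type version uint32

    func newVer(major, minor, patch uint8) version {
        return version(uint32(major)<<16 | uint32(minor)<<8 | uint32(patch))
    }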
@@ -12,6 +12,7 @@ import (
     "github.com/filecoin-project/go-address"
     "github.com/filecoin-project/go-state-types/abi"
     tbig "github.com/filecoin-project/go-state-types/big"
 
     "github.com/filecoin-project/lotus/build"
     "github.com/filecoin-project/lotus/chain/messagepool/gasguess"
     "github.com/filecoin-project/lotus/chain/types"
cli/cmd.go
@@ -4,6 +4,7 @@ import (
     "context"
     "fmt"
     "net/http"
+    "net/url"
     "os"
     "os/signal"
     "strings"
@@ -206,7 +207,22 @@ func GetFullNodeAPI(ctx *cli.Context) (api.FullNode, jsonrpc.ClientCloser, error
     return client.NewFullNodeRPC(ctx.Context, addr, headers)
 }
 
-func GetStorageMinerAPI(ctx *cli.Context, opts ...jsonrpc.Option) (api.StorageMiner, jsonrpc.ClientCloser, error) {
+type GetStorageMinerOptions struct {
+    PreferHttp bool
+}
+
+type GetStorageMinerOption func(*GetStorageMinerOptions)
+
+func StorageMinerUseHttp(opts *GetStorageMinerOptions) {
+    opts.PreferHttp = true
+}
+
+func GetStorageMinerAPI(ctx *cli.Context, opts ...GetStorageMinerOption) (api.StorageMiner, jsonrpc.ClientCloser, error) {
+    var options GetStorageMinerOptions
+    for _, opt := range opts {
+        opt(&options)
+    }
+
     if tn, ok := ctx.App.Metadata["testnode-storage"]; ok {
         return tn.(api.StorageMiner), func() {}, nil
     }
@@ -216,7 +232,23 @@ func GetStorageMinerAPI(ctx *cli.Context, opts ...jsonrpc.Opti
         return nil, nil, err
     }
 
-    return client.NewStorageMinerRPC(ctx.Context, addr, headers, opts...)
+    if options.PreferHttp {
+        u, err := url.Parse(addr)
+        if err != nil {
+            return nil, nil, xerrors.Errorf("parsing miner api URL: %w", err)
+        }
+
+        switch u.Scheme {
+        case "ws":
+            u.Scheme = "http"
+        case "wss":
+            u.Scheme = "https"
+        }
+
+        addr = u.String()
+    }
+
+    return client.NewStorageMinerRPC(ctx.Context, addr, headers)
 }
 
 func GetWorkerAPI(ctx *cli.Context) (api.WorkerAPI, jsonrpc.ClientCloser, error) {
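GetStorageMinerAPI switches from raw jsonrpc options to a small functional-options type, and the only option so far (StorageMinerUseHttp) rewrites ws/wss API addresses to http/https before dialing, so the worker's long-running connection to the miner no longer depends on a persistent websocket. A self-contained illustration of the same pattern (names here are illustrative, not lotus APIs):

    package main

    import "fmt"

    type dialOptions struct {
        preferHTTP bool
    }

    type dialOption func(*dialOptions)

    // preferHTTP plays the role of StorageMinerUseHttp above.
    func preferHTTP(o *dialOptions) { o.preferHTTP = true }

    func dial(addr string, opts ...dialOption) string {
        var options dialOptions
        for _, opt := range opts {
            opt(&options)
        }
        scheme := "ws"
        if options.preferHTTP {
            scheme = "http"
        }
        return fmt.Sprintf("%s://%s/rpc/v0", scheme, addr)
    }

    func main() {
        fmt.Println(dial("127.0.0.1:2345"))             // ws://127.0.0.1:2345/rpc/v0
        fmt.Println(dial("127.0.0.1:2345", preferHTTP)) // http://127.0.0.1:2345/rpc/v0
    }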
@@ -26,7 +26,7 @@ import (
     lcli "github.com/filecoin-project/lotus/cli"
     "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
     "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs"
-    "github.com/filecoin-project/lotus/extern/sector-storage/stores"
+    "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
     "github.com/filecoin-project/specs-storage/storage"
 
     lapi "github.com/filecoin-project/lotus/api"
|
|||||||
if !skipunseal {
|
if !skipunseal {
|
||||||
log.Infof("[%d] Unsealing sector", i)
|
log.Infof("[%d] Unsealing sector", i)
|
||||||
{
|
{
|
||||||
p, done, err := sbfs.AcquireSector(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, stores.FTUnsealed, stores.FTNone, stores.PathSealing)
|
p, done, err := sbfs.AcquireSector(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("acquire unsealed sector for removing: %w", err)
|
return xerrors.Errorf("acquire unsealed sector for removing: %w", err)
|
||||||
}
|
}
|
||||||
|
@@ -10,11 +10,11 @@ import (
     "os"
     "path/filepath"
     "strings"
-    "syscall"
     "time"
 
     "github.com/google/uuid"
     "github.com/gorilla/mux"
+    "github.com/ipfs/go-datastore/namespace"
     logging "github.com/ipfs/go-log/v2"
     manet "github.com/multiformats/go-multiaddr/net"
     "github.com/urfave/cli/v2"
@@ -25,6 +25,7 @@ import (
     "github.com/filecoin-project/go-jsonrpc"
     "github.com/filecoin-project/go-jsonrpc/auth"
     paramfetch "github.com/filecoin-project/go-paramfetch"
+    "github.com/filecoin-project/go-statestore"
 
     "github.com/filecoin-project/lotus/api"
     "github.com/filecoin-project/lotus/api/apistruct"
@@ -37,6 +38,7 @@ import (
     "github.com/filecoin-project/lotus/lib/lotuslog"
     "github.com/filecoin-project/lotus/lib/rpcenc"
     "github.com/filecoin-project/lotus/metrics"
+    "github.com/filecoin-project/lotus/node/modules"
     "github.com/filecoin-project/lotus/node/repo"
 )
 
@@ -173,15 +175,18 @@ var runCmd = &cli.Command{
         }
 
         // Connect to storage-miner
+        ctx := lcli.ReqContext(cctx)
+
         var nodeApi api.StorageMiner
         var closer func()
         var err error
         for {
-            nodeApi, closer, err = lcli.GetStorageMinerAPI(cctx,
-                jsonrpc.WithNoReconnect(),
-                jsonrpc.WithTimeout(30*time.Second))
+            nodeApi, closer, err = lcli.GetStorageMinerAPI(cctx, lcli.StorageMinerUseHttp)
             if err == nil {
-                break
+                _, err = nodeApi.Version(ctx)
+                if err == nil {
+                    break
+                }
             }
             fmt.Printf("\r\x1b[0KConnecting to miner API... (%s)", err)
             time.Sleep(time.Second)
@@ -189,7 +194,6 @@ var runCmd = &cli.Command{
         }
 
         defer closer()
-        ctx := lcli.ReqContext(cctx)
         ctx, cancel := context.WithCancel(ctx)
         defer cancel()
 
@@ -209,8 +213,6 @@ var runCmd = &cli.Command{
         }
         log.Infof("Remote version %s", v)
 
-        watchMinerConn(ctx, cctx, nodeApi)
-
         // Check params
 
         act, err := nodeApi.ActorAddress(ctx)
@@ -318,6 +320,15 @@ var runCmd = &cli.Command{
         if err != nil {
             return err
         }
+        defer func() {
+            if err := lr.Close(); err != nil {
+                log.Error("closing repo", err)
+            }
+        }()
+        ds, err := lr.Datastore("/metadata")
+        if err != nil {
+            return err
+        }
 
         log.Info("Opening local storage; connecting to master")
         const unspecifiedAddress = "0.0.0.0"
@@ -357,12 +368,14 @@ var runCmd = &cli.Command{
 
         // Create / expose the worker
 
+        wsts := statestore.New(namespace.Wrap(ds, modules.WorkerCallsPrefix))
+
         workerApi := &worker{
             LocalWorker: sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{
                 SealProof: spt,
                 TaskTypes: taskTypes,
                 NoSwap: cctx.Bool("no-swap"),
-            }, remote, localStore, nodeApi),
+            }, remote, localStore, nodeApi, nodeApi, wsts),
             localStore: localStore,
             ls: lr,
         }
@@ -433,13 +446,72 @@ var runCmd = &cli.Command{
             }
         }
 
-        log.Info("Waiting for tasks")
+        minerSession, err := nodeApi.Session(ctx)
+        if err != nil {
+            return xerrors.Errorf("getting miner session: %w", err)
+        }
+
         go func() {
-            if err := nodeApi.WorkerConnect(ctx, "ws://"+address+"/rpc/v0"); err != nil {
-                log.Errorf("Registering worker failed: %+v", err)
-                cancel()
-                return
+            heartbeats := time.NewTicker(stores.HeartbeatInterval)
+            defer heartbeats.Stop()
+
+            var connected, reconnect bool
+            for {
+                // If we're reconnecting, redeclare storage first
+                if reconnect {
+                    log.Info("Redeclaring local storage")
+
+                    if err := localStore.Redeclare(ctx); err != nil {
+                        log.Errorf("Redeclaring local storage failed: %+v", err)
+
+                        select {
+                        case <-ctx.Done():
+                            return // graceful shutdown
+                        case <-heartbeats.C:
+                        }
+                        continue
+                    }
+
+                    connected = false
+                }
+
+                log.Info("Making sure no local tasks are running")
+
+                // TODO: we could get rid of this, but that requires tracking resources for restarted tasks correctly
+                workerApi.LocalWorker.WaitQuiet()
+
+                for {
+                    curSession, err := nodeApi.Session(ctx)
+                    if err != nil {
+                        log.Errorf("heartbeat: checking remote session failed: %+v", err)
+                    } else {
+                        if curSession != minerSession {
+                            minerSession = curSession
+                            break
+                        }
+
+                        if !connected {
+                            if err := nodeApi.WorkerConnect(ctx, "http://"+address+"/rpc/v0"); err != nil {
+                                log.Errorf("Registering worker failed: %+v", err)
+                                cancel()
+                                return
+                            }
+
+                            log.Info("Worker registered successfully, waiting for tasks")
+                            connected = true
+                        }
+                    }
+
+                    select {
+                    case <-ctx.Done():
+                        return // graceful shutdown
+                    case <-heartbeats.C:
+                    }
+                }
+
+                log.Errorf("LOTUS-MINER CONNECTION LOST")
+
+                reconnect = true
             }
         }()
 
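Instead of exec-restarting itself when the miner connection drops (the watchMinerConn helper removed in the next hunk), the worker now runs a heartbeat loop: it remembers the miner's Session UUID, and when that UUID changes (miner restarted) or the session check fails it redeclares local storage and re-registers via WorkerConnect, while locally queued sealing tasks keep running. A distilled, simplified sketch of the session check only (stand-in names, not the code above verbatim):

    package sketch

    import (
        "context"
        "fmt"
        "time"

        "github.com/google/uuid"
    )

    // sessionFunc stands in for nodeApi.Session.
    type sessionFunc func(context.Context) (uuid.UUID, error)

    func watchMiner(ctx context.Context, session sessionFunc, register func() error) {
        last, err := session(ctx)
        if err != nil {
            return
        }
        heartbeats := time.NewTicker(10 * time.Second) // stand-in for stores.HeartbeatInterval
        defer heartbeats.Stop()

        connected := false
        for {
            cur, err := session(ctx)
            switch {
            case err != nil:
                fmt.Println("heartbeat failed:", err)
                connected = false
            case cur != last:
                fmt.Println("miner restarted, re-registering")
                last, connected = cur, false
            }
            if !connected && err == nil {
                if register() == nil {
                    connected = true
                }
            }
            select {
            case <-ctx.Done():
                return
            case <-heartbeats.C:
            }
        }
    }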
@@ -447,55 +519,6 @@ var runCmd = &cli.Command{
     },
 }
 
-func watchMinerConn(ctx context.Context, cctx *cli.Context, nodeApi api.StorageMiner) {
-    go func() {
-        closing, err := nodeApi.Closing(ctx)
-        if err != nil {
-            log.Errorf("failed to get remote closing channel: %+v", err)
-        }
-
-        select {
-        case <-closing:
-        case <-ctx.Done():
-        }
-
-        if ctx.Err() != nil {
-            return // graceful shutdown
-        }
-
-        log.Warnf("Connection with miner node lost, restarting")
-
-        exe, err := os.Executable()
-        if err != nil {
-            log.Errorf("getting executable for auto-restart: %+v", err)
-        }
-
-        _ = log.Sync()
-
-        // TODO: there are probably cleaner/more graceful ways to restart,
-        // but this is good enough for now (FSM can recover from the mess this creates)
-        //nolint:gosec
-        if err := syscall.Exec(exe, []string{exe,
-            fmt.Sprintf("--worker-repo=%s", cctx.String("worker-repo")),
-            fmt.Sprintf("--miner-repo=%s", cctx.String("miner-repo")),
-            fmt.Sprintf("--enable-gpu-proving=%t", cctx.Bool("enable-gpu-proving")),
-            "run",
-            fmt.Sprintf("--listen=%s", cctx.String("listen")),
-            fmt.Sprintf("--no-local-storage=%t", cctx.Bool("no-local-storage")),
-            fmt.Sprintf("--no-swap=%t", cctx.Bool("no-swap")),
-            fmt.Sprintf("--addpiece=%t", cctx.Bool("addpiece")),
-            fmt.Sprintf("--precommit1=%t", cctx.Bool("precommit1")),
-            fmt.Sprintf("--unseal=%t", cctx.Bool("unseal")),
-            fmt.Sprintf("--precommit2=%t", cctx.Bool("precommit2")),
-            fmt.Sprintf("--commit=%t", cctx.Bool("commit")),
-            fmt.Sprintf("--parallel-fetch-limit=%d", cctx.Int("parallel-fetch-limit")),
-            fmt.Sprintf("--timeout=%s", cctx.String("timeout")),
-        }, os.Environ()); err != nil {
-            fmt.Println(err)
-        }
-    }()
-}
-
 func extractRoutableIP(timeout time.Duration) (string, error) {
     minerMultiAddrKey := "MINER_API_INFO"
     deprecatedMinerMultiAddrKey := "STORAGE_API_INFO"
@@ -6,11 +6,10 @@ import (
     "github.com/mitchellh/go-homedir"
     "golang.org/x/xerrors"
 
-    "github.com/filecoin-project/specs-storage/storage"
-
     "github.com/filecoin-project/lotus/build"
     sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
     "github.com/filecoin-project/lotus/extern/sector-storage/stores"
+    "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )
 
 type worker struct {
@@ -43,4 +42,4 @@ func (w *worker) StorageAddLocal(ctx context.Context, path string) error {
     return nil
 }
 
-var _ storage.Sealer = &worker{}
+var _ storiface.WorkerCalls = &worker{}
@@ -30,6 +30,7 @@ import (
     "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
     "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs"
    "github.com/filecoin-project/lotus/extern/sector-storage/stores"
+    "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
     "github.com/filecoin-project/lotus/genesis"
 )
 
@@ -187,7 +188,7 @@ func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid abi.Sector
 }
 
 func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.RegisteredSealProof, ssize abi.SectorSize) (*genesis.PreSeal, error) {
-    paths, done, err := sbfs.AcquireSector(context.TODO(), sid, 0, stores.FTSealed|stores.FTCache, stores.PathSealing)
+    paths, done, err := sbfs.AcquireSector(context.TODO(), sid, 0, storiface.FTSealed|storiface.FTCache, storiface.PathSealing)
     if err != nil {
         return nil, xerrors.Errorf("acquire unsealed sector: %w", err)
     }
@@ -211,7 +212,7 @@ func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.Registe
 }
 
 func cleanupUnsealed(sbfs *basicfs.Provider, sid abi.SectorID) error {
-    paths, done, err := sbfs.AcquireSector(context.TODO(), sid, stores.FTUnsealed, stores.FTNone, stores.PathSealing)
+    paths, done, err := sbfs.AcquireSector(context.TODO(), sid, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing)
     if err != nil {
         return err
     }
@@ -137,7 +137,7 @@ var infoAllCmd = &cli.Command{
         }
 
         if err := sectorsStatusCmd.Action(cli.NewContext(cctx.App, fs, cctx)); err != nil {
-            return err
+            fmt.Println("ERROR: ", err)
         }
 
         fmt.Printf("\n##: Sector %d Storage Location\n", s)
@@ -148,7 +148,7 @@ var infoAllCmd = &cli.Command{
         }
 
         if err := storageFindCmd.Action(cli.NewContext(cctx.App, fs, cctx)); err != nil {
-            return err
+            fmt.Println("ERROR: ", err)
         }
     }
 
@@ -12,11 +12,10 @@ import (
     "path/filepath"
     "strconv"
 
-    "github.com/filecoin-project/go-state-types/big"
-
     "github.com/docker/go-units"
     "github.com/google/uuid"
     "github.com/ipfs/go-datastore"
+    "github.com/ipfs/go-datastore/namespace"
     "github.com/libp2p/go-libp2p-core/crypto"
     "github.com/libp2p/go-libp2p-core/peer"
     "github.com/mitchellh/go-homedir"
@@ -27,6 +26,8 @@ import (
     cborutil "github.com/filecoin-project/go-cbor-util"
     paramfetch "github.com/filecoin-project/go-paramfetch"
     "github.com/filecoin-project/go-state-types/abi"
+    "github.com/filecoin-project/go-state-types/big"
+    "github.com/filecoin-project/go-statestore"
     sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
     "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
     "github.com/filecoin-project/lotus/extern/sector-storage/stores"
@ -447,6 +448,9 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode,
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
wsts := statestore.New(namespace.Wrap(mds, modules.WorkerCallsPrefix))
|
||||||
|
smsts := statestore.New(namespace.Wrap(mds, modules.ManagerWorkPrefix))
|
||||||
|
|
||||||
smgr, err := sectorstorage.New(ctx, lr, stores.NewIndex(), &ffiwrapper.Config{
|
smgr, err := sectorstorage.New(ctx, lr, stores.NewIndex(), &ffiwrapper.Config{
|
||||||
SealProofType: spt,
|
SealProofType: spt,
|
||||||
}, sectorstorage.SealerConfig{
|
}, sectorstorage.SealerConfig{
|
||||||
@ -456,7 +460,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode,
|
|||||||
AllowPreCommit2: true,
|
AllowPreCommit2: true,
|
||||||
AllowCommit: true,
|
AllowCommit: true,
|
||||||
AllowUnseal: true,
|
AllowUnseal: true,
|
||||||
}, nil, sa)
|
}, nil, sa, wsts, smsts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
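The two `statestore.New(...)` lines added above are the miner-side anchor for restart tracking: one store keeps the results of calls running on workers, the other keeps the manager's view of outstanding work, and both live under the miner's metadata datastore so they survive a `lotus-miner` restart. A hedged sketch of that wiring in isolation follows; the constructor calls mirror the diff, while the prefix values, the helper function and the in-memory datastore are assumptions for illustration.

```go
package main

import (
	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"

	"github.com/filecoin-project/go-statestore"
)

// Hypothetical prefixes standing in for modules.WorkerCallsPrefix and
// modules.ManagerWorkPrefix used in the diff above.
var (
	workerCallsPrefix = datastore.NewKey("/worker/calls")
	managerWorkPrefix = datastore.NewKey("/stmgr/work")
)

// newWorkTracking carves both state stores out of the same persistent
// metadata datastore, so their contents outlive the process. Sketch only; the
// real code passes the two stores straight to sectorstorage.New(..., wsts, smsts).
func newWorkTracking(mds datastore.Datastore) (wsts, smsts *statestore.StateStore) {
	wsts = statestore.New(namespace.Wrap(mds, workerCallsPrefix))  // calls running on workers
	smsts = statestore.New(namespace.Wrap(mds, managerWorkPrefix)) // manager-level work items
	return wsts, smsts
}

func main() {
	// An in-memory datastore keeps the example self-contained; the miner uses
	// its on-disk metadata datastore instead.
	wsts, smsts := newWorkTracking(datastore.NewMapDatastore())
	_, _ = wsts, smsts
}
```

Handing both stores to the sector storage manager is, as the rest of the diff suggests, what lets it re-attach to unfinished work after the process comes back up.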
@@ -1,6 +1,7 @@
 package main

 import (
+	"encoding/hex"
 	"encoding/json"
 	"fmt"
 	"os"
@@ -9,10 +10,10 @@ import (
 	"text/tabwriter"
 	"time"

-	"golang.org/x/xerrors"
-
 	"github.com/fatih/color"
+	"github.com/google/uuid"
 	"github.com/urfave/cli/v2"
+	"golang.org/x/xerrors"

 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"

@@ -53,7 +54,7 @@ var sealingWorkersCmd = &cli.Command{
 		}

 		type sortableStat struct {
-			id uint64
+			id uuid.UUID
 			storiface.WorkerStats
 		}

@@ -63,7 +64,7 @@ var sealingWorkersCmd = &cli.Command{
 		}

 		sort.Slice(st, func(i, j int) bool {
-			return st[i].id < st[j].id
+			return st[i].id.String() < st[j].id.String()
 		})

 		for _, stat := range st {
@@ -74,7 +75,12 @@ var sealingWorkersCmd = &cli.Command{
 				gpuUse = ""
 			}

-			fmt.Printf("Worker %d, host %s\n", stat.id, color.MagentaString(stat.Info.Hostname))
+			var disabled string
+			if !stat.Enabled {
+				disabled = color.RedString(" (disabled)")
+			}
+
+			fmt.Printf("Worker %s, host %s%s\n", stat.id, color.MagentaString(stat.Info.Hostname), disabled)

 			var barCols = uint64(64)
 			cpuBars := int(stat.CpuUse * barCols / stat.Info.Resources.CPUs)
@@ -140,7 +146,7 @@ var sealingJobsCmd = &cli.Command{

 		type line struct {
 			storiface.WorkerJob
-			wid uint64
+			wid uuid.UUID
 		}

 		lines := make([]line, 0)
@@ -159,10 +165,13 @@ var sealingJobsCmd = &cli.Command{
 			if lines[i].RunWait != lines[j].RunWait {
 				return lines[i].RunWait < lines[j].RunWait
 			}
+			if lines[i].Start.Equal(lines[j].Start) {
+				return lines[i].ID.ID.String() < lines[j].ID.ID.String()
+			}
 			return lines[i].Start.Before(lines[j].Start)
 		})

-		workerHostnames := map[uint64]string{}
+		workerHostnames := map[uuid.UUID]string{}

 		wst, err := nodeApi.WorkerStats(ctx)
 		if err != nil {
@@ -178,10 +187,25 @@ var sealingJobsCmd = &cli.Command{

 		for _, l := range lines {
 			state := "running"
-			if l.RunWait != 0 {
+			if l.RunWait > 0 {
 				state = fmt.Sprintf("assigned(%d)", l.RunWait-1)
 			}
-			_, _ = fmt.Fprintf(tw, "%d\t%d\t%d\t%s\t%s\t%s\t%s\n", l.ID, l.Sector.Number, l.wid, workerHostnames[l.wid], l.Task.Short(), state, time.Now().Sub(l.Start).Truncate(time.Millisecond*100))
+			if l.RunWait == -1 {
+				state = "ret-wait"
+			}
+			dur := "n/a"
+			if !l.Start.IsZero() {
+				dur = time.Now().Sub(l.Start).Truncate(time.Millisecond * 100).String()
+			}
+
+			_, _ = fmt.Fprintf(tw, "%s\t%d\t%s\t%s\t%s\t%s\t%s\n",
+				hex.EncodeToString(l.ID.ID[10:]),
+				l.Sector.Number,
+				hex.EncodeToString(l.wid[5:]),
+				workerHostnames[l.wid],
+				l.Task.Short(),
+				state,
+				dur)
 		}

 		return tw.Flush()
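The updated jobs listing above leans on a sentinel convention for `storiface.WorkerJob.RunWait`: positive values mean the job is still queued, zero means it is running, and `-1` means the worker already finished and the manager is waiting to collect the return value (`ret-wait`), the state that becomes visible when a restarted miner re-attaches to old calls. A small illustrative helper mirroring that mapping; the numeric convention is read off the diff, the helper itself is not part of lotus.

```go
package main

import "fmt"

// jobState mirrors how `lotus-miner sealing jobs` maps RunWait onto a display
// state in the diff above. Sketch only.
func jobState(runWait int) string {
	switch {
	case runWait > 0:
		return fmt.Sprintf("assigned(%d)", runWait-1) // still queued on the scheduler
	case runWait == -1:
		return "ret-wait" // finished on the worker; waiting for the return value to be fetched
	default:
		return "running"
	}
}

func main() {
	for _, rw := range []int{-1, 0, 1, 3} {
		fmt.Println(rw, "=>", jobState(rw))
	}
}
```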
@@ -19,11 +19,12 @@ import (

 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
-	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
-	"github.com/filecoin-project/lotus/extern/sector-storage/stores"

 	"github.com/filecoin-project/lotus/chain/types"
 	lcli "github.com/filecoin-project/lotus/cli"
+	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
+	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )

 const metaFile = "sectorstore.json"
@@ -317,17 +318,17 @@ var storageFindCmd = &cli.Command{
 			Number: abi.SectorNumber(snum),
 		}

-		u, err := nodeApi.StorageFindSector(ctx, sid, stores.FTUnsealed, 0, false)
+		u, err := nodeApi.StorageFindSector(ctx, sid, storiface.FTUnsealed, 0, false)
 		if err != nil {
 			return xerrors.Errorf("finding unsealed: %w", err)
 		}

-		s, err := nodeApi.StorageFindSector(ctx, sid, stores.FTSealed, 0, false)
+		s, err := nodeApi.StorageFindSector(ctx, sid, storiface.FTSealed, 0, false)
 		if err != nil {
 			return xerrors.Errorf("finding sealed: %w", err)
 		}

-		c, err := nodeApi.StorageFindSector(ctx, sid, stores.FTCache, 0, false)
+		c, err := nodeApi.StorageFindSector(ctx, sid, storiface.FTCache, 0, false)
 		if err != nil {
 			return xerrors.Errorf("finding cache: %w", err)
 		}
@@ -1,6 +1,7 @@
 # Groups
 * [](#)
   * [Closing](#Closing)
+  * [Session](#Session)
   * [Shutdown](#Shutdown)
   * [Version](#Version)
 * [Auth](#Auth)
@@ -160,6 +161,7 @@
   * [StateMinerPreCommitDepositForPower](#StateMinerPreCommitDepositForPower)
   * [StateMinerProvingDeadline](#StateMinerProvingDeadline)
   * [StateMinerRecoveries](#StateMinerRecoveries)
+  * [StateMinerSectorAllocated](#StateMinerSectorAllocated)
   * [StateMinerSectorCount](#StateMinerSectorCount)
   * [StateMinerSectors](#StateMinerSectors)
   * [StateNetworkName](#StateNetworkName)
@@ -213,6 +215,15 @@ Inputs: `null`

 Response: `{}`

+### Session
+
+
+Perms: read
+
+Inputs: `null`
+
+Response: `"07070707-0707-0707-0707-070707070707"`
+
 ### Shutdown

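`Session` returns a random UUID generated per API-provider process, so a client can tell that the node or worker on the other end of the connection restarted simply by noticing the value change. A hedged sketch of that pattern; the `Session` method on `api.Common` comes from this PR, while the polling loop, interval and callback are assumptions.

```go
package example

import (
	"context"
	"time"

	"github.com/google/uuid"

	lapi "github.com/filecoin-project/lotus/api"
)

// watchSession polls Session and invokes onRestart when the remote API
// provider comes back with a different per-process UUID, i.e. it restarted.
// Sketch only; not part of lotus.
func watchSession(ctx context.Context, api lapi.Common, onRestart func(prev, cur uuid.UUID)) error {
	prev, err := api.Session(ctx)
	if err != nil {
		return err
	}

	t := time.NewTicker(10 * time.Second) // interval is an arbitrary choice
	defer t.Stop()

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-t.C:
			cur, err := api.Session(ctx)
			if err != nil {
				continue // transient RPC error; try again next tick
			}
			if cur != prev {
				onRestart(prev, cur)
				prev = cur
			}
		}
	}
}
```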
@ -4030,6 +4041,30 @@ Response:
|
|||||||
]
|
]
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### StateMinerSectorAllocated
|
||||||
|
StateMinerSectorAllocated checks if a sector is allocated
|
||||||
|
|
||||||
|
|
||||||
|
Perms: read
|
||||||
|
|
||||||
|
Inputs:
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
"f01234",
|
||||||
|
9,
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
Response: `true`
|
||||||
|
|
||||||
### StateMinerSectorCount
|
### StateMinerSectorCount
|
||||||
StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set
|
StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set
|
||||||
|
|
||||||
|
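A hedged example of how a caller might use the new `StateMinerSectorAllocated` to skip sector numbers the miner actor has already allocated; the method signature is the one added in this PR, while the scanning helper, its bound and its names are assumptions.

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

	lapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// firstFreeSector scans forward from start and returns the first sector number
// the miner actor has not allocated yet. Sketch only: a real caller would
// bound the scan differently and handle chain reorgs.
func firstFreeSector(ctx context.Context, api lapi.FullNode, maddr address.Address, start abi.SectorNumber) (abi.SectorNumber, error) {
	for n := start; n < start+1000; n++ {
		allocated, err := api.StateMinerSectorAllocated(ctx, maddr, n, types.EmptyTSK)
		if err != nil {
			return 0, err
		}
		if !allocated {
			return n, nil
		}
	}
	return 0, fmt.Errorf("no free sector number in [%d, %d)", start, start+1000)
}
```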
extern/sector-storage/cbor_gen.go (vendored, new file, 492 additions)
@@ -0,0 +1,492 @@
|
|||||||
|
// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package sectorstorage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
|
||||||
|
sealtasks "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
|
||||||
|
cbg "github.com/whyrusleeping/cbor-gen"
|
||||||
|
xerrors "golang.org/x/xerrors"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ = xerrors.Errorf
|
||||||
|
|
||||||
|
func (t *Call) MarshalCBOR(w io.Writer) error {
|
||||||
|
if t == nil {
|
||||||
|
_, err := w.Write(cbg.CborNull)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := w.Write([]byte{164}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
scratch := make([]byte, 9)
|
||||||
|
|
||||||
|
// t.ID (storiface.CallID) (struct)
|
||||||
|
if len("ID") > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field \"ID\" was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ID"))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string("ID")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := t.ID.MarshalCBOR(w); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// t.RetType (sectorstorage.ReturnType) (string)
|
||||||
|
if len("RetType") > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field \"RetType\" was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("RetType"))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string("RetType")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(t.RetType) > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field t.RetType was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.RetType))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string(t.RetType)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// t.State (sectorstorage.CallState) (uint64)
|
||||||
|
if len("State") > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field \"State\" was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("State"))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string("State")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.State)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// t.Result (sectorstorage.ManyBytes) (struct)
|
||||||
|
if len("Result") > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field \"Result\" was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Result"))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string("Result")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := t.Result.MarshalCBOR(w); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Call) UnmarshalCBOR(r io.Reader) error {
|
||||||
|
*t = Call{}
|
||||||
|
|
||||||
|
br := cbg.GetPeeker(r)
|
||||||
|
scratch := make([]byte, 8)
|
||||||
|
|
||||||
|
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if maj != cbg.MajMap {
|
||||||
|
return fmt.Errorf("cbor input should be of type map")
|
||||||
|
}
|
||||||
|
|
||||||
|
if extra > cbg.MaxLength {
|
||||||
|
return fmt.Errorf("Call: map struct too large (%d)", extra)
|
||||||
|
}
|
||||||
|
|
||||||
|
var name string
|
||||||
|
n := extra
|
||||||
|
|
||||||
|
for i := uint64(0); i < n; i++ {
|
||||||
|
|
||||||
|
{
|
||||||
|
sval, err := cbg.ReadStringBuf(br, scratch)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
name = string(sval)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch name {
|
||||||
|
// t.ID (storiface.CallID) (struct)
|
||||||
|
case "ID":
|
||||||
|
|
||||||
|
{
|
||||||
|
|
||||||
|
if err := t.ID.UnmarshalCBOR(br); err != nil {
|
||||||
|
return xerrors.Errorf("unmarshaling t.ID: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
// t.RetType (sectorstorage.ReturnType) (string)
|
||||||
|
case "RetType":
|
||||||
|
|
||||||
|
{
|
||||||
|
sval, err := cbg.ReadStringBuf(br, scratch)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
t.RetType = ReturnType(sval)
|
||||||
|
}
|
||||||
|
// t.State (sectorstorage.CallState) (uint64)
|
||||||
|
case "State":
|
||||||
|
|
||||||
|
{
|
||||||
|
|
||||||
|
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if maj != cbg.MajUnsignedInt {
|
||||||
|
return fmt.Errorf("wrong type for uint64 field")
|
||||||
|
}
|
||||||
|
t.State = CallState(extra)
|
||||||
|
|
||||||
|
}
|
||||||
|
// t.Result (sectorstorage.ManyBytes) (struct)
|
||||||
|
case "Result":
|
||||||
|
|
||||||
|
{
|
||||||
|
|
||||||
|
b, err := br.ReadByte()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if b != cbg.CborNull[0] {
|
||||||
|
if err := br.UnreadByte(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.Result = new(ManyBytes)
|
||||||
|
if err := t.Result.UnmarshalCBOR(br); err != nil {
|
||||||
|
return xerrors.Errorf("unmarshaling t.Result pointer: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (t *WorkState) MarshalCBOR(w io.Writer) error {
|
||||||
|
if t == nil {
|
||||||
|
_, err := w.Write(cbg.CborNull)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := w.Write([]byte{164}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
scratch := make([]byte, 9)
|
||||||
|
|
||||||
|
// t.ID (sectorstorage.WorkID) (struct)
|
||||||
|
if len("ID") > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field \"ID\" was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ID"))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string("ID")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := t.ID.MarshalCBOR(w); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// t.Status (sectorstorage.WorkStatus) (string)
|
||||||
|
if len("Status") > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field \"Status\" was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Status"))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string("Status")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(t.Status) > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field t.Status was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Status))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string(t.Status)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// t.WorkerCall (storiface.CallID) (struct)
|
||||||
|
if len("WorkerCall") > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field \"WorkerCall\" was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("WorkerCall"))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string("WorkerCall")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := t.WorkerCall.MarshalCBOR(w); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// t.WorkError (string) (string)
|
||||||
|
if len("WorkError") > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field \"WorkError\" was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("WorkError"))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string("WorkError")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(t.WorkError) > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field t.WorkError was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.WorkError))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string(t.WorkError)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *WorkState) UnmarshalCBOR(r io.Reader) error {
|
||||||
|
*t = WorkState{}
|
||||||
|
|
||||||
|
br := cbg.GetPeeker(r)
|
||||||
|
scratch := make([]byte, 8)
|
||||||
|
|
||||||
|
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if maj != cbg.MajMap {
|
||||||
|
return fmt.Errorf("cbor input should be of type map")
|
||||||
|
}
|
||||||
|
|
||||||
|
if extra > cbg.MaxLength {
|
||||||
|
return fmt.Errorf("WorkState: map struct too large (%d)", extra)
|
||||||
|
}
|
||||||
|
|
||||||
|
var name string
|
||||||
|
n := extra
|
||||||
|
|
||||||
|
for i := uint64(0); i < n; i++ {
|
||||||
|
|
||||||
|
{
|
||||||
|
sval, err := cbg.ReadStringBuf(br, scratch)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
name = string(sval)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch name {
|
||||||
|
// t.ID (sectorstorage.WorkID) (struct)
|
||||||
|
case "ID":
|
||||||
|
|
||||||
|
{
|
||||||
|
|
||||||
|
if err := t.ID.UnmarshalCBOR(br); err != nil {
|
||||||
|
return xerrors.Errorf("unmarshaling t.ID: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
// t.Status (sectorstorage.WorkStatus) (string)
|
||||||
|
case "Status":
|
||||||
|
|
||||||
|
{
|
||||||
|
sval, err := cbg.ReadStringBuf(br, scratch)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Status = WorkStatus(sval)
|
||||||
|
}
|
||||||
|
// t.WorkerCall (storiface.CallID) (struct)
|
||||||
|
case "WorkerCall":
|
||||||
|
|
||||||
|
{
|
||||||
|
|
||||||
|
if err := t.WorkerCall.UnmarshalCBOR(br); err != nil {
|
||||||
|
return xerrors.Errorf("unmarshaling t.WorkerCall: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
// t.WorkError (string) (string)
|
||||||
|
case "WorkError":
|
||||||
|
|
||||||
|
{
|
||||||
|
sval, err := cbg.ReadStringBuf(br, scratch)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
t.WorkError = string(sval)
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (t *WorkID) MarshalCBOR(w io.Writer) error {
|
||||||
|
if t == nil {
|
||||||
|
_, err := w.Write(cbg.CborNull)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := w.Write([]byte{162}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
scratch := make([]byte, 9)
|
||||||
|
|
||||||
|
// t.Method (sealtasks.TaskType) (string)
|
||||||
|
if len("Method") > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field \"Method\" was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Method"))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string("Method")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(t.Method) > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field t.Method was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Method))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string(t.Method)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// t.Params (string) (string)
|
||||||
|
if len("Params") > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field \"Params\" was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Params"))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string("Params")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(t.Params) > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field t.Params was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Params))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string(t.Params)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *WorkID) UnmarshalCBOR(r io.Reader) error {
|
||||||
|
*t = WorkID{}
|
||||||
|
|
||||||
|
br := cbg.GetPeeker(r)
|
||||||
|
scratch := make([]byte, 8)
|
||||||
|
|
||||||
|
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if maj != cbg.MajMap {
|
||||||
|
return fmt.Errorf("cbor input should be of type map")
|
||||||
|
}
|
||||||
|
|
||||||
|
if extra > cbg.MaxLength {
|
||||||
|
return fmt.Errorf("WorkID: map struct too large (%d)", extra)
|
||||||
|
}
|
||||||
|
|
||||||
|
var name string
|
||||||
|
n := extra
|
||||||
|
|
||||||
|
for i := uint64(0); i < n; i++ {
|
||||||
|
|
||||||
|
{
|
||||||
|
sval, err := cbg.ReadStringBuf(br, scratch)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
name = string(sval)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch name {
|
||||||
|
// t.Method (sealtasks.TaskType) (string)
|
||||||
|
case "Method":
|
||||||
|
|
||||||
|
{
|
||||||
|
sval, err := cbg.ReadStringBuf(br, scratch)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Method = sealtasks.TaskType(sval)
|
||||||
|
}
|
||||||
|
// t.Params (string) (string)
|
||||||
|
case "Params":
|
||||||
|
|
||||||
|
{
|
||||||
|
sval, err := cbg.ReadStringBuf(br, scratch)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Params = string(sval)
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
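The generated marshallers above exist so that `Call`, `WorkState` and `WorkID` records can be persisted through the statestores wired up in `init.go` and decoded again after a restart. An illustrative round trip, written as it might appear inside the `sectorstorage` package; the `bytes` import, the chosen task type and the params string are assumptions.

```go
// Illustration only (assumes the sectorstorage package context, a "bytes"
// import, and the sealtasks import used by the generated code above).
func roundTripWorkID() (WorkID, error) {
	in := WorkID{
		Method: sealtasks.TTPreCommit1,                 // assumed task-type constant
		Params: `{"Sector":{"Miner":1000,"Number":1}}`, // assumed JSON-encoded call parameters
	}

	// MarshalCBOR/UnmarshalCBOR are the generated methods shown above; a
	// bytes.Buffer stands in for the datastore round trip.
	var buf bytes.Buffer
	if err := in.MarshalCBOR(&buf); err != nil {
		return WorkID{}, err
	}

	var out WorkID
	if err := out.UnmarshalCBOR(&buf); err != nil {
		return WorkID{}, err
	}
	return out, nil // out now equals in
}
```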
extern/sector-storage/faults.go (vendored, 7 changes)
@@ -9,7 +9,8 @@ import (
 	"golang.org/x/xerrors"

 	"github.com/filecoin-project/go-state-types/abi"
-	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )

 // FaultTracker TODO: Track things more actively
@@ -32,7 +33,7 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
 		ctx, cancel := context.WithCancel(ctx)
 		defer cancel()

-		locked, err := m.index.StorageTryLock(ctx, sector, stores.FTSealed|stores.FTCache, stores.FTNone)
+		locked, err := m.index.StorageTryLock(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone)
 		if err != nil {
 			return xerrors.Errorf("acquiring sector lock: %w", err)
 		}
@@ -43,7 +44,7 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
 			return nil
 		}

-		lp, _, err := m.localStore.AcquireSector(ctx, sector, ssize, stores.FTSealed|stores.FTCache, stores.FTNone, stores.PathStorage, stores.AcquireMove)
+		lp, _, err := m.localStore.AcquireSector(ctx, sector, ssize, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
 		if err != nil {
 			log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err)
 			bad = append(bad, sector)
extern/sector-storage/ffiwrapper/basicfs/fs.go (vendored, 29 changes)
@@ -8,13 +8,12 @@ import (

 	"github.com/filecoin-project/go-state-types/abi"

-	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )

 type sectorFile struct {
 	abi.SectorID
-	stores.SectorFileType
+	storiface.SectorFileType
 }

 type Provider struct {
@@ -24,24 +23,24 @@ type Provider struct {
 	waitSector map[sectorFile]chan struct{}
 }

-func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, ptype stores.PathType) (stores.SectorPaths, func(), error) {
-	if err := os.Mkdir(filepath.Join(b.Root, stores.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint
-		return stores.SectorPaths{}, nil, err
+func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
+	if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint
+		return storiface.SectorPaths{}, nil, err
 	}
-	if err := os.Mkdir(filepath.Join(b.Root, stores.FTSealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint
-		return stores.SectorPaths{}, nil, err
+	if err := os.Mkdir(filepath.Join(b.Root, storiface.FTSealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint
+		return storiface.SectorPaths{}, nil, err
 	}
-	if err := os.Mkdir(filepath.Join(b.Root, stores.FTCache.String()), 0755); err != nil && !os.IsExist(err) { // nolint
-		return stores.SectorPaths{}, nil, err
+	if err := os.Mkdir(filepath.Join(b.Root, storiface.FTCache.String()), 0755); err != nil && !os.IsExist(err) { // nolint
+		return storiface.SectorPaths{}, nil, err
 	}

 	done := func() {}

-	out := stores.SectorPaths{
+	out := storiface.SectorPaths{
 		ID: id,
 	}

-	for _, fileType := range stores.PathTypes {
+	for _, fileType := range storiface.PathTypes {
 		if !existing.Has(fileType) && !allocate.Has(fileType) {
 			continue
 		}
@@ -61,10 +60,10 @@ func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing
 		case ch <- struct{}{}:
 		case <-ctx.Done():
 			done()
-			return stores.SectorPaths{}, nil, ctx.Err()
+			return storiface.SectorPaths{}, nil, ctx.Err()
 		}

-		path := filepath.Join(b.Root, fileType.String(), stores.SectorName(id))
+		path := filepath.Join(b.Root, fileType.String(), storiface.SectorName(id))

 		prevDone := done
 		done = func() {
@@ -75,11 +74,11 @@ func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing
 		if !allocate.Has(fileType) {
 			if _, err := os.Stat(path); os.IsNotExist(err) {
 				done()
-				return stores.SectorPaths{}, nil, storiface.ErrSectorNotFound
+				return storiface.SectorPaths{}, nil, storiface.ErrSectorNotFound
 			}
 		}

-		stores.SetPathByType(&out, fileType, path)
+		storiface.SetPathByType(&out, fileType, path)
 	}

 	return out, done, nil
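Most of the changes in this file (and the ones that follow) are a mechanical move of the sector-file-type bitmask and path helpers from `stores` to `storiface`. As a reminder of how that bitmask is used, a small self-contained example; the constants, `PathTypes`, `Has` and `String` all appear in the code being changed, and only the chosen combination is arbitrary.

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

func main() {
	// Ask for the sealed replica and its cache, but not the unsealed copy.
	existing := storiface.FTSealed | storiface.FTCache

	for _, ft := range storiface.PathTypes {
		fmt.Printf("%s requested: %v\n", ft.String(), existing.Has(ft))
	}
}
```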
extern/sector-storage/ffiwrapper/sealer_cgo.go (vendored, 25 changes)
@@ -21,7 +21,6 @@ import (
 	"github.com/filecoin-project/specs-storage/storage"

 	"github.com/filecoin-project/lotus/extern/sector-storage/fr32"
-	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 	"github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
 )
@@ -80,9 +79,9 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
 		}
 	}()

-	var stagedPath stores.SectorPaths
+	var stagedPath storiface.SectorPaths
 	if len(existingPieceSizes) == 0 {
-		stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, stores.FTUnsealed, stores.PathSealing)
+		stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, storiface.FTUnsealed, storiface.PathSealing)
 		if err != nil {
 			return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err)
 		}
@@ -92,7 +91,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
 			return abi.PieceInfo{}, xerrors.Errorf("creating unsealed sector file: %w", err)
 		}
 	} else {
-		stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, stores.PathSealing)
+		stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, 0, storiface.PathSealing)
 		if err != nil {
 			return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err)
 		}
@@ -199,12 +198,12 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s
 	maxPieceSize := abi.PaddedPieceSize(sb.ssize)

 	// try finding existing
-	unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, stores.PathStorage)
+	unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTNone, storiface.PathStorage)
 	var pf *partialFile

 	switch {
 	case xerrors.Is(err, storiface.ErrSectorNotFound):
-		unsealedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTNone, stores.FTUnsealed, stores.PathStorage)
+		unsealedPath, done, err = sb.sectors.AcquireSector(ctx, sector, storiface.FTNone, storiface.FTUnsealed, storiface.PathStorage)
 		if err != nil {
 			return xerrors.Errorf("acquire unsealed sector path (allocate): %w", err)
 		}
@@ -241,7 +240,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s
 		return nil
 	}

-	srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache|stores.FTSealed, stores.FTNone, stores.PathStorage)
+	srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTCache|storiface.FTSealed, storiface.FTNone, storiface.PathStorage)
 	if err != nil {
 		return xerrors.Errorf("acquire sealed sector paths: %w", err)
 	}
@@ -358,7 +357,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s
 }

 func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
-	path, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, stores.PathStorage)
+	path, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTNone, storiface.PathStorage)
 	if err != nil {
 		return false, xerrors.Errorf("acquire unsealed sector path: %w", err)
 	}
@@ -410,7 +409,7 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.Se
 }

 func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
-	paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTSealed|stores.FTCache, stores.PathSealing)
+	paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache, storiface.PathSealing)
 	if err != nil {
 		return nil, xerrors.Errorf("acquiring sector paths: %w", err)
 	}
@@ -467,7 +466,7 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke
 }

 func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) {
-	paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, stores.PathSealing)
+	paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing)
 	if err != nil {
 		return storage.SectorCids{}, xerrors.Errorf("acquiring sector paths: %w", err)
 	}
@@ -485,7 +484,7 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase
 }

 func (sb *Sealer) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
-	paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, stores.PathSealing)
+	paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing)
 	if err != nil {
 		return nil, xerrors.Errorf("acquire sector paths: %w", err)
 	}
@@ -535,7 +534,7 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU
 			}
 		}

-		paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, stores.PathStorage)
+		paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, 0, storiface.PathStorage)
 		if err != nil {
 			return xerrors.Errorf("acquiring sector cache path: %w", err)
 		}
@@ -575,7 +574,7 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU

 	}

-	paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache, 0, stores.PathStorage)
+	paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTCache, 0, storiface.PathStorage)
 	if err != nil {
 		return xerrors.Errorf("acquiring sector cache path: %w", err)
 	}
@@ -30,7 +30,7 @@ import (
 	ffi "github.com/filecoin-project/filecoin-ffi"

 	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs"
-	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )

 func init() {
@@ -125,7 +125,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec
 		t.Fatal("read wrong bytes")
 	}

-	p, sd, err := sp.AcquireSector(context.TODO(), si, stores.FTUnsealed, stores.FTNone, stores.PathStorage)
+	p, sd, err := sp.AcquireSector(context.TODO(), si, storiface.FTUnsealed, storiface.FTNone, storiface.PathStorage)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -206,7 +206,7 @@ func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) {
 }

 func corrupt(t *testing.T, sealer *Sealer, id abi.SectorID) {
-	paths, done, err := sealer.sectors.AcquireSector(context.Background(), id, stores.FTSealed, 0, stores.PathStorage)
+	paths, done, err := sealer.sectors.AcquireSector(context.Background(), id, storiface.FTSealed, 0, storiface.PathStorage)
 	require.NoError(t, err)
 	defer done()

extern/sector-storage/ffiwrapper/types.go (vendored, 7 changes)
@@ -12,13 +12,12 @@ import (
 	"github.com/filecoin-project/specs-storage/storage"

 	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs"
-	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )

 type Validator interface {
-	CanCommit(sector stores.SectorPaths) (bool, error)
-	CanProve(sector stores.SectorPaths) (bool, error)
+	CanCommit(sector storiface.SectorPaths) (bool, error)
+	CanProve(sector storiface.SectorPaths) (bool, error)
 }

 type StorageSealer interface {
@@ -45,7 +44,7 @@ type Verifier interface {
 type SectorProvider interface {
 	// * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist
 	// * returns an error when allocate is set, and existing isn't, and the sector exists
-	AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, ptype stores.PathType) (stores.SectorPaths, func(), error)
+	AcquireSector(ctx context.Context, id abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error)
 }

 var _ SectorProvider = &basicfs.Provider{}
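`SectorProvider.AcquireSector` hands back both the resolved paths and a release callback, and every call site in this diff follows the same acquire, check error, `done()` pattern. A hedged sketch of a well-behaved caller; the interface and types are from the files above, the wrapper function itself is an assumption.

```go
package example

import (
	"context"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

// withSealedPaths acquires the sealed replica and cache for a sector, runs the
// supplied function against the resolved paths, and always releases the
// acquisition via done(). Sketch of the usage pattern only.
func withSealedPaths(ctx context.Context, sp ffiwrapper.SectorProvider, id abi.SectorID, use func(storiface.SectorPaths) error) error {
	paths, done, err := sp.AcquireSector(ctx, id, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage)
	if err != nil {
		return xerrors.Errorf("acquiring sector paths: %w", err)
	}
	defer done()

	return use(paths)
}
```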
extern/sector-storage/ffiwrapper/verifier_cgo.go (vendored, 13 changes)
@@ -5,17 +5,14 @@ package ffiwrapper
 import (
 	"context"

-	proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
-
+	"go.opencensus.io/trace"
 	"golang.org/x/xerrors"

-	"github.com/filecoin-project/go-state-types/abi"
-
 	ffi "github.com/filecoin-project/filecoin-ffi"
+	"github.com/filecoin-project/go-state-types/abi"
+	proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"

-	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
-
-	"go.opencensus.io/trace"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )

 func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, error) {
@@ -79,7 +76,7 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn

 		sid := abi.SectorID{Miner: mid, Number: s.SectorNumber}

-		paths, d, err := sb.sectors.AcquireSector(ctx, sid, stores.FTCache|stores.FTSealed, 0, stores.PathStorage)
+		paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0, storiface.PathStorage)
 		if err != nil {
 			log.Warnw("failed to acquire sector, skipping", "sector", sid, "error", err)
 			skipped = append(skipped, sid)
extern/sector-storage/localworker.go (vendored, deleted, 314 lines removed)
@@ -1,314 +0,0 @@
|
|||||||
package sectorstorage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
|
|
||||||
"github.com/elastic/go-sysinfo"
|
|
||||||
"github.com/hashicorp/go-multierror"
|
|
||||||
"github.com/ipfs/go-cid"
|
|
||||||
"golang.org/x/xerrors"
|
|
||||||
|
|
||||||
ffi "github.com/filecoin-project/filecoin-ffi"
|
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
|
||||||
storage2 "github.com/filecoin-project/specs-storage/storage"
|
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
|
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
|
||||||
)
|
|
||||||
|
|
||||||
var pathTypes = []stores.SectorFileType{stores.FTUnsealed, stores.FTSealed, stores.FTCache}
|
|
||||||
|
|
||||||
type WorkerConfig struct {
|
|
||||||
SealProof abi.RegisteredSealProof
|
|
||||||
TaskTypes []sealtasks.TaskType
|
|
||||||
NoSwap bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type LocalWorker struct {
|
|
||||||
scfg *ffiwrapper.Config
|
|
||||||
storage stores.Store
|
|
||||||
localStore *stores.Local
|
|
||||||
sindex stores.SectorIndex
|
|
||||||
noSwap bool
|
|
||||||
|
|
||||||
acceptTasks map[sealtasks.TaskType]struct{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex) *LocalWorker {
|
|
||||||
acceptTasks := map[sealtasks.TaskType]struct{}{}
|
|
||||||
for _, taskType := range wcfg.TaskTypes {
|
|
||||||
acceptTasks[taskType] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
return &LocalWorker{
|
|
||||||
scfg: &ffiwrapper.Config{
|
|
||||||
SealProofType: wcfg.SealProof,
|
|
||||||
},
|
|
||||||
storage: store,
|
|
||||||
localStore: local,
|
|
||||||
sindex: sindex,
|
|
||||||
noSwap: wcfg.NoSwap,
|
|
||||||
|
|
||||||
acceptTasks: acceptTasks,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type localWorkerPathProvider struct {
|
|
||||||
w *LocalWorker
|
|
||||||
op stores.AcquireMode
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing stores.PathType) (stores.SectorPaths, func(), error) {
|
|
||||||
|
|
||||||
ssize, err := l.w.scfg.SealProofType.SectorSize()
|
|
||||||
if err != nil {
|
|
||||||
return stores.SectorPaths{}, nil, err
|
|
||||||
}
|
|
||||||
paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, ssize, existing, allocate, sealing, l.op)
|
|
||||||
if err != nil {
|
|
||||||
return stores.SectorPaths{}, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
releaseStorage, err := l.w.localStore.Reserve(ctx, sector, ssize, allocate, storageIDs, stores.FSOverheadSeal)
|
|
||||||
if err != nil {
|
|
||||||
return stores.SectorPaths{}, nil, xerrors.Errorf("reserving storage space: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, paths)
|
|
||||||
|
|
||||||
return paths, func() {
|
|
||||||
releaseStorage()
|
|
||||||
|
|
||||||
for _, fileType := range pathTypes {
|
|
||||||
if fileType&allocate == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
sid := stores.PathByType(storageIDs, fileType)
|
|
||||||
|
|
||||||
if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector, fileType, l.op == stores.AcquireMove); err != nil {
|
|
||||||
log.Errorf("declare sector error: %+v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *LocalWorker) sb() (ffiwrapper.Storage, error) {
|
|
||||||
return ffiwrapper.New(&localWorkerPathProvider{w: l}, l.scfg)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *LocalWorker) NewSector(ctx context.Context, sector abi.SectorID) error {
|
|
||||||
sb, err := l.sb()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return sb.NewSector(ctx, sector)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *LocalWorker) AddPiece(ctx context.Context, sector abi.SectorID, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
|
|
||||||
sb, err := l.sb()
|
|
||||||
if err != nil {
|
|
||||||
return abi.PieceInfo{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return sb.AddPiece(ctx, sector, epcs, sz, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *LocalWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error {
|
|
||||||
_, done, err := (&localWorkerPathProvider{w: l, op: am}).AcquireSector(ctx, sector, fileType, stores.FTNone, ptype)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
done()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage2.PreCommit1Out, err error) {
|
|
||||||
{
|
|
||||||
// cleanup previous failed attempts if they exist
|
|
||||||
if err := l.storage.Remove(ctx, sector, stores.FTSealed, true); err != nil {
|
|
||||||
return nil, xerrors.Errorf("cleaning up sealed data: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := l.storage.Remove(ctx, sector, stores.FTCache, true); err != nil {
|
|
||||||
return nil, xerrors.Errorf("cleaning up cache data: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
sb, err := l.sb()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return sb.SealPreCommit1(ctx, sector, ticket, pieces)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *LocalWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.PreCommit1Out) (cids storage2.SectorCids, err error) {
|
|
||||||
sb, err := l.sb()
|
|
||||||
if err != nil {
|
|
||||||
return storage2.SectorCids{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return sb.SealPreCommit2(ctx, sector, phase1Out)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *LocalWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage2.SectorCids) (output storage2.Commit1Out, err error) {
|
|
||||||
sb, err := l.sb()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return sb.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
|
|
}

func (l *LocalWorker) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.Commit1Out) (proof storage2.Proof, err error) {
    sb, err := l.sb()
    if err != nil {
        return nil, err
    }

    return sb.SealCommit2(ctx, sector, phase1Out)
}

func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage2.Range) error {
    sb, err := l.sb()
    if err != nil {
        return err
    }

    if err := sb.FinalizeSector(ctx, sector, keepUnsealed); err != nil {
        return xerrors.Errorf("finalizing sector: %w", err)
    }

    if len(keepUnsealed) == 0 {
        if err := l.storage.Remove(ctx, sector, stores.FTUnsealed, true); err != nil {
            return xerrors.Errorf("removing unsealed data: %w", err)
        }
    }

    return nil
}

func (l *LocalWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage2.Range) error {
    return xerrors.Errorf("implement me")
}

func (l *LocalWorker) Remove(ctx context.Context, sector abi.SectorID) error {
    var err error

    if rerr := l.storage.Remove(ctx, sector, stores.FTSealed, true); rerr != nil {
        err = multierror.Append(err, xerrors.Errorf("removing sector (sealed): %w", rerr))
    }
    if rerr := l.storage.Remove(ctx, sector, stores.FTCache, true); rerr != nil {
        err = multierror.Append(err, xerrors.Errorf("removing sector (cache): %w", rerr))
    }
    if rerr := l.storage.Remove(ctx, sector, stores.FTUnsealed, true); rerr != nil {
        err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr))
    }

    return err
}

func (l *LocalWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error {
    ssize, err := l.scfg.SealProofType.SectorSize()
    if err != nil {
        return err
    }
    if err := l.storage.MoveStorage(ctx, sector, ssize, types); err != nil {
        return xerrors.Errorf("moving sealed data to storage: %w", err)
    }

    return nil
}

func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error {
    sb, err := l.sb()
    if err != nil {
        return err
    }

    if err := sb.UnsealPiece(ctx, sector, index, size, randomness, cid); err != nil {
        return xerrors.Errorf("unsealing sector: %w", err)
    }

    if err := l.storage.RemoveCopies(ctx, sector, stores.FTSealed); err != nil {
        return xerrors.Errorf("removing source data: %w", err)
    }

    if err := l.storage.RemoveCopies(ctx, sector, stores.FTCache); err != nil {
        return xerrors.Errorf("removing source data: %w", err)
    }

    return nil
}

func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
    sb, err := l.sb()
    if err != nil {
        return false, err
    }

    return sb.ReadPiece(ctx, writer, sector, index, size)
}

func (l *LocalWorker) TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) {
    return l.acceptTasks, nil
}

func (l *LocalWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) {
    return l.localStore.Local(ctx)
}

func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) {
    hostname, err := os.Hostname() // TODO: allow overriding from config
    if err != nil {
        panic(err)
    }

    gpus, err := ffi.GetGPUDevices()
    if err != nil {
        log.Errorf("getting gpu devices failed: %+v", err)
    }

    h, err := sysinfo.Host()
    if err != nil {
        return storiface.WorkerInfo{}, xerrors.Errorf("getting host info: %w", err)
    }

    mem, err := h.Memory()
    if err != nil {
        return storiface.WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err)
    }

    memSwap := mem.VirtualTotal
    if l.noSwap {
        memSwap = 0
    }

    return storiface.WorkerInfo{
        Hostname: hostname,
        Resources: storiface.WorkerResources{
            MemPhysical: mem.Total,
            MemSwap:     memSwap,
            MemReserved: mem.VirtualUsed + mem.Total - mem.Available, // TODO: sub this process
            CPUs:        uint64(runtime.NumCPU()),
            GPUs:        gpus,
        },
    }, nil
}

func (l *LocalWorker) Closing(ctx context.Context) (<-chan struct{}, error) {
    return make(chan struct{}), nil
}

func (l *LocalWorker) Close() error {
    return nil
}

var _ Worker = &LocalWorker{}
345  extern/sector-storage/manager.go vendored
@@ -5,7 +5,9 @@ import (
     "errors"
     "io"
     "net/http"
+    "sync"
 
+    "github.com/google/uuid"
     "github.com/hashicorp/go-multierror"
     "github.com/ipfs/go-cid"
     logging "github.com/ipfs/go-log/v2"
@@ -13,6 +15,7 @@ import (
     "golang.org/x/xerrors"
 
     "github.com/filecoin-project/go-state-types/abi"
+    "github.com/filecoin-project/go-statestore"
     "github.com/filecoin-project/specs-storage/storage"
 
     "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
@@ -29,13 +32,7 @@ var ErrNoWorkers = errors.New("no suitable workers found")
 type URLs []string
 
 type Worker interface {
-    ffiwrapper.StorageSealer
-
-    MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error
-
-    Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error
-    UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error
-    ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (bool, error)
+    storiface.WorkerCalls
 
     TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error)
 
@@ -44,10 +41,9 @@ type Worker interface {
 
     Info(context.Context) (storiface.WorkerInfo, error)
 
-    // returns channel signalling worker shutdown
-    Closing(context.Context) (<-chan struct{}, error)
+    Session(context.Context) (uuid.UUID, error)
 
-    Close() error
+    Close() error // TODO: do we need this?
 }
 
 type SectorManager interface {
@@ -57,10 +53,12 @@ type SectorManager interface {
 
     ffiwrapper.StorageSealer
     storage.Prover
+    storiface.WorkerReturn
    FaultTracker
 }
 
-type WorkerID uint64
+type WorkerID uuid.UUID // worker session UUID
+var ClosedWorkerID = uuid.UUID{}
 
 type Manager struct {
     scfg *ffiwrapper.Config
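The change from a numeric WorkerID to a session UUID is what lets the scheduler tell a restarted worker apart from the one it was previously tracking: each worker reports a fresh random Session UUID when it (re)connects, so an ID mismatch means the old session is gone. A minimal, self-contained sketch of that comparison; the trackedWorker/sessionChanged names are illustrative, not part of this diff:

package main

import (
    "fmt"

    "github.com/google/uuid"
)

// WorkerID mirrors the new definition above: a worker is identified by its
// session UUID rather than by a monotonically increasing number.
type WorkerID uuid.UUID

// sessionChanged is a hypothetical helper: true when the session a worker
// reports now differs from the session it was originally tracked under.
func sessionChanged(tracked WorkerID, current uuid.UUID) bool {
    return uuid.UUID(tracked) != current
}

func main() {
    first := uuid.New()
    tracked := WorkerID(first)

    fmt.Println("same session:", !sessionChanged(tracked, first))
    fmt.Println("restarted:   ", sessionChanged(tracked, uuid.New()))
}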
@@ -74,6 +72,21 @@ type Manager struct {
     sched *scheduler
 
     storage.Prover
+
+    workLk sync.Mutex
+    work   *statestore.StateStore
+
+    callToWork map[storiface.CallID]WorkID
+    // used when we get an early return and there's no callToWork mapping
+    callRes map[storiface.CallID]chan result
+
+    results map[WorkID]result
+    waitRes map[WorkID]chan struct{}
+}
+
+type result struct {
+    r   interface{}
+    err error
 }
 
 type SealerConfig struct {
@@ -89,7 +102,10 @@ type SealerConfig struct {
 
 type StorageAuth http.Header
 
-func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc SealerConfig, urls URLs, sa StorageAuth) (*Manager, error) {
+type WorkerStateStore *statestore.StateStore
+type ManagerStateStore *statestore.StateStore
+
+func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc SealerConfig, urls URLs, sa StorageAuth, wss WorkerStateStore, mss ManagerStateStore) (*Manager, error) {
     lstor, err := stores.NewLocal(ctx, ls, si, urls)
     if err != nil {
         return nil, err
@@ -114,8 +130,16 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg
         sched: newScheduler(cfg.SealProofType),
 
         Prover: prover,
+
+        work:       mss,
+        callToWork: map[storiface.CallID]WorkID{},
+        callRes:    map[storiface.CallID]chan result{},
+        results:    map[WorkID]result{},
+        waitRes:    map[WorkID]chan struct{}{},
     }
 
+    m.setupWorkTracker()
+
     go m.sched.runSched()
 
     localTasks := []sealtasks.TaskType{
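New now takes the two extra statestore handles shown above; the tests later in this diff build them directly from datastores with statestore.New. A minimal sketch of that wiring, assuming throwaway in-memory datastores (a real miner would use namespaced on-disk datastores; the variable names are illustrative):

package main

import (
    "fmt"

    "github.com/filecoin-project/go-statestore"
    "github.com/ipfs/go-datastore"
)

func main() {
    // Worker-side call tracking and manager-side work tracking each get
    // their own statestore; here both are backed by in-memory datastores.
    wss := statestore.New(datastore.NewMapDatastore())
    mss := statestore.New(datastore.NewMapDatastore())

    fmt.Printf("worker store: %T, manager store: %T\n", wss, mss)
}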
@@ -140,7 +164,7 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg
     err = m.AddWorker(ctx, NewLocalWorker(WorkerConfig{
         SealProof: cfg.SealProofType,
         TaskTypes: localTasks,
-    }, stor, lstor, si))
+    }, stor, lstor, si, m, wss))
     if err != nil {
         return nil, xerrors.Errorf("adding local worker: %w", err)
     }
@@ -167,21 +191,7 @@ func (m *Manager) AddLocalStorage(ctx context.Context, path string) error {
 }
 
 func (m *Manager) AddWorker(ctx context.Context, w Worker) error {
-    info, err := w.Info(ctx)
-    if err != nil {
-        return xerrors.Errorf("getting worker info: %w", err)
-    }
-
-    m.sched.newWorkers <- &workerHandle{
-        w: w,
-        wt: &workTracker{
-            running: map[uint64]storiface.WorkerJob{},
-        },
-        info:      info,
-        preparing: &activeResources{},
-        active:    &activeResources{},
-    }
-    return nil
+    return m.sched.runWorker(ctx, w)
 }
 
 func (m *Manager) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@@ -197,9 +207,21 @@ func schedNop(context.Context, Worker) error {
     return nil
 }
 
-func schedFetch(sector abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) func(context.Context, Worker) error {
+func (m *Manager) schedFetch(sector abi.SectorID, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) func(context.Context, Worker) error {
     return func(ctx context.Context, worker Worker) error {
-        return worker.Fetch(ctx, sector, ft, ptype, am)
+        _, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, ft, ptype, am))
+        return err
+    }
+}
+
+func (m *Manager) readPiece(sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, rok *bool) func(ctx context.Context, w Worker) error {
+    return func(ctx context.Context, w Worker) error {
+        r, err := m.waitSimpleCall(ctx)(w.ReadPiece(ctx, sink, sector, offset, size))
+        if err != nil {
+            return err
+        }
+        *rok = r.(bool)
+        return nil
     }
 }
 
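The pattern these helpers introduce is used for every "simple" (untracked) call below: the worker method now returns a storiface.CallID immediately, and waitSimpleCall blocks on a per-call channel until the worker posts the result back through the manager's Return* methods. A stripped-down, self-contained sketch of that rendezvous; the tracker type and names are simplified stand-ins, not the real storiface API:

package main

import (
    "fmt"
    "sync"
)

// callID is a stand-in for storiface.CallID.
type callID string

type result struct {
    r   interface{}
    err error
}

// tracker is a toy version of the manager's callRes map: callers wait on a
// buffered channel keyed by call ID, and the worker's "return" fills it.
type tracker struct {
    lk      sync.Mutex
    callRes map[callID]chan result
}

func (t *tracker) channelFor(id callID) chan result {
    t.lk.Lock()
    defer t.lk.Unlock()
    ch, ok := t.callRes[id]
    if !ok {
        ch = make(chan result, 1)
        t.callRes[id] = ch
    }
    return ch
}

// waitCall blocks until returnResult delivers the value for this call ID.
func (t *tracker) waitCall(id callID) (interface{}, error) {
    res := <-t.channelFor(id)
    return res.r, res.err
}

// returnResult is what a worker's Return* RPC would ultimately trigger.
func (t *tracker) returnResult(id callID, r interface{}, err error) {
    t.channelFor(id) <- result{r: r, err: err}
}

func main() {
    t := &tracker{callRes: map[callID]chan result{}}

    go t.returnResult("call-1", "piece info", nil) // worker posts result back

    r, err := t.waitCall("call-1") // manager-side waiter blocks until then
    fmt.Println(r, err)
}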
@@ -209,13 +231,13 @@ func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sect
     ctx, cancel := context.WithCancel(ctx)
     defer cancel()
 
-    if err := m.index.StorageLock(ctx, sector, stores.FTUnsealed, stores.FTNone); err != nil {
+    if err := m.index.StorageLock(ctx, sector, storiface.FTUnsealed, storiface.FTNone); err != nil {
         returnErr = xerrors.Errorf("acquiring read sector lock: %w", err)
         return
     }
 
     // passing 0 spt because we only need it when allowFetch is true
-    best, err := m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, 0, false)
+    best, err := m.index.StorageFindSector(ctx, sector, storiface.FTUnsealed, 0, false)
     if err != nil {
         returnErr = xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err)
         return
@@ -225,17 +247,15 @@ func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sect
     if foundUnsealed { // append to existing
         // There is unsealed sector, see if we can read from it
 
-        selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
+        selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false)
 
-        err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
-            readOk, err = w.ReadPiece(ctx, sink, sector, offset, size)
-            return err
-        })
+        err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove),
+            m.readPiece(sink, sector, offset, size, &readOk))
         if err != nil {
             returnErr = xerrors.Errorf("reading piece from sealed sector: %w", err)
         }
     } else {
-        selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing)
+        selector = newAllocSelector(m.index, storiface.FTUnsealed, storiface.PathSealing)
     }
     return
 }
@@ -251,17 +271,17 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
     ctx, cancel := context.WithCancel(ctx)
     defer cancel()
 
-    if err := m.index.StorageLock(ctx, sector, stores.FTSealed|stores.FTCache, stores.FTUnsealed); err != nil {
+    if err := m.index.StorageLock(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTUnsealed); err != nil {
         return xerrors.Errorf("acquiring unseal sector lock: %w", err)
     }
 
     unsealFetch := func(ctx context.Context, worker Worker) error {
-        if err := worker.Fetch(ctx, sector, stores.FTSealed|stores.FTCache, stores.PathSealing, stores.AcquireCopy); err != nil {
+        if _, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.PathSealing, storiface.AcquireCopy)); err != nil {
             return xerrors.Errorf("copy sealed/cache sector data: %w", err)
         }
 
         if foundUnsealed {
-            if err := worker.Fetch(ctx, sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove); err != nil {
+            if _, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove)); err != nil {
                 return xerrors.Errorf("copy unsealed sector data: %w", err)
             }
         }
@@ -272,18 +292,18 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
         return xerrors.Errorf("cannot unseal piece (sector: %d, offset: %d size: %d) - unsealed cid is undefined", sector, offset, size)
     }
     err = m.sched.Schedule(ctx, sector, sealtasks.TTUnseal, selector, unsealFetch, func(ctx context.Context, w Worker) error {
-        return w.UnsealPiece(ctx, sector, offset, size, ticket, unsealed)
+        // TODO: make restartable
+        _, err := m.waitSimpleCall(ctx)(w.UnsealPiece(ctx, sector, offset, size, ticket, unsealed))
+        return err
     })
     if err != nil {
         return err
     }
 
-    selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
+    selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false)
 
-    err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
-        readOk, err = w.ReadPiece(ctx, sink, sector, offset, size)
-        return err
-    })
+    err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove),
+        m.readPiece(sink, sector, offset, size, &readOk))
     if err != nil {
         return xerrors.Errorf("reading piece from sealed sector: %w", err)
     }
@@ -304,25 +324,25 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie
     ctx, cancel := context.WithCancel(ctx)
     defer cancel()
 
-    if err := m.index.StorageLock(ctx, sector, stores.FTNone, stores.FTUnsealed); err != nil {
+    if err := m.index.StorageLock(ctx, sector, storiface.FTNone, storiface.FTUnsealed); err != nil {
         return abi.PieceInfo{}, xerrors.Errorf("acquiring sector lock: %w", err)
     }
 
     var selector WorkerSelector
     var err error
     if len(existingPieces) == 0 { // new
-        selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing)
+        selector = newAllocSelector(m.index, storiface.FTUnsealed, storiface.PathSealing)
     } else { // use existing
-        selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
+        selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false)
     }
 
     var out abi.PieceInfo
     err = m.sched.Schedule(ctx, sector, sealtasks.TTAddPiece, selector, schedNop, func(ctx context.Context, w Worker) error {
-        p, err := w.AddPiece(ctx, sector, existingPieces, sz, r)
+        p, err := m.waitSimpleCall(ctx)(w.AddPiece(ctx, sector, existingPieces, sz, r))
         if err != nil {
             return err
         }
-        out = p
+        out = p.(abi.PieceInfo)
         return nil
     })
 
@@ -333,129 +353,234 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke
     ctx, cancel := context.WithCancel(ctx)
     defer cancel()
 
-    if err := m.index.StorageLock(ctx, sector, stores.FTUnsealed, stores.FTSealed|stores.FTCache); err != nil {
+    wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTPreCommit1, sector, ticket, pieces)
+    if err != nil {
+        return nil, xerrors.Errorf("getWork: %w", err)
+    }
+    defer cancel()
+
+    var waitErr error
+    waitRes := func() {
+        p, werr := m.waitWork(ctx, wk)
+        if werr != nil {
+            waitErr = werr
+            return
+        }
+        out = p.(storage.PreCommit1Out)
+    }
+
+    if wait { // already in progress
+        waitRes()
+        return out, waitErr
+    }
+
+    if err := m.index.StorageLock(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache); err != nil {
         return nil, xerrors.Errorf("acquiring sector lock: %w", err)
     }
 
     // TODO: also consider where the unsealed data sits
 
-    selector := newAllocSelector(m.index, stores.FTCache|stores.FTSealed, stores.PathSealing)
+    selector := newAllocSelector(m.index, storiface.FTCache|storiface.FTSealed, storiface.PathSealing)
 
-    err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit1, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
-        p, err := w.SealPreCommit1(ctx, sector, ticket, pieces)
+    err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit1, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error {
+        err := m.startWork(ctx, wk)(w.SealPreCommit1(ctx, sector, ticket, pieces))
         if err != nil {
             return err
         }
-        out = p
+
+        waitRes()
         return nil
     })
+    if err != nil {
+        return nil, err
+    }
 
-    return out, err
+    return out, waitErr
 }
 
 func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (out storage.SectorCids, err error) {
     ctx, cancel := context.WithCancel(ctx)
     defer cancel()
 
-    if err := m.index.StorageLock(ctx, sector, stores.FTSealed, stores.FTCache); err != nil {
+    wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTPreCommit2, sector, phase1Out)
+    if err != nil {
+        return storage.SectorCids{}, xerrors.Errorf("getWork: %w", err)
+    }
+    defer cancel()
+
+    var waitErr error
+    waitRes := func() {
+        p, werr := m.waitWork(ctx, wk)
+        if werr != nil {
+            waitErr = werr
+            return
+        }
+        out = p.(storage.SectorCids)
+    }
+
+    if wait { // already in progress
+        waitRes()
+        return out, waitErr
+    }
+
+    if err := m.index.StorageLock(ctx, sector, storiface.FTSealed, storiface.FTCache); err != nil {
         return storage.SectorCids{}, xerrors.Errorf("acquiring sector lock: %w", err)
     }
 
-    selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, true)
+    selector := newExistingSelector(m.index, sector, storiface.FTCache|storiface.FTSealed, true)
 
-    err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit2, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
-        p, err := w.SealPreCommit2(ctx, sector, phase1Out)
+    err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit2, selector, m.schedFetch(sector, storiface.FTCache|storiface.FTSealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error {
+        err := m.startWork(ctx, wk)(w.SealPreCommit2(ctx, sector, phase1Out))
         if err != nil {
             return err
         }
-        out = p
+
+        waitRes()
         return nil
     })
-    return out, err
+    if err != nil {
+        return storage.SectorCids{}, err
+    }
+
+    return out, waitErr
 }
 
 func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (out storage.Commit1Out, err error) {
     ctx, cancel := context.WithCancel(ctx)
     defer cancel()
 
-    if err := m.index.StorageLock(ctx, sector, stores.FTSealed, stores.FTCache); err != nil {
+    wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTCommit1, sector, ticket, seed, pieces, cids)
+    if err != nil {
+        return storage.Commit1Out{}, xerrors.Errorf("getWork: %w", err)
+    }
+    defer cancel()
+
+    var waitErr error
+    waitRes := func() {
+        p, werr := m.waitWork(ctx, wk)
+        if werr != nil {
+            waitErr = werr
+            return
+        }
+        out = p.(storage.Commit1Out)
+    }
+
+    if wait { // already in progress
+        waitRes()
+        return out, waitErr
+    }
+
+    if err := m.index.StorageLock(ctx, sector, storiface.FTSealed, storiface.FTCache); err != nil {
         return storage.Commit1Out{}, xerrors.Errorf("acquiring sector lock: %w", err)
     }
 
     // NOTE: We set allowFetch to false in so that we always execute on a worker
     // with direct access to the data. We want to do that because this step is
     // generally very cheap / fast, and transferring data is not worth the effort
-    selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false)
+    selector := newExistingSelector(m.index, sector, storiface.FTCache|storiface.FTSealed, false)
 
-    err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit1, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
-        p, err := w.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
+    err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit1, selector, m.schedFetch(sector, storiface.FTCache|storiface.FTSealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error {
+        err := m.startWork(ctx, wk)(w.SealCommit1(ctx, sector, ticket, seed, pieces, cids))
         if err != nil {
             return err
         }
-        out = p
+
+        waitRes()
         return nil
     })
-    return out, err
+    if err != nil {
+        return nil, err
+    }
+
+    return out, waitErr
 }
 
 func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (out storage.Proof, err error) {
+    wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTCommit2, sector, phase1Out)
+    if err != nil {
+        return storage.Proof{}, xerrors.Errorf("getWork: %w", err)
+    }
+    defer cancel()
+
+    var waitErr error
+    waitRes := func() {
+        p, werr := m.waitWork(ctx, wk)
+        if werr != nil {
+            waitErr = werr
+            return
+        }
+        out = p.(storage.Proof)
+    }
+
+    if wait { // already in progress
+        waitRes()
+        return out, waitErr
+    }
+
     selector := newTaskSelector()
 
     err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit2, selector, schedNop, func(ctx context.Context, w Worker) error {
-        p, err := w.SealCommit2(ctx, sector, phase1Out)
+        err := m.startWork(ctx, wk)(w.SealCommit2(ctx, sector, phase1Out))
         if err != nil {
             return err
         }
-        out = p
+
+        waitRes()
         return nil
     })
 
-    return out, err
+    if err != nil {
+        return nil, err
+    }
+
+    return out, waitErr
 }
 
 func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
     ctx, cancel := context.WithCancel(ctx)
     defer cancel()
 
-    if err := m.index.StorageLock(ctx, sector, stores.FTNone, stores.FTSealed|stores.FTUnsealed|stores.FTCache); err != nil {
+    if err := m.index.StorageLock(ctx, sector, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil {
         return xerrors.Errorf("acquiring sector lock: %w", err)
     }
 
-    unsealed := stores.FTUnsealed
+    unsealed := storiface.FTUnsealed
     {
-        unsealedStores, err := m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, 0, false)
+        unsealedStores, err := m.index.StorageFindSector(ctx, sector, storiface.FTUnsealed, 0, false)
         if err != nil {
             return xerrors.Errorf("finding unsealed sector: %w", err)
         }
 
         if len(unsealedStores) == 0 { // Is some edge-cases unsealed sector may not exist already, that's fine
-            unsealed = stores.FTNone
+            unsealed = storiface.FTNone
         }
     }
 
-    selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false)
+    selector := newExistingSelector(m.index, sector, storiface.FTCache|storiface.FTSealed, false)
 
     err := m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector,
-        schedFetch(sector, stores.FTCache|stores.FTSealed|unsealed, stores.PathSealing, stores.AcquireMove),
+        m.schedFetch(sector, storiface.FTCache|storiface.FTSealed|unsealed, storiface.PathSealing, storiface.AcquireMove),
         func(ctx context.Context, w Worker) error {
-            return w.FinalizeSector(ctx, sector, keepUnsealed)
+            _, err := m.waitSimpleCall(ctx)(w.FinalizeSector(ctx, sector, keepUnsealed))
+            return err
         })
     if err != nil {
         return err
     }
 
-    fetchSel := newAllocSelector(m.index, stores.FTCache|stores.FTSealed, stores.PathStorage)
+    fetchSel := newAllocSelector(m.index, storiface.FTCache|storiface.FTSealed, storiface.PathStorage)
     moveUnsealed := unsealed
     {
         if len(keepUnsealed) == 0 {
-            moveUnsealed = stores.FTNone
+            moveUnsealed = storiface.FTNone
         }
     }
 
     err = m.sched.Schedule(ctx, sector, sealtasks.TTFetch, fetchSel,
-        schedFetch(sector, stores.FTCache|stores.FTSealed|moveUnsealed, stores.PathStorage, stores.AcquireMove),
+        m.schedFetch(sector, storiface.FTCache|storiface.FTSealed|moveUnsealed, storiface.PathStorage, storiface.AcquireMove),
         func(ctx context.Context, w Worker) error {
-            return w.MoveStorage(ctx, sector, stores.FTCache|stores.FTSealed|moveUnsealed)
+            _, err := m.waitSimpleCall(ctx)(w.MoveStorage(ctx, sector, storiface.FTCache|storiface.FTSealed|moveUnsealed))
+            return err
         })
     if err != nil {
         return xerrors.Errorf("moving sector to storage: %w", err)
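All of the seal steps above follow the same shape for tracked work: waitWork parks the caller on a per-WorkID channel, and returnResult stores the value and closes that channel to wake the waiter, which is what lets a call re-issued after a restart simply pick up the pending result. A self-contained toy version of that results/waitRes rendezvous (simplified: no persistence and no CallID mapping):

package main

import (
    "fmt"
    "sync"
)

type workID string

type res struct {
    r   interface{}
    err error
}

// workTracker is a toy version of the manager's results/waitRes pair: the
// result for a work ID is stored once, and the waitRes channel is closed to
// wake any goroutine blocked in waitWork.
type workTracker struct {
    lk      sync.Mutex
    results map[workID]res
    waitRes map[workID]chan struct{}
}

func (t *workTracker) waitWork(id workID) (interface{}, error) {
    t.lk.Lock()
    if r, ok := t.results[id]; ok { // result already delivered
        delete(t.results, id)
        t.lk.Unlock()
        return r.r, r.err
    }
    ch, ok := t.waitRes[id]
    if !ok {
        ch = make(chan struct{})
        t.waitRes[id] = ch
    }
    t.lk.Unlock()

    <-ch // closed by returnResult once the worker reports back

    t.lk.Lock()
    defer t.lk.Unlock()
    r := t.results[id]
    delete(t.results, id)
    return r.r, r.err
}

func (t *workTracker) returnResult(id workID, r interface{}, err error) {
    t.lk.Lock()
    defer t.lk.Unlock()

    t.results[id] = res{r: r, err: err}
    if ch, ok := t.waitRes[id]; ok {
        close(ch) // broadcast: every waiter for this work ID wakes up
        delete(t.waitRes, id)
    }
}

func main() {
    t := &workTracker{
        results: map[workID]res{},
        waitRes: map[workID]chan struct{}{},
    }

    go t.returnResult("pc1(sector-1)", "precommit1 output", nil)

    out, err := t.waitWork("pc1(sector-1)")
    fmt.Println(out, err)
}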
@@ -473,25 +598,69 @@ func (m *Manager) Remove(ctx context.Context, sector abi.SectorID) error {
     ctx, cancel := context.WithCancel(ctx)
     defer cancel()
 
-    if err := m.index.StorageLock(ctx, sector, stores.FTNone, stores.FTSealed|stores.FTUnsealed|stores.FTCache); err != nil {
+    if err := m.index.StorageLock(ctx, sector, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil {
         return xerrors.Errorf("acquiring sector lock: %w", err)
     }
 
     var err error
 
-    if rerr := m.storage.Remove(ctx, sector, stores.FTSealed, true); rerr != nil {
+    if rerr := m.storage.Remove(ctx, sector, storiface.FTSealed, true); rerr != nil {
         err = multierror.Append(err, xerrors.Errorf("removing sector (sealed): %w", rerr))
     }
-    if rerr := m.storage.Remove(ctx, sector, stores.FTCache, true); rerr != nil {
+    if rerr := m.storage.Remove(ctx, sector, storiface.FTCache, true); rerr != nil {
         err = multierror.Append(err, xerrors.Errorf("removing sector (cache): %w", rerr))
     }
-    if rerr := m.storage.Remove(ctx, sector, stores.FTUnsealed, true); rerr != nil {
+    if rerr := m.storage.Remove(ctx, sector, storiface.FTUnsealed, true); rerr != nil {
         err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr))
     }
 
     return err
 }
 
+func (m *Manager) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error {
+    return m.returnResult(callID, pi, err)
+}
+
+func (m *Manager) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error {
+    return m.returnResult(callID, p1o, err)
+}
+
+func (m *Manager) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error {
+    return m.returnResult(callID, sealed, err)
+}
+
+func (m *Manager) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error {
+    return m.returnResult(callID, out, err)
+}
+
+func (m *Manager) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error {
+    return m.returnResult(callID, proof, err)
+}
+
+func (m *Manager) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err string) error {
+    return m.returnResult(callID, nil, err)
+}
+
+func (m *Manager) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err string) error {
+    return m.returnResult(callID, nil, err)
+}
+
+func (m *Manager) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err string) error {
+    return m.returnResult(callID, nil, err)
+}
+
+func (m *Manager) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err string) error {
+    return m.returnResult(callID, nil, err)
+}
+
+func (m *Manager) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err string) error {
+    return m.returnResult(callID, ok, err)
+}
+
+func (m *Manager) ReturnFetch(ctx context.Context, callID storiface.CallID, err string) error {
+    return m.returnResult(callID, nil, err)
+}
+
 func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error) {
     l, err := m.localStore.Local(ctx)
     if err != nil {
375  extern/sector-storage/manager_calltracker.go vendored (new file)
@@ -0,0 +1,375 @@
package sectorstorage

import (
    "context"
    "crypto/sha256"
    "encoding/hex"
    "encoding/json"
    "errors"
    "fmt"
    "os"

    "golang.org/x/xerrors"

    "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
    "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

type WorkID struct {
    Method sealtasks.TaskType
    Params string // json [...params]
}

func (w WorkID) String() string {
    return fmt.Sprintf("%s(%s)", w.Method, w.Params)
}

var _ fmt.Stringer = &WorkID{}

type WorkStatus string

const (
    wsStarted WorkStatus = "started" // task started, not scheduled/running on a worker yet
    wsRunning WorkStatus = "running" // task running on a worker, waiting for worker return
    wsDone    WorkStatus = "done"    // task returned from the worker, results available
)

type WorkState struct {
    ID WorkID

    Status WorkStatus

    WorkerCall storiface.CallID // Set when entering wsRunning
    WorkError  string           // Status = wsDone, set when failed to start work
}

func newWorkID(method sealtasks.TaskType, params ...interface{}) (WorkID, error) {
    pb, err := json.Marshal(params)
    if err != nil {
        return WorkID{}, xerrors.Errorf("marshaling work params: %w", err)
    }

    if len(pb) > 256 {
        s := sha256.Sum256(pb)
        pb = []byte(hex.EncodeToString(s[:]))
    }

    return WorkID{
        Method: method,
        Params: string(pb),
    }, nil
}

func (m *Manager) setupWorkTracker() {
    m.workLk.Lock()
    defer m.workLk.Unlock()

    var ids []WorkState
    if err := m.work.List(&ids); err != nil {
        log.Error("getting work IDs") // quite bad
        return
    }

    for _, st := range ids {
        wid := st.ID

        if os.Getenv("LOTUS_MINER_ABORT_UNFINISHED_WORK") == "1" {
            st.Status = wsDone
        }

        switch st.Status {
        case wsStarted:
            log.Warnf("dropping non-running work %s", wid)

            if err := m.work.Get(wid).End(); err != nil {
                log.Errorf("cleannig up work state for %s", wid)
            }
        case wsDone:
            // realistically this shouldn't ever happen as we return results
            // immediately after getting them
            log.Warnf("dropping done work, no result, wid %s", wid)

            if err := m.work.Get(wid).End(); err != nil {
                log.Errorf("cleannig up work state for %s", wid)
            }
        case wsRunning:
            m.callToWork[st.WorkerCall] = wid
        }
    }
}

// returns wait=true when the task is already tracked/running
func (m *Manager) getWork(ctx context.Context, method sealtasks.TaskType, params ...interface{}) (wid WorkID, wait bool, cancel func(), err error) {
    wid, err = newWorkID(method, params)
    if err != nil {
        return WorkID{}, false, nil, xerrors.Errorf("creating WorkID: %w", err)
    }

    m.workLk.Lock()
    defer m.workLk.Unlock()

    have, err := m.work.Has(wid)
    if err != nil {
        return WorkID{}, false, nil, xerrors.Errorf("failed to check if the task is already tracked: %w", err)
    }

    if !have {
        err := m.work.Begin(wid, &WorkState{
            ID:     wid,
            Status: wsStarted,
        })
        if err != nil {
            return WorkID{}, false, nil, xerrors.Errorf("failed to track task start: %w", err)
        }

        return wid, false, func() {
            m.workLk.Lock()
            defer m.workLk.Unlock()

            have, err := m.work.Has(wid)
            if err != nil {
                log.Errorf("cancel: work has error: %+v", err)
                return
            }

            if !have {
                return // expected / happy path
            }

            var ws WorkState
            if err := m.work.Get(wid).Get(&ws); err != nil {
                log.Errorf("cancel: get work %s: %+v", wid, err)
                return
            }

            switch ws.Status {
            case wsStarted:
                log.Warn("canceling started (not running) work %s", wid)

                if err := m.work.Get(wid).End(); err != nil {
                    log.Errorf("cancel: failed to cancel started work %s: %+v", wid, err)
                    return
                }
            case wsDone:
                // TODO: still remove?
                log.Warn("cancel called on work %s in 'done' state", wid)
            case wsRunning:
                log.Warn("cancel called on work %s in 'running' state (manager shutting down?)", wid)
            }

        }, nil
    }

    // already started

    return wid, true, func() {
        // TODO
    }, nil
}

func (m *Manager) startWork(ctx context.Context, wk WorkID) func(callID storiface.CallID, err error) error {
    return func(callID storiface.CallID, err error) error {
        m.workLk.Lock()
        defer m.workLk.Unlock()

        if err != nil {
            merr := m.work.Get(wk).Mutate(func(ws *WorkState) error {
                ws.Status = wsDone
                ws.WorkError = err.Error()
                return nil
            })

            if merr != nil {
                return xerrors.Errorf("failed to start work and to track the error; merr: %+v, err: %w", merr, err)
            }
            return err
        }

        err = m.work.Get(wk).Mutate(func(ws *WorkState) error {
            _, ok := m.results[wk]
            if ok {
                log.Warn("work returned before we started tracking it")
                ws.Status = wsDone
            } else {
                ws.Status = wsRunning
            }
            ws.WorkerCall = callID
            return nil
        })
        if err != nil {
            return xerrors.Errorf("registering running work: %w", err)
        }

        m.callToWork[callID] = wk

        return nil
    }
}

func (m *Manager) waitWork(ctx context.Context, wid WorkID) (interface{}, error) {
    m.workLk.Lock()

    var ws WorkState
    if err := m.work.Get(wid).Get(&ws); err != nil {
        m.workLk.Unlock()
        return nil, xerrors.Errorf("getting work status: %w", err)
    }

    if ws.Status == wsStarted {
        m.workLk.Unlock()
        return nil, xerrors.Errorf("waitWork called for work in 'started' state")
    }

    // sanity check
    wk := m.callToWork[ws.WorkerCall]
    if wk != wid {
        m.workLk.Unlock()
        return nil, xerrors.Errorf("wrong callToWork mapping for call %s; expected %s, got %s", ws.WorkerCall, wid, wk)
    }

    // make sure we don't have the result ready
    cr, ok := m.callRes[ws.WorkerCall]
    if ok {
        delete(m.callToWork, ws.WorkerCall)

        if len(cr) == 1 {
            err := m.work.Get(wk).End()
            if err != nil {
                m.workLk.Unlock()
                // Not great, but not worth discarding potentially multi-hour computation over this
                log.Errorf("marking work as done: %+v", err)
            }

            res := <-cr
            delete(m.callRes, ws.WorkerCall)

            m.workLk.Unlock()
            return res.r, res.err
        }

        m.workLk.Unlock()
        return nil, xerrors.Errorf("something else in waiting on callRes")
    }

    ch, ok := m.waitRes[wid]
    if !ok {
        ch = make(chan struct{})
        m.waitRes[wid] = ch
    }
    m.workLk.Unlock()

    select {
    case <-ch:
        m.workLk.Lock()
        defer m.workLk.Unlock()

        res := m.results[wid]
        delete(m.results, wid)

        _, ok := m.callToWork[ws.WorkerCall]
        if ok {
            delete(m.callToWork, ws.WorkerCall)
        }

        err := m.work.Get(wk).End()
        if err != nil {
            // Not great, but not worth discarding potentially multi-hour computation over this
            log.Errorf("marking work as done: %+v", err)
        }

        return res.r, res.err
    case <-ctx.Done():
        return nil, xerrors.Errorf("waiting for work result: %w", ctx.Err())
    }
}

func (m *Manager) waitSimpleCall(ctx context.Context) func(callID storiface.CallID, err error) (interface{}, error) {
    return func(callID storiface.CallID, err error) (interface{}, error) {
        if err != nil {
            return nil, err
        }

        return m.waitCall(ctx, callID)
    }
}

func (m *Manager) waitCall(ctx context.Context, callID storiface.CallID) (interface{}, error) {
    m.workLk.Lock()
    _, ok := m.callToWork[callID]
    if ok {
        m.workLk.Unlock()
        return nil, xerrors.Errorf("can't wait for calls related to work")
    }

    ch, ok := m.callRes[callID]
    if !ok {
        ch = make(chan result, 1)
        m.callRes[callID] = ch
    }
    m.workLk.Unlock()

    defer func() {
        m.workLk.Lock()
        defer m.workLk.Unlock()

        delete(m.callRes, callID)
    }()

    select {
    case res := <-ch:
        return res.r, res.err
    case <-ctx.Done():
        return nil, xerrors.Errorf("waiting for call result: %w", ctx.Err())
    }
}

func (m *Manager) returnResult(callID storiface.CallID, r interface{}, serr string) error {
    var err error
    if serr != "" {
        err = errors.New(serr)
    }

    res := result{
        r:   r,
        err: err,
    }

    m.sched.workTracker.onDone(callID)

    m.workLk.Lock()
    defer m.workLk.Unlock()

    wid, ok := m.callToWork[callID]
    if !ok {
        rch, ok := m.callRes[callID]
        if !ok {
            rch = make(chan result, 1)
            m.callRes[callID] = rch
        }

        if len(rch) > 0 {
            return xerrors.Errorf("callRes channel already has a response")
        }
        if cap(rch) == 0 {
            return xerrors.Errorf("expected rch to be buffered")
        }

        rch <- res
        return nil
    }

    _, ok = m.results[wid]
    if ok {
        return xerrors.Errorf("result for call %v already reported", wid)
    }

    m.results[wid] = res

    _, found := m.waitRes[wid]
    if found {
        close(m.waitRes[wid])
        delete(m.waitRes, wid)
    }

    return nil
}
224
extern/sector-storage/manager_test.go
vendored
224
extern/sector-storage/manager_test.go
vendored
@ -9,18 +9,23 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"github.com/ipfs/go-datastore"
|
||||||
|
logging "github.com/ipfs/go-log"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/go-statestore"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
|
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
|
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||||
|
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
|
||||||
|
|
||||||
"github.com/google/uuid"
|
|
||||||
logging "github.com/ipfs/go-log"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@ -80,9 +85,8 @@ func (t *testStorage) Stat(path string) (fsutil.FsStat, error) {
|
|||||||
|
|
||||||
var _ stores.LocalStorage = &testStorage{}
|
var _ stores.LocalStorage = &testStorage{}
|
||||||
|
|
||||||
func newTestMgr(ctx context.Context, t *testing.T) (*Manager, *stores.Local, *stores.Remote, *stores.Index) {
|
func newTestMgr(ctx context.Context, t *testing.T, ds datastore.Datastore) (*Manager, *stores.Local, *stores.Remote, *stores.Index, func()) {
|
||||||
st := newTestStorage(t)
|
st := newTestStorage(t)
|
||||||
defer st.cleanup()
|
|
||||||
|
|
||||||
si := stores.NewIndex()
|
si := stores.NewIndex()
|
||||||
cfg := &ffiwrapper.Config{
|
cfg := &ffiwrapper.Config{
|
||||||
@ -109,18 +113,27 @@ func newTestMgr(ctx context.Context, t *testing.T) (*Manager, *stores.Local, *st
|
|||||||
sched: newScheduler(cfg.SealProofType),
|
sched: newScheduler(cfg.SealProofType),
|
||||||
|
|
||||||
Prover: prover,
|
Prover: prover,
|
||||||
|
|
||||||
|
work: statestore.New(ds),
|
||||||
|
callToWork: map[storiface.CallID]WorkID{},
|
||||||
|
callRes: map[storiface.CallID]chan result{},
|
||||||
|
results: map[WorkID]result{},
|
||||||
|
waitRes: map[WorkID]chan struct{}{},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
m.setupWorkTracker()
|
||||||
|
|
||||||
go m.sched.runSched()
|
go m.sched.runSched()
|
||||||
|
|
||||||
return m, lstor, stor, si
|
return m, lstor, stor, si, st.cleanup
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSimple(t *testing.T) {
|
func TestSimple(t *testing.T) {
|
||||||
logging.SetAllLoggers(logging.LevelDebug)
|
logging.SetAllLoggers(logging.LevelDebug)
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
m, lstor, _, _ := newTestMgr(ctx, t)
|
m, lstor, _, _, cleanup := newTestMgr(ctx, t, datastore.NewMapDatastore())
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
localTasks := []sealtasks.TaskType{
|
localTasks := []sealtasks.TaskType{
|
||||||
sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch,
|
sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch,
|
||||||
@ -129,7 +142,44 @@ func TestSimple(t *testing.T) {
|
|||||||
err := m.AddWorker(ctx, newTestWorker(WorkerConfig{
|
err := m.AddWorker(ctx, newTestWorker(WorkerConfig{
|
||||||
SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1,
|
SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1,
|
||||||
TaskTypes: localTasks,
|
TaskTypes: localTasks,
|
||||||
}, lstor))
|
}, lstor, m))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
sid := abi.SectorID{Miner: 1000, Number: 1}
|
||||||
|
|
||||||
|
pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127)))
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, abi.PaddedPieceSize(1024), pi.Size)
|
||||||
|
|
||||||
|
piz, err := m.AddPiece(ctx, sid, nil, 1016, bytes.NewReader(make([]byte, 1016)[:]))
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, abi.PaddedPieceSize(1024), piz.Size)
|
||||||
|
|
||||||
|
pieces := []abi.PieceInfo{pi, piz}
|
||||||
|
|
||||||
|
ticket := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9}
|
||||||
|
|
||||||
|
_, err = m.SealPreCommit1(ctx, sid, ticket, pieces)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRedoPC1(t *testing.T) {
|
||||||
|
logging.SetAllLoggers(logging.LevelDebug)
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
m, lstor, _, _, cleanup := newTestMgr(ctx, t, datastore.NewMapDatastore())
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
localTasks := []sealtasks.TaskType{
|
||||||
|
sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch,
|
||||||
|
}
|
||||||
|
|
||||||
|
tw := newTestWorker(WorkerConfig{
|
||||||
|
SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1,
|
||||||
|
TaskTypes: localTasks,
|
||||||
|
}, lstor, m)
|
||||||
|
|
||||||
|
err := m.AddWorker(ctx, tw)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
sid := abi.SectorID{Miner: 1000, Number: 1}
|
sid := abi.SectorID{Miner: 1000, Number: 1}
|
||||||
@ -149,4 +199,156 @@ func TestSimple(t *testing.T) {
 	_, err = m.SealPreCommit1(ctx, sid, ticket, pieces)
 	require.NoError(t, err)
+
+	// tell mock ffi that we expect PC1 again
+	require.NoError(t, tw.mockSeal.ForceState(sid, 0)) // sectorPacking
+
+	_, err = m.SealPreCommit1(ctx, sid, ticket, pieces)
+	require.NoError(t, err)
+
+	require.Equal(t, 2, tw.pc1s)
+}
+
+// Manager restarts in the middle of a task, restarts it, it completes
+func TestRestartManager(t *testing.T) {
+	logging.SetAllLoggers(logging.LevelDebug)
+
+	ctx, done := context.WithCancel(context.Background())
+	defer done()
+
+	ds := datastore.NewMapDatastore()
+
+	m, lstor, _, _, cleanup := newTestMgr(ctx, t, ds)
+	defer cleanup()
+
+	localTasks := []sealtasks.TaskType{
+		sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch,
+	}
+
+	tw := newTestWorker(WorkerConfig{
+		SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1,
+		TaskTypes: localTasks,
+	}, lstor, m)
+
+	err := m.AddWorker(ctx, tw)
+	require.NoError(t, err)
+
+	sid := abi.SectorID{Miner: 1000, Number: 1}
+
+	pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127)))
+	require.NoError(t, err)
+	require.Equal(t, abi.PaddedPieceSize(1024), pi.Size)
+
+	piz, err := m.AddPiece(ctx, sid, nil, 1016, bytes.NewReader(make([]byte, 1016)[:]))
+	require.NoError(t, err)
+	require.Equal(t, abi.PaddedPieceSize(1024), piz.Size)
+
+	pieces := []abi.PieceInfo{pi, piz}
+
+	ticket := abi.SealRandomness{0, 9, 9, 9, 9, 9, 9, 9}
+
+	tw.pc1lk.Lock()
+	tw.pc1wait = &sync.WaitGroup{}
+	tw.pc1wait.Add(1)
+
+	var cwg sync.WaitGroup
+	cwg.Add(1)
+
+	var perr error
+	go func() {
+		defer cwg.Done()
+		_, perr = m.SealPreCommit1(ctx, sid, ticket, pieces)
+	}()
+
+	tw.pc1wait.Wait()
+
+	require.NoError(t, m.Close(ctx))
+	tw.ret = nil
+
+	cwg.Wait()
+	require.Error(t, perr)
+
+	m, _, _, _, cleanup2 := newTestMgr(ctx, t, ds)
+	defer cleanup2()
+
+	tw.ret = m // simulate jsonrpc auto-reconnect
+	err = m.AddWorker(ctx, tw)
+	require.NoError(t, err)
+
+	tw.pc1lk.Unlock()
+
+	_, err = m.SealPreCommit1(ctx, sid, ticket, pieces)
+	require.NoError(t, err)
+
+	require.Equal(t, 1, tw.pc1s)
+}
+
+// Worker restarts in the middle of a task, task fails after restart
+func TestRestartWorker(t *testing.T) {
+	logging.SetAllLoggers(logging.LevelDebug)
+
+	ctx, done := context.WithCancel(context.Background())
+	defer done()
+
+	ds := datastore.NewMapDatastore()
+
+	m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, ds)
+	defer cleanup()
+
+	localTasks := []sealtasks.TaskType{
+		sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch,
+	}
+
+	wds := datastore.NewMapDatastore()
+
+	arch := make(chan chan apres)
+	w := newLocalWorker(func() (ffiwrapper.Storage, error) {
+		return &testExec{apch: arch}, nil
+	}, WorkerConfig{
+		SealProof: 0,
+		TaskTypes: localTasks,
+	}, stor, lstor, idx, m, statestore.New(wds))
+
+	err := m.AddWorker(ctx, w)
+	require.NoError(t, err)
+
+	sid := abi.SectorID{Miner: 1000, Number: 1}
+
+	apDone := make(chan struct{})
+
+	go func() {
+		defer close(apDone)
+
+		_, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127)))
+		require.Error(t, err)
+	}()
+
+	// kill the worker
+	<-arch
+	require.NoError(t, w.Close())
+
+	for {
+		if len(m.WorkerStats()) == 0 {
+			break
+		}
+
+		time.Sleep(time.Millisecond * 3)
+	}
+
+	// restart the worker
+	w = newLocalWorker(func() (ffiwrapper.Storage, error) {
+		return &testExec{apch: arch}, nil
+	}, WorkerConfig{
+		SealProof: 0,
+		TaskTypes: localTasks,
+	}, stor, lstor, idx, m, statestore.New(wds))
+
+	err = m.AddWorker(ctx, w)
+	require.NoError(t, err)
+
+	<-apDone
+
+	time.Sleep(12 * time.Millisecond)
+	uf, err := w.ct.unfinished()
+	require.NoError(t, err)
+	require.Empty(t, uf)
+}
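TestRestartWorker above detects a dead worker by polling the manager's WorkerStats map until the disconnected worker disappears. A minimal sketch of that wait as a reusable helper; the helper name and the *Manager parameter are assumptions for illustration, not part of this patch:

// waitWorkersGone polls the scheduler until it has dropped every worker or the
// timeout expires. Illustrative only; it mirrors the polling loop in the test.
func waitWorkersGone(m *Manager, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if len(m.WorkerStats()) == 0 {
			return true
		}
		time.Sleep(3 * time.Millisecond)
	}
	return false
}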
extern/sector-storage/mock/mock.go
@ -127,6 +127,19 @@ func (mgr *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) {
 	return id, nil
 }
 
+func (mgr *SectorMgr) ForceState(sid abi.SectorID, st int) error {
+	mgr.lk.Lock()
+	ss, ok := mgr.sectors[sid]
+	mgr.lk.Unlock()
+	if !ok {
+		return xerrors.Errorf("no sector with id %d in storage", sid)
+	}
+
+	ss.state = st
+
+	return nil
+}
+
 func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
 	mgr.lk.Lock()
 	ss, ok := mgr.sectors[sid]
@ -406,6 +419,50 @@ func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStPr
 	return bad, nil
 }
 
+func (mgr *SectorMgr) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error {
+	panic("not supported")
+}
+
+func (mgr *SectorMgr) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error {
+	panic("not supported")
+}
+
+func (mgr *SectorMgr) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error {
+	panic("not supported")
+}
+
+func (mgr *SectorMgr) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error {
+	panic("not supported")
+}
+
+func (mgr *SectorMgr) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error {
+	panic("not supported")
+}
+
+func (mgr *SectorMgr) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err string) error {
+	panic("not supported")
+}
+
+func (mgr *SectorMgr) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err string) error {
+	panic("not supported")
+}
+
+func (mgr *SectorMgr) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err string) error {
+	panic("not supported")
+}
+
+func (mgr *SectorMgr) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err string) error {
+	panic("not supported")
+}
+
+func (mgr *SectorMgr) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err string) error {
+	panic("not supported")
+}
+
+func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID, err string) error {
+	panic("not supported")
+}
+
 func (m mockVerif) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) {
 	if len(svi.Proof) != 1920 {
 		return false, nil
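The Return* methods stubbed out above exist because worker calls are now asynchronous: a worker hands back a storiface.CallID immediately and reports the result later through one of these callbacks; the mock SectorMgr still seals synchronously, so it never expects them. A hedged sketch of the reporting side, using only the ReturnAddPiece signature shown above (the helper and its parameters are illustrative, not lotus code):

// finishAddPiece reports the outcome of an async AddPiece call back to
// whatever implements ReturnAddPiece (the manager, in this patch).
func finishAddPiece(ctx context.Context, ret interface {
	ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error
}, callID storiface.CallID, pi abi.PieceInfo, taskErr error) error {
	errStr := ""
	if taskErr != nil {
		errStr = taskErr.Error() // errors travel as strings over the RPC boundary
	}
	return ret.ReturnAddPiece(ctx, callID, pi, errStr)
}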
extern/sector-storage/roprov.go
@ -8,6 +8,7 @@ import (
 	"github.com/filecoin-project/go-state-types/abi"
 
 	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )
 
 type readonlyProvider struct {
@ -16,30 +17,30 @@ type readonlyProvider struct {
 	spt   abi.RegisteredSealProof
 }
 
-func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing stores.PathType) (stores.SectorPaths, func(), error) {
+func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) {
-	if allocate != stores.FTNone {
+	if allocate != storiface.FTNone {
-		return stores.SectorPaths{}, nil, xerrors.New("read-only storage")
+		return storiface.SectorPaths{}, nil, xerrors.New("read-only storage")
 	}
 
 	ssize, err := l.spt.SectorSize()
 	if err != nil {
-		return stores.SectorPaths{}, nil, xerrors.Errorf("failed to determine sector size: %w", err)
+		return storiface.SectorPaths{}, nil, xerrors.Errorf("failed to determine sector size: %w", err)
 	}
 
 	ctx, cancel := context.WithCancel(ctx)
 
 	// use TryLock to avoid blocking
-	locked, err := l.index.StorageTryLock(ctx, id, existing, stores.FTNone)
+	locked, err := l.index.StorageTryLock(ctx, id, existing, storiface.FTNone)
 	if err != nil {
 		cancel()
-		return stores.SectorPaths{}, nil, xerrors.Errorf("acquiring sector lock: %w", err)
+		return storiface.SectorPaths{}, nil, xerrors.Errorf("acquiring sector lock: %w", err)
 	}
 	if !locked {
 		cancel()
-		return stores.SectorPaths{}, nil, xerrors.Errorf("failed to acquire sector lock")
+		return storiface.SectorPaths{}, nil, xerrors.Errorf("failed to acquire sector lock")
 	}
 
-	p, _, err := l.stor.AcquireSector(ctx, id, ssize, existing, allocate, sealing, stores.AcquireMove)
+	p, _, err := l.stor.AcquireSector(ctx, id, ssize, existing, allocate, sealing, storiface.AcquireMove)
 
 	return p, cancel, err
 }
extern/sector-storage/sched.go
@ -2,7 +2,6 @@ package sectorstorage
 
 import (
 	"context"
-	"fmt"
 	"math/rand"
 	"sort"
 	"sync"
@ -53,22 +52,20 @@ type WorkerSelector interface {
 type scheduler struct {
 	spt abi.RegisteredSealProof
 
 	workersLk  sync.RWMutex
-	nextWorker WorkerID
 	workers    map[WorkerID]*workerHandle
 
-	newWorkers chan *workerHandle
-
-	watchClosing  chan WorkerID
-	workerClosing chan WorkerID
-
 	schedule       chan *workerRequest
 	windowRequests chan *schedWindowRequest
+	workerChange   chan struct{} // worker added / changed/freed resources
+	workerDisable  chan workerDisableReq
 
 	// owned by the sh.runSched goroutine
 	schedQueue  *requestQueue
 	openWindows []*schedWindowRequest
 
+	workTracker *workTracker
+
 	info chan func(interface{})
 
 	closing chan struct{}
@ -77,7 +74,7 @@ type scheduler struct {
 }
 
 type workerHandle struct {
-	w Worker
+	workerRpc Worker
 
 	info storiface.WorkerInfo
 
@ -89,8 +86,7 @@ type workerHandle struct {
 	wndLk         sync.Mutex
 	activeWindows []*schedWindow
 
-	// stats / tracking
-	wt *workTracker
+	enabled bool
 
 	// for sync manager goroutine closing
 	cleanupStarted bool
@ -109,6 +105,12 @@ type schedWindow struct {
 	todo      []*workerRequest
 }
 
+type workerDisableReq struct {
+	activeWindows []*schedWindow
+	wid           WorkerID
+	done          func()
+}
+
 type activeResources struct {
 	memUsedMin uint64
 	memUsedMax uint64
@ -144,19 +146,20 @@ func newScheduler(spt abi.RegisteredSealProof) *scheduler {
 	return &scheduler{
 		spt: spt,
 
-		nextWorker: 0,
 		workers:    map[WorkerID]*workerHandle{},
 
-		newWorkers: make(chan *workerHandle),
-
-		watchClosing:  make(chan WorkerID),
-		workerClosing: make(chan WorkerID),
-
 		schedule:       make(chan *workerRequest),
 		windowRequests: make(chan *schedWindowRequest, 20),
+		workerChange:   make(chan struct{}, 20),
+		workerDisable:  make(chan workerDisableReq),
 
 		schedQueue: &requestQueue{},
 
+		workTracker: &workTracker{
+			done:    map[storiface.CallID]struct{}{},
+			running: map[storiface.CallID]trackedWork{},
+		},
+
 		info: make(chan func(interface{})),
 
 		closing: make(chan struct{}),
@ -220,21 +223,19 @@ type SchedDiagInfo struct {
 func (sh *scheduler) runSched() {
 	defer close(sh.closed)
 
-	go sh.runWorkerWatcher()
-
 	iw := time.After(InitWait)
 	var initialised bool
 
 	for {
 		var doSched bool
+		var toDisable []workerDisableReq
 
 		select {
-		case w := <-sh.newWorkers:
-			sh.newWorker(w)
-		case wid := <-sh.workerClosing:
-			sh.dropWorker(wid)
+		case <-sh.workerChange:
+			doSched = true
+		case dreq := <-sh.workerDisable:
+			toDisable = append(toDisable, dreq)
+			doSched = true
 
 		case req := <-sh.schedule:
 			sh.schedQueue.Push(req)
 			doSched = true
@ -263,6 +264,9 @@ func (sh *scheduler) runSched() {
 		loop:
 			for {
 				select {
+				case <-sh.workerChange:
+				case dreq := <-sh.workerDisable:
+					toDisable = append(toDisable, dreq)
 				case req := <-sh.schedule:
 					sh.schedQueue.Push(req)
 					if sh.testSync != nil {
@ -275,6 +279,28 @@ func (sh *scheduler) runSched() {
 			}
 		}
 
+		for _, req := range toDisable {
+			for _, window := range req.activeWindows {
+				for _, request := range window.todo {
+					sh.schedQueue.Push(request)
+				}
+			}
+
+			openWindows := make([]*schedWindowRequest, 0, len(sh.openWindows))
+			for _, window := range sh.openWindows {
+				if window.worker != req.wid {
+					openWindows = append(openWindows, window)
+				}
+			}
+			sh.openWindows = openWindows
+
+			sh.workersLk.Lock()
+			sh.workers[req.wid].enabled = false
+			sh.workersLk.Unlock()
+
+			req.done()
+		}
+
 		sh.trySched()
 	}
 
@ -294,6 +320,9 @@ func (sh *scheduler) diag() SchedDiagInfo {
 		})
 	}
 
+	sh.workersLk.RLock()
+	defer sh.workersLk.RUnlock()
+
 	for _, window := range sh.openWindows {
 		out.OpenWindows = append(out.OpenWindows, window.worker)
 	}
@ -318,13 +347,14 @@ func (sh *scheduler) trySched() {
 	*/
 
+	sh.workersLk.RLock()
+	defer sh.workersLk.RUnlock()
+
 	windows := make([]schedWindow, len(sh.openWindows))
 	acceptableWindows := make([][]int, sh.schedQueue.Len())
 
 	log.Debugf("SCHED %d queued; %d open windows", sh.schedQueue.Len(), len(windows))
 
-	sh.workersLk.RLock()
-	defer sh.workersLk.RUnlock()
 	if len(sh.openWindows) == 0 {
 		// nothing to schedule on
 		return
@ -353,11 +383,16 @@ func (sh *scheduler) trySched() {
 		for wnd, windowRequest := range sh.openWindows {
 			worker, ok := sh.workers[windowRequest.worker]
 			if !ok {
-				log.Errorf("worker referenced by windowRequest not found (worker: %d)", windowRequest.worker)
+				log.Errorf("worker referenced by windowRequest not found (worker: %s)", windowRequest.worker)
 				// TODO: How to move forward here?
 				continue
 			}
 
+			if !worker.enabled {
+				log.Debugw("skipping disabled worker", "worker", windowRequest.worker)
+				continue
+			}
+
 			// TODO: allow bigger windows
 			if !windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, "schedAcceptable", worker.info.Resources) {
 				continue
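The toDisable handling added to runSched above is the heart of the restart story on the scheduler side: instead of deleting a worker whose connection dropped, the scheduler re-queues everything from that worker's active windows, strips its open window requests, and flips enabled to false so trySched skips it until the session comes back. A minimal sketch of the re-queue step in isolation, assuming the requestQueue and schedWindow types from this file (illustrative, not the exact production code):

// requeueWindows pushes every pending request from the given windows back onto
// the scheduler queue so another (or the restarted) worker can pick them up.
func requeueWindows(queue *requestQueue, windows []*schedWindow) {
	for _, window := range windows {
		for _, request := range window.todo {
			queue.Push(request)
		}
	}
}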
@ -495,320 +530,6 @@ func (sh *scheduler) trySched() {
 	sh.openWindows = newOpenWindows
 }
 
-func (sh *scheduler) runWorker(wid WorkerID) {
-	var ready sync.WaitGroup
-	ready.Add(1)
-	defer ready.Wait()
-
-	go func() {
-		sh.workersLk.RLock()
-		worker, found := sh.workers[wid]
-		sh.workersLk.RUnlock()
-
-		ready.Done()
-
-		if !found {
-			panic(fmt.Sprintf("worker %d not found", wid))
-		}
-
-		defer close(worker.closedMgr)
-
-		scheduledWindows := make(chan *schedWindow, SchedWindows)
-		taskDone := make(chan struct{}, 1)
-		windowsRequested := 0
-
-		ctx, cancel := context.WithCancel(context.TODO())
-		defer cancel()
-
-		workerClosing, err := worker.w.Closing(ctx)
-		if err != nil {
-			return
-		}
-
-		defer func() {
-			log.Warnw("Worker closing", "workerid", wid)
-
-			// TODO: close / return all queued tasks
-		}()
-
-		for {
-			// ask for more windows if we need them
-			for ; windowsRequested < SchedWindows; windowsRequested++ {
-				select {
-				case sh.windowRequests <- &schedWindowRequest{
-					worker: wid,
-					done:   scheduledWindows,
-				}:
-				case <-sh.closing:
-					return
-				case <-workerClosing:
-					return
-				case <-worker.closingMgr:
-					return
-				}
-			}
-
-			select {
-			case w := <-scheduledWindows:
-				worker.wndLk.Lock()
-				worker.activeWindows = append(worker.activeWindows, w)
-				worker.wndLk.Unlock()
-			case <-taskDone:
-				log.Debugw("task done", "workerid", wid)
-			case <-sh.closing:
-				return
-			case <-workerClosing:
-				return
-			case <-worker.closingMgr:
-				return
-			}
-
-			sh.workersLk.RLock()
-			worker.wndLk.Lock()
-
-			windowsRequested -= sh.workerCompactWindows(worker, wid)
-
-		assignLoop:
-			// process windows in order
-			for len(worker.activeWindows) > 0 {
-				firstWindow := worker.activeWindows[0]
-
-				// process tasks within a window, preferring tasks at lower indexes
-				for len(firstWindow.todo) > 0 {
-					tidx := -1
-
-					worker.lk.Lock()
-					for t, todo := range firstWindow.todo {
-						needRes := ResourceTable[todo.taskType][sh.spt]
-						if worker.preparing.canHandleRequest(needRes, wid, "startPreparing", worker.info.Resources) {
-							tidx = t
-							break
-						}
-					}
-					worker.lk.Unlock()
-
-					if tidx == -1 {
-						break assignLoop
-					}
-
-					todo := firstWindow.todo[tidx]
-
-					log.Debugf("assign worker sector %d", todo.sector.Number)
-					err := sh.assignWorker(taskDone, wid, worker, todo)
-
-					if err != nil {
-						log.Error("assignWorker error: %+v", err)
-						go todo.respond(xerrors.Errorf("assignWorker error: %w", err))
-					}
-
-					// Note: we're not freeing window.allocated resources here very much on purpose
-					copy(firstWindow.todo[tidx:], firstWindow.todo[tidx+1:])
-					firstWindow.todo[len(firstWindow.todo)-1] = nil
-					firstWindow.todo = firstWindow.todo[:len(firstWindow.todo)-1]
-				}
-
-				copy(worker.activeWindows, worker.activeWindows[1:])
-				worker.activeWindows[len(worker.activeWindows)-1] = nil
-				worker.activeWindows = worker.activeWindows[:len(worker.activeWindows)-1]
-
-				windowsRequested--
-			}
-
-			worker.wndLk.Unlock()
-			sh.workersLk.RUnlock()
-		}
-	}()
-}
-
-func (sh *scheduler) workerCompactWindows(worker *workerHandle, wid WorkerID) int {
-	// move tasks from older windows to newer windows if older windows
-	// still can fit them
-	if len(worker.activeWindows) > 1 {
-		for wi, window := range worker.activeWindows[1:] {
-			lower := worker.activeWindows[wi]
-			var moved []int
-
-			for ti, todo := range window.todo {
-				needRes := ResourceTable[todo.taskType][sh.spt]
-				if !lower.allocated.canHandleRequest(needRes, wid, "compactWindows", worker.info.Resources) {
-					continue
-				}
-
-				moved = append(moved, ti)
-				lower.todo = append(lower.todo, todo)
-				lower.allocated.add(worker.info.Resources, needRes)
-				window.allocated.free(worker.info.Resources, needRes)
-			}
-
-			if len(moved) > 0 {
-				newTodo := make([]*workerRequest, 0, len(window.todo)-len(moved))
-				for i, t := range window.todo {
-					if len(moved) > 0 && moved[0] == i {
-						moved = moved[1:]
-						continue
-					}
-
-					newTodo = append(newTodo, t)
-				}
-				window.todo = newTodo
-			}
-		}
-	}
-
-	var compacted int
-	var newWindows []*schedWindow
-
-	for _, window := range worker.activeWindows {
-		if len(window.todo) == 0 {
-			compacted++
-			continue
-		}
-
-		newWindows = append(newWindows, window)
-	}
-
-	worker.activeWindows = newWindows
-
-	return compacted
-}
-
-func (sh *scheduler) assignWorker(taskDone chan struct{}, wid WorkerID, w *workerHandle, req *workerRequest) error {
-	needRes := ResourceTable[req.taskType][sh.spt]
-
-	w.lk.Lock()
-	w.preparing.add(w.info.Resources, needRes)
-	w.lk.Unlock()
-
-	go func() {
-		err := req.prepare(req.ctx, w.wt.worker(w.w))
-		sh.workersLk.Lock()
-
-		if err != nil {
-			w.lk.Lock()
-			w.preparing.free(w.info.Resources, needRes)
-			w.lk.Unlock()
-			sh.workersLk.Unlock()
-
-			select {
-			case taskDone <- struct{}{}:
-			case <-sh.closing:
-				log.Warnf("scheduler closed while sending response (prepare error: %+v)", err)
-			}
-
-			select {
-			case req.ret <- workerResponse{err: err}:
-			case <-req.ctx.Done():
-				log.Warnf("request got cancelled before we could respond (prepare error: %+v)", err)
-			case <-sh.closing:
-				log.Warnf("scheduler closed while sending response (prepare error: %+v)", err)
-			}
-			return
-		}
-
-		err = w.active.withResources(wid, w.info.Resources, needRes, &sh.workersLk, func() error {
-			w.lk.Lock()
-			w.preparing.free(w.info.Resources, needRes)
-			w.lk.Unlock()
-			sh.workersLk.Unlock()
-			defer sh.workersLk.Lock() // we MUST return locked from this function
-
-			select {
-			case taskDone <- struct{}{}:
-			case <-sh.closing:
-			}
-
-			err = req.work(req.ctx, w.wt.worker(w.w))
-
-			select {
-			case req.ret <- workerResponse{err: err}:
-			case <-req.ctx.Done():
-				log.Warnf("request got cancelled before we could respond")
-			case <-sh.closing:
-				log.Warnf("scheduler closed while sending response")
-			}
-
-			return nil
-		})
-
-		sh.workersLk.Unlock()
-
-		// This error should always be nil, since nothing is setting it, but just to be safe:
-		if err != nil {
-			log.Errorf("error executing worker (withResources): %+v", err)
-		}
-	}()
-
-	return nil
-}
-
-func (sh *scheduler) newWorker(w *workerHandle) {
-	w.closedMgr = make(chan struct{})
-	w.closingMgr = make(chan struct{})
-
-	sh.workersLk.Lock()
-
-	id := sh.nextWorker
-	sh.workers[id] = w
-	sh.nextWorker++
-
-	sh.workersLk.Unlock()
-
-	sh.runWorker(id)
-
-	select {
-	case sh.watchClosing <- id:
-	case <-sh.closing:
-		return
-	}
-}
-
-func (sh *scheduler) dropWorker(wid WorkerID) {
-	sh.workersLk.Lock()
-	defer sh.workersLk.Unlock()
-
-	w := sh.workers[wid]
-
-	sh.workerCleanup(wid, w)
-
-	delete(sh.workers, wid)
-}
-
-func (sh *scheduler) workerCleanup(wid WorkerID, w *workerHandle) {
-	select {
-	case <-w.closingMgr:
-	default:
-		close(w.closingMgr)
-	}
-
-	sh.workersLk.Unlock()
-	select {
-	case <-w.closedMgr:
-	case <-time.After(time.Second):
-		log.Errorf("timeout closing worker manager goroutine %d", wid)
-	}
-	sh.workersLk.Lock()
-
-	if !w.cleanupStarted {
-		w.cleanupStarted = true
-
-		newWindows := make([]*schedWindowRequest, 0, len(sh.openWindows))
-		for _, window := range sh.openWindows {
-			if window.worker != wid {
-				newWindows = append(newWindows, window)
-			}
-		}
-		sh.openWindows = newWindows
-
-		log.Debugf("dropWorker %d", wid)
-
-		go func() {
-			if err := w.w.Close(); err != nil {
-				log.Warnf("closing worker %d: %+v", err)
-			}
-		}()
-	}
-}
-
 func (sh *scheduler) schedClose() {
 	sh.workersLk.Lock()
 	defer sh.workersLk.Unlock()
extern/sector-storage/sched_test.go
@ -10,6 +10,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/google/uuid"
 	"github.com/ipfs/go-cid"
 	logging "github.com/ipfs/go-log/v2"
 	"github.com/stretchr/testify/require"
@ -43,58 +44,58 @@ type schedTestWorker struct {
 	paths []stores.StoragePath
 
 	closed  bool
-	closing chan struct{}
+	session uuid.UUID
 }
 
-func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
+func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) {
+func (s *schedTestWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
+func (s *schedTestWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) {
+func (s *schedTestWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
+func (s *schedTestWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
+func (s *schedTestWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) Remove(ctx context.Context, sector abi.SectorID) error {
+func (s *schedTestWorker) Remove(ctx context.Context, sector abi.SectorID) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) NewSector(ctx context.Context, sector abi.SectorID) error {
+func (s *schedTestWorker) NewSector(ctx context.Context, sector abi.SectorID) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
+func (s *schedTestWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error {
+func (s *schedTestWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) Fetch(ctx context.Context, id abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error {
+func (s *schedTestWorker) Fetch(ctx context.Context, id abi.SectorID, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error {
+func (s *schedTestWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
+func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
 	panic("implement me")
 }
 
@ -121,15 +122,15 @@ func (s *schedTestWorker) Info(ctx context.Context) (storiface.WorkerInfo, error
 	}, nil
 }
 
-func (s *schedTestWorker) Closing(ctx context.Context) (<-chan struct{}, error) {
-	return s.closing, nil
+func (s *schedTestWorker) Session(context.Context) (uuid.UUID, error) {
+	return s.session, nil
 }
 
 func (s *schedTestWorker) Close() error {
 	if !s.closed {
 		log.Info("close schedTestWorker")
 		s.closed = true
-		close(s.closing)
+		s.session = uuid.UUID{}
 	}
 	return nil
 }
@ -142,7 +143,7 @@ func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name str
 		taskTypes: taskTypes,
 		paths:     []stores.StoragePath{{ID: "bb-8", Weight: 2, LocalPath: "<octopus>food</octopus>", CanSeal: true, CanStore: true}},
 
-		closing: make(chan struct{}),
+		session: uuid.New(),
 	}
 
 	for _, path := range w.paths {
@ -160,18 +161,7 @@ func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name str
 		require.NoError(t, err)
 	}
 
-	info, err := w.Info(context.TODO())
-	require.NoError(t, err)
-
-	sched.newWorkers <- &workerHandle{
-		w: w,
-		wt: &workTracker{
-			running: map[uint64]storiface.WorkerJob{},
-		},
-		info:      info,
-		preparing: &activeResources{},
-		active:    &activeResources{},
-	}
+	require.NoError(t, sched.runWorker(context.TODO(), w))
 }
 
 func TestSchedStartStop(t *testing.T) {
@ -215,7 +205,7 @@ func TestSched(t *testing.T) {
 			done := make(chan struct{})
 			rm.done[taskName] = done
 
-			sel := newAllocSelector(index, stores.FTCache, stores.PathSealing)
+			sel := newAllocSelector(index, storiface.FTCache, storiface.PathSealing)
 
 			rm.wg.Add(1)
 			go func() {
@ -435,7 +425,7 @@ func TestSched(t *testing.T) {
 
 	type line struct {
 		storiface.WorkerJob
-		wid uint64
+		wid uuid.UUID
 	}
 
 	lines := make([]line, 0)
@ -539,8 +529,8 @@ func BenchmarkTrySched(b *testing.B) {
 	b.StopTimer()
 
 	sched := newScheduler(spt)
-	sched.workers[0] = &workerHandle{
-		w: nil,
+	sched.workers[WorkerID{}] = &workerHandle{
+		workerRpc: nil,
 		info: storiface.WorkerInfo{
 			Hostname:  "t",
 			Resources: decentWorkerResources,
@ -551,7 +541,7 @@ func BenchmarkTrySched(b *testing.B) {
 
 	for i := 0; i < windows; i++ {
 		sched.openWindows = append(sched.openWindows, &schedWindowRequest{
-			worker: 0,
+			worker: WorkerID{},
 			done:   make(chan *schedWindow, 1000),
 		})
 	}
@ -601,8 +591,13 @@ func TestWindowCompact(t *testing.T) {
 		wh.activeWindows = append(wh.activeWindows, window)
 	}
 
-	n := sh.workerCompactWindows(wh, 0)
-	require.Equal(t, len(start)-len(expect), n)
+	sw := schedWorker{
+		sched:  &sh,
+		worker: wh,
+	}
+
+	sw.workerCompactWindows()
+	require.Equal(t, len(start)-len(expect), -sw.windowsRequested)
 
 	for wi, tasks := range expect {
 		var expectRes activeResources
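With the Closing channel replaced by a Session UUID, the test worker above fakes lifecycle events purely through that value: Close() zeroes the session, and a restart would just mean answering with a fresh UUID. A tiny illustrative sketch (the helper names are assumptions, not part of this patch):

// stopTestWorker mimics Close(): a zero session tells the scheduler the worker is gone.
func stopTestWorker(w *schedTestWorker) { w.session = uuid.UUID{} }

// restartTestWorker hands out a brand-new session, which the scheduler treats
// as a different worker that must be re-registered.
func restartTestWorker(w *schedTestWorker) { w.session = uuid.New() }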
extern/sector-storage/sched_watch.go (deleted)
@ -1,100 +0,0 @@
-package sectorstorage
-
-import (
-	"context"
-	"reflect"
-)
-
-func (sh *scheduler) runWorkerWatcher() {
-	ctx, cancel := context.WithCancel(context.TODO())
-	defer cancel()
-
-	nilch := reflect.ValueOf(new(chan struct{})).Elem()
-
-	cases := []reflect.SelectCase{
-		{
-			Dir:  reflect.SelectRecv,
-			Chan: reflect.ValueOf(sh.closing),
-		},
-		{
-			Dir:  reflect.SelectRecv,
-			Chan: reflect.ValueOf(sh.watchClosing),
-		},
-	}
-
-	caseToWorker := map[int]WorkerID{}
-
-	for {
-		n, rv, ok := reflect.Select(cases)
-
-		switch {
-		case n == 0: // sh.closing
-			return
-		case n == 1: // sh.watchClosing
-			if !ok {
-				log.Errorf("watchClosing channel closed")
-				return
-			}
-
-			wid, ok := rv.Interface().(WorkerID)
-			if !ok {
-				panic("got a non-WorkerID message")
-			}
-
-			sh.workersLk.Lock()
-			workerClosing, err := sh.workers[wid].w.Closing(ctx)
-			sh.workersLk.Unlock()
-			if err != nil {
-				log.Errorf("getting worker closing channel: %+v", err)
-				select {
-				case sh.workerClosing <- wid:
-				case <-sh.closing:
-					return
-				}
-
-				continue
-			}
-
-			toSet := -1
-			for i, sc := range cases {
-				if sc.Chan == nilch {
-					toSet = i
-					break
-				}
-			}
-			if toSet == -1 {
-				toSet = len(cases)
-				cases = append(cases, reflect.SelectCase{})
-			}
-
-			cases[toSet] = reflect.SelectCase{
-				Dir:  reflect.SelectRecv,
-				Chan: reflect.ValueOf(workerClosing),
-			}
-
-			caseToWorker[toSet] = wid
-		default:
-			wid, found := caseToWorker[n]
-			if !found {
-				log.Errorf("worker ID not found for case %d", n)
-				continue
-			}
-
-			delete(caseToWorker, n)
-			cases[n] = reflect.SelectCase{
-				Dir:  reflect.SelectRecv,
-				Chan: nilch,
-			}
-
-			log.Warnf("worker %d dropped", wid)
-			// send in a goroutine to avoid a deadlock between workerClosing / watchClosing
-			go func() {
-				select {
-				case sh.workerClosing <- wid:
-				case <-sh.closing:
-					return
-				}
-			}()
-		}
-	}
-}
extern/sector-storage/sched_worker.go (new file)
@ -0,0 +1,478 @@
|
|||||||
|
package sectorstorage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||||
|
)
|
||||||
|
|
||||||
|
type schedWorker struct {
|
||||||
|
sched *scheduler
|
||||||
|
worker *workerHandle
|
||||||
|
|
||||||
|
wid WorkerID
|
||||||
|
|
||||||
|
heartbeatTimer *time.Ticker
|
||||||
|
scheduledWindows chan *schedWindow
|
||||||
|
taskDone chan struct{}
|
||||||
|
|
||||||
|
windowsRequested int
|
||||||
|
}
|
||||||
|
|
||||||
|
// context only used for startup
|
||||||
|
func (sh *scheduler) runWorker(ctx context.Context, w Worker) error {
|
||||||
|
info, err := w.Info(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("getting worker info: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sessID, err := w.Session(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("getting worker session: %w", err)
|
||||||
|
}
|
||||||
|
if sessID == ClosedWorkerID {
|
||||||
|
return xerrors.Errorf("worker already closed")
|
||||||
|
}
|
||||||
|
|
||||||
|
worker := &workerHandle{
|
||||||
|
workerRpc: w,
|
||||||
|
info: info,
|
||||||
|
|
||||||
|
preparing: &activeResources{},
|
||||||
|
active: &activeResources{},
|
||||||
|
enabled: true,
|
||||||
|
|
||||||
|
closingMgr: make(chan struct{}),
|
||||||
|
closedMgr: make(chan struct{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
wid := WorkerID(sessID)
|
||||||
|
|
||||||
|
sh.workersLk.Lock()
|
||||||
|
_, exist := sh.workers[wid]
|
||||||
|
if exist {
|
||||||
|
log.Warnw("duplicated worker added", "id", wid)
|
||||||
|
|
||||||
|
// this is ok, we're already handling this worker in a different goroutine
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
sh.workers[wid] = worker
|
||||||
|
sh.workersLk.Unlock()
|
||||||
|
|
||||||
|
sw := &schedWorker{
|
||||||
|
sched: sh,
|
||||||
|
worker: worker,
|
||||||
|
|
||||||
|
wid: wid,
|
||||||
|
|
||||||
|
heartbeatTimer: time.NewTicker(stores.HeartbeatInterval),
|
||||||
|
scheduledWindows: make(chan *schedWindow, SchedWindows),
|
||||||
|
taskDone: make(chan struct{}, 1),
|
||||||
|
|
||||||
|
windowsRequested: 0,
|
||||||
|
}
|
||||||
|
|
||||||
|
go sw.handleWorker()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sw *schedWorker) handleWorker() {
|
||||||
|
worker, sched := sw.worker, sw.sched
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.TODO())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
defer close(worker.closedMgr)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
log.Warnw("Worker closing", "workerid", sw.wid)
|
||||||
|
|
||||||
|
if err := sw.disable(ctx); err != nil {
|
||||||
|
log.Warnw("failed to disable worker", "worker", sw.wid, "error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sched.workersLk.Lock()
|
||||||
|
delete(sched.workers, sw.wid)
|
||||||
|
sched.workersLk.Unlock()
|
||||||
|
}()
|
||||||
|
|
||||||
|
defer sw.heartbeatTimer.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
sched.workersLk.Lock()
|
||||||
|
enabled := worker.enabled
|
||||||
|
sched.workersLk.Unlock()
|
||||||
|
|
||||||
|
// ask for more windows if we need them (non-blocking)
|
||||||
|
if enabled {
|
||||||
|
if !sw.requestWindows() {
|
||||||
|
return // graceful shutdown
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// wait for more windows to come in, or for tasks to get finished (blocking)
|
||||||
|
for {
|
||||||
|
// ping the worker and check session
|
||||||
|
if !sw.checkSession(ctx) {
|
||||||
|
return // invalid session / exiting
|
||||||
|
}
|
||||||
|
|
||||||
|
// session looks good
|
||||||
|
if !enabled {
|
||||||
|
sched.workersLk.Lock()
|
||||||
|
worker.enabled = true
|
||||||
|
sched.workersLk.Unlock()
|
||||||
|
|
||||||
|
// we'll send window requests on the next loop
|
||||||
|
}
|
||||||
|
|
||||||
|
// wait for more tasks to be assigned by the main scheduler or for the worker
|
||||||
|
// to finish precessing a task
|
||||||
|
update, ok := sw.waitForUpdates()
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if update {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// process assigned windows (non-blocking)
|
||||||
|
sched.workersLk.RLock()
|
||||||
|
worker.wndLk.Lock()
|
||||||
|
|
||||||
|
sw.workerCompactWindows()
|
||||||
|
|
||||||
|
// send tasks to the worker
|
||||||
|
sw.processAssignedWindows()
|
||||||
|
|
||||||
|
worker.wndLk.Unlock()
|
||||||
|
sched.workersLk.RUnlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sw *schedWorker) disable(ctx context.Context) error {
|
||||||
|
done := make(chan struct{})
|
||||||
|
|
||||||
|
// request cleanup in the main scheduler goroutine
|
||||||
|
select {
|
||||||
|
case sw.sched.workerDisable <- workerDisableReq{
|
||||||
|
activeWindows: sw.worker.activeWindows,
|
||||||
|
wid: sw.wid,
|
||||||
|
done: func() {
|
||||||
|
close(done)
|
||||||
|
},
|
||||||
|
}:
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
case <-sw.sched.closing:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// wait for cleanup to complete
|
||||||
|
select {
|
||||||
|
case <-done:
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
case <-sw.sched.closing:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
sw.worker.activeWindows = sw.worker.activeWindows[:0]
|
||||||
|
sw.windowsRequested = 0
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sw *schedWorker) checkSession(ctx context.Context) bool {
|
||||||
|
for {
|
||||||
|
sctx, scancel := context.WithTimeout(ctx, stores.HeartbeatInterval/2)
|
||||||
|
curSes, err := sw.worker.workerRpc.Session(sctx)
|
||||||
|
scancel()
|
||||||
|
if err != nil {
|
||||||
|
// Likely temporary error
|
||||||
|
|
||||||
|
log.Warnw("failed to check worker session", "error", err)
|
||||||
|
|
||||||
|
if err := sw.disable(ctx); err != nil {
|
||||||
|
log.Warnw("failed to disable worker with session error", "worker", sw.wid, "error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-sw.heartbeatTimer.C:
|
||||||
|
continue
|
||||||
|
case w := <-sw.scheduledWindows:
|
||||||
|
// was in flight when initially disabled, return
|
||||||
|
sw.worker.wndLk.Lock()
|
||||||
|
sw.worker.activeWindows = append(sw.worker.activeWindows, w)
|
||||||
|
sw.worker.wndLk.Unlock()
|
||||||
|
|
||||||
|
if err := sw.disable(ctx); err != nil {
|
||||||
|
log.Warnw("failed to disable worker with session error", "worker", sw.wid, "error", err)
|
||||||
|
}
|
||||||
|
case <-sw.sched.closing:
|
||||||
|
return false
|
||||||
|
case <-sw.worker.closingMgr:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if WorkerID(curSes) != sw.wid {
|
||||||
|
if curSes != ClosedWorkerID {
|
||||||
|
// worker restarted
|
||||||
|
log.Warnw("worker session changed (worker restarted?)", "initial", sw.wid, "current", curSes)
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sw *schedWorker) requestWindows() bool {
|
||||||
|
for ; sw.windowsRequested < SchedWindows; sw.windowsRequested++ {
|
||||||
|
select {
|
||||||
|
case sw.sched.windowRequests <- &schedWindowRequest{
|
||||||
|
worker: sw.wid,
|
||||||
|
done: sw.scheduledWindows,
|
||||||
|
}:
|
||||||
|
case <-sw.sched.closing:
|
||||||
|
return false
|
||||||
|
case <-sw.worker.closingMgr:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sw *schedWorker) waitForUpdates() (update bool, ok bool) {
|
||||||
|
select {
|
||||||
|
case <-sw.heartbeatTimer.C:
|
||||||
|
return false, true
|
||||||
|
case w := <-sw.scheduledWindows:
|
||||||
|
sw.worker.wndLk.Lock()
|
||||||
|
sw.worker.activeWindows = append(sw.worker.activeWindows, w)
|
||||||
|
sw.worker.wndLk.Unlock()
|
||||||
|
return true, true
|
||||||
|
case <-sw.taskDone:
|
||||||
|
log.Debugw("task done", "workerid", sw.wid)
|
||||||
|
return true, true
|
||||||
|
case <-sw.sched.closing:
|
||||||
|
case <-sw.worker.closingMgr:
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sw *schedWorker) workerCompactWindows() {
|
||||||
|
worker := sw.worker
|
||||||
|
|
||||||
|
// move tasks from older windows to newer windows if older windows
|
||||||
|
// still can fit them
|
||||||
|
if len(worker.activeWindows) > 1 {
|
||||||
|
for wi, window := range worker.activeWindows[1:] {
|
||||||
|
lower := worker.activeWindows[wi]
|
||||||
|
var moved []int
|
||||||
|
|
||||||
|
for ti, todo := range window.todo {
|
||||||
|
needRes := ResourceTable[todo.taskType][sw.sched.spt]
|
||||||
|
if !lower.allocated.canHandleRequest(needRes, sw.wid, "compactWindows", worker.info.Resources) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
moved = append(moved, ti)
|
||||||
|
lower.todo = append(lower.todo, todo)
|
||||||
|
lower.allocated.add(worker.info.Resources, needRes)
|
||||||
|
window.allocated.free(worker.info.Resources, needRes)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(moved) > 0 {
|
||||||
|
newTodo := make([]*workerRequest, 0, len(window.todo)-len(moved))
|
||||||
|
for i, t := range window.todo {
|
||||||
|
if len(moved) > 0 && moved[0] == i {
|
||||||
|
moved = moved[1:]
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
newTodo = append(newTodo, t)
|
||||||
|
}
|
||||||
|
window.todo = newTodo
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var compacted int
|
||||||
|
var newWindows []*schedWindow
|
||||||
|
|
||||||
|
for _, window := range worker.activeWindows {
|
||||||
|
if len(window.todo) == 0 {
|
||||||
|
compacted++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
newWindows = append(newWindows, window)
|
||||||
|
}
|
||||||
|
|
||||||
|
worker.activeWindows = newWindows
|
||||||
|
sw.windowsRequested -= compacted
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sw *schedWorker) processAssignedWindows() {
|
||||||
|
worker := sw.worker
|
||||||
|
|
||||||
|
assignLoop:
|
||||||
|
// process windows in order
|
||||||
|
for len(worker.activeWindows) > 0 {
|
||||||
|
firstWindow := worker.activeWindows[0]
|
||||||
|
|
||||||
|
// process tasks within a window, preferring tasks at lower indexes
|
||||||
|
for len(firstWindow.todo) > 0 {
|
||||||
|
tidx := -1
|
||||||
|
|
||||||
|
worker.lk.Lock()
|
||||||
|
for t, todo := range firstWindow.todo {
|
||||||
|
needRes := ResourceTable[todo.taskType][sw.sched.spt]
|
||||||
|
if worker.preparing.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info.Resources) {
|
||||||
|
tidx = t
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
worker.lk.Unlock()
|
||||||
|
|
||||||
|
if tidx == -1 {
|
||||||
|
break assignLoop
|
||||||
|
}
|
||||||
|
|
||||||
|
todo := firstWindow.todo[tidx]
|
||||||
|
|
||||||
|
log.Debugf("assign worker sector %d", todo.sector.Number)
|
||||||
|
err := sw.startProcessingTask(sw.taskDone, todo)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Error("startProcessingTask error: %+v", err)
|
||||||
|
go todo.respond(xerrors.Errorf("startProcessingTask error: %w", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note: we're not freeing window.allocated resources here very much on purpose
|
||||||
|
copy(firstWindow.todo[tidx:], firstWindow.todo[tidx+1:])
|
||||||
|
firstWindow.todo[len(firstWindow.todo)-1] = nil
|
||||||
|
firstWindow.todo = firstWindow.todo[:len(firstWindow.todo)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
copy(worker.activeWindows, worker.activeWindows[1:])
|
||||||
|
worker.activeWindows[len(worker.activeWindows)-1] = nil
|
||||||
|
+        worker.activeWindows = worker.activeWindows[:len(worker.activeWindows)-1]
+
+        sw.windowsRequested--
+    }
+}
+
+func (sw *schedWorker) startProcessingTask(taskDone chan struct{}, req *workerRequest) error {
+    w, sh := sw.worker, sw.sched
+
+    needRes := ResourceTable[req.taskType][sh.spt]
+
+    w.lk.Lock()
+    w.preparing.add(w.info.Resources, needRes)
+    w.lk.Unlock()
+
+    go func() {
+        // first run the prepare step (e.g. fetching sector data from other worker)
+        err := req.prepare(req.ctx, sh.workTracker.worker(sw.wid, w.workerRpc))
+        sh.workersLk.Lock()
+
+        if err != nil {
+            w.lk.Lock()
+            w.preparing.free(w.info.Resources, needRes)
+            w.lk.Unlock()
+            sh.workersLk.Unlock()
+
+            select {
+            case taskDone <- struct{}{}:
+            case <-sh.closing:
+                log.Warnf("scheduler closed while sending response (prepare error: %+v)", err)
+            }
+
+            select {
+            case req.ret <- workerResponse{err: err}:
+            case <-req.ctx.Done():
+                log.Warnf("request got cancelled before we could respond (prepare error: %+v)", err)
+            case <-sh.closing:
+                log.Warnf("scheduler closed while sending response (prepare error: %+v)", err)
+            }
+            return
+        }
+
+        // wait (if needed) for resources in the 'active' window
+        err = w.active.withResources(sw.wid, w.info.Resources, needRes, &sh.workersLk, func() error {
+            w.lk.Lock()
+            w.preparing.free(w.info.Resources, needRes)
+            w.lk.Unlock()
+            sh.workersLk.Unlock()
+            defer sh.workersLk.Lock() // we MUST return locked from this function
+
+            select {
+            case taskDone <- struct{}{}:
+            case <-sh.closing:
+            }
+
+            // Do the work!
+            err = req.work(req.ctx, sh.workTracker.worker(sw.wid, w.workerRpc))
+
+            select {
+            case req.ret <- workerResponse{err: err}:
+            case <-req.ctx.Done():
+                log.Warnf("request got cancelled before we could respond")
+            case <-sh.closing:
+                log.Warnf("scheduler closed while sending response")
+            }
+
+            return nil
+        })
+
+        sh.workersLk.Unlock()
+
+        // This error should always be nil, since nothing is setting it, but just to be safe:
+        if err != nil {
+            log.Errorf("error executing worker (withResources): %+v", err)
+        }
+    }()
+
+    return nil
+}
+
+func (sh *scheduler) workerCleanup(wid WorkerID, w *workerHandle) {
+    select {
+    case <-w.closingMgr:
+    default:
+        close(w.closingMgr)
+    }
+
+    sh.workersLk.Unlock()
+    select {
+    case <-w.closedMgr:
+    case <-time.After(time.Second):
+        log.Errorf("timeout closing worker manager goroutine %d", wid)
+    }
+    sh.workersLk.Lock()
+
+    if !w.cleanupStarted {
+        w.cleanupStarted = true
+
+        newWindows := make([]*schedWindowRequest, 0, len(sh.openWindows))
+        for _, window := range sh.openWindows {
+            if window.worker != wid {
+                newWindows = append(newWindows, window)
+            }
+        }
+        sh.openWindows = newWindows
+
+        log.Debugf("worker %d dropped", wid)
+    }
+}
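The hand-off above is the heart of the new worker loop: a task first reserves resources in the worker's preparing pool, runs its prepare step, then re-reserves under the active window and signals taskDone so the scheduler can hand out the next request. A minimal, self-contained sketch of that pattern follows; pool, runTask and the counters are illustrative stand-ins, not Lotus types.

package main

import (
    "fmt"
    "sync"
)

// pool is a toy stand-in for the per-worker resource accounting in the
// scheduler: reservations start in "preparing" and move to "active" once
// the task is admitted to the worker's active window.
type pool struct {
    lk        sync.Mutex
    preparing int
    active    int
}

func (p *pool) shift(dPreparing, dActive int) {
    p.lk.Lock()
    defer p.lk.Unlock()
    p.preparing += dPreparing
    p.active += dActive
}

// runTask mirrors the shape of startProcessingTask above: reserve in
// preparing, run the prepare step, move the reservation to active, signal
// taskDone so the scheduler loop can hand out the next request, then run
// the actual work.
func runTask(p *pool, need int, taskDone chan<- struct{}, prepare, work func() error) {
    p.shift(+need, 0)

    go func() {
        if err := prepare(); err != nil {
            p.shift(-need, 0)
            taskDone <- struct{}{} // free the scheduling slot even on failure
            fmt.Println("prepare failed:", err)
            return
        }

        p.shift(-need, +need) // preparing -> active
        taskDone <- struct{}{}

        defer p.shift(0, -need)
        if err := work(); err != nil {
            fmt.Println("work failed:", err)
        }
    }()
}

func main() {
    p := &pool{}
    taskDone := make(chan struct{}, 1)
    done := make(chan struct{})

    runTask(p, 1, taskDone,
        func() error { fmt.Println("prepare: fetch sector data"); return nil },
        func() error { defer close(done); fmt.Println("work: seal"); return nil })

    <-taskDone // the worker loop uses this signal to request the next task
    <-done
}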
11  extern/sector-storage/selector_alloc.go  vendored

@@ -9,15 +9,16 @@ import (
 	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
 	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )
 
 type allocSelector struct {
 	index stores.SectorIndex
-	alloc stores.SectorFileType
-	ptype stores.PathType
+	alloc storiface.SectorFileType
+	ptype storiface.PathType
 }
 
-func newAllocSelector(index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) *allocSelector {
+func newAllocSelector(index stores.SectorIndex, alloc storiface.SectorFileType, ptype storiface.PathType) *allocSelector {
 	return &allocSelector{
 		index: index,
 		alloc: alloc,
@@ -26,7 +27,7 @@ func newAllocSelector(index stores.SectorIndex, alloc stores.SectorFileType, pty
 }
 
 func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) {
-	tasks, err := whnd.w.TaskTypes(ctx)
+	tasks, err := whnd.workerRpc.TaskTypes(ctx)
 	if err != nil {
 		return false, xerrors.Errorf("getting supported worker task types: %w", err)
 	}
@@ -34,7 +35,7 @@ func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi
 		return false, nil
 	}
 
-	paths, err := whnd.w.Paths(ctx)
+	paths, err := whnd.workerRpc.Paths(ctx)
 	if err != nil {
 		return false, xerrors.Errorf("getting worker paths: %w", err)
 	}
9  extern/sector-storage/selector_existing.go  vendored

@@ -9,16 +9,17 @@ import (
 	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
 	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )
 
 type existingSelector struct {
 	index      stores.SectorIndex
 	sector     abi.SectorID
-	alloc      stores.SectorFileType
+	alloc      storiface.SectorFileType
 	allowFetch bool
 }
 
-func newExistingSelector(index stores.SectorIndex, sector abi.SectorID, alloc stores.SectorFileType, allowFetch bool) *existingSelector {
+func newExistingSelector(index stores.SectorIndex, sector abi.SectorID, alloc storiface.SectorFileType, allowFetch bool) *existingSelector {
 	return &existingSelector{
 		index:  index,
 		sector: sector,
@@ -28,7 +29,7 @@ func newExistingSelector(index stores.SectorIndex, sector abi.SectorID, alloc st
 }
 
 func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) {
-	tasks, err := whnd.w.TaskTypes(ctx)
+	tasks, err := whnd.workerRpc.TaskTypes(ctx)
 	if err != nil {
 		return false, xerrors.Errorf("getting supported worker task types: %w", err)
 	}
@@ -36,7 +37,7 @@ func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt
 		return false, nil
 	}
 
-	paths, err := whnd.w.Paths(ctx)
+	paths, err := whnd.workerRpc.Paths(ctx)
 	if err != nil {
 		return false, xerrors.Errorf("getting worker paths: %w", err)
 	}
6  extern/sector-storage/selector_task.go  vendored

@@ -20,7 +20,7 @@ func newTaskSelector() *taskSelector {
 }
 
 func (s *taskSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) {
-	tasks, err := whnd.w.TaskTypes(ctx)
+	tasks, err := whnd.workerRpc.TaskTypes(ctx)
 	if err != nil {
 		return false, xerrors.Errorf("getting supported worker task types: %w", err)
 	}
@@ -30,11 +30,11 @@ func (s *taskSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.
 }
 
 func (s *taskSelector) Cmp(ctx context.Context, _ sealtasks.TaskType, a, b *workerHandle) (bool, error) {
-	atasks, err := a.w.TaskTypes(ctx)
+	atasks, err := a.workerRpc.TaskTypes(ctx)
 	if err != nil {
 		return false, xerrors.Errorf("getting supported worker task types: %w", err)
 	}
-	btasks, err := b.w.TaskTypes(ctx)
+	btasks, err := b.workerRpc.TaskTypes(ctx)
 	if err != nil {
 		return false, xerrors.Errorf("getting supported worker task types: %w", err)
 	}
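All three selectors touched above implement the same two calls: Ok reports whether a given worker can take a task at all, and Cmp (on taskSelector) orders two acceptable workers. A rough, self-contained sketch of how a scheduler might use such a pair; the worker and selector types here are simplified stand-ins for workerHandle and the real selector interface, and the "prefer the more specialised worker" rule is only an illustration of the Cmp shape.

package main

import (
    "fmt"
    "sort"
)

// worker and selector are simplified stand-ins; only the Ok/Cmp shape
// matches the real scheduler code.
type worker struct {
    name  string
    tasks map[string]struct{}
}

type selector interface {
    Ok(task string, w *worker) bool
    Cmp(a, b *worker) bool // true if a is preferred over b
}

// taskSelector here prefers workers that support fewer task types, so
// specialised workers are picked before general-purpose ones.
type taskSelector struct{}

func (taskSelector) Ok(task string, w *worker) bool { _, ok := w.tasks[task]; return ok }
func (taskSelector) Cmp(a, b *worker) bool          { return len(a.tasks) < len(b.tasks) }

// pick filters workers with Ok, then sorts the acceptable ones with Cmp.
func pick(sel selector, task string, workers []*worker) *worker {
    var ok []*worker
    for _, w := range workers {
        if sel.Ok(task, w) {
            ok = append(ok, w)
        }
    }
    if len(ok) == 0 {
        return nil
    }
    sort.Slice(ok, func(i, j int) bool { return sel.Cmp(ok[i], ok[j]) })
    return ok[0]
}

func main() {
    workers := []*worker{
        {name: "general", tasks: map[string]struct{}{"PC1": {}, "PC2": {}, "C2": {}}},
        {name: "pc1-only", tasks: map[string]struct{}{"PC1": {}}},
    }
    fmt.Println(pick(taskSelector{}, "PC1", workers).name) // pc1-only
}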
53  extern/sector-storage/stats.go  vendored

@@ -1,18 +1,24 @@
 package sectorstorage
 
 import (
+	"time"
+
+	"github.com/google/uuid"
+
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )
 
-func (m *Manager) WorkerStats() map[uint64]storiface.WorkerStats {
+func (m *Manager) WorkerStats() map[uuid.UUID]storiface.WorkerStats {
 	m.sched.workersLk.RLock()
 	defer m.sched.workersLk.RUnlock()
 
-	out := map[uint64]storiface.WorkerStats{}
+	out := map[uuid.UUID]storiface.WorkerStats{}
 
 	for id, handle := range m.sched.workers {
-		out[uint64(id)] = storiface.WorkerStats{
+		out[uuid.UUID(id)] = storiface.WorkerStats{
 			Info: handle.info,
+			Enabled: handle.enabled,
+
 			MemUsedMin: handle.active.memUsedMin,
 			MemUsedMax: handle.active.memUsedMax,
 			GpuUsed:    handle.active.gpuUsed,
@@ -23,20 +29,23 @@ func (m *Manager) WorkerStats() map[uint64]storiface.WorkerStats {
 	return out
 }
 
-func (m *Manager) WorkerJobs() map[uint64][]storiface.WorkerJob {
-	m.sched.workersLk.RLock()
-	defer m.sched.workersLk.RUnlock()
+func (m *Manager) WorkerJobs() map[uuid.UUID][]storiface.WorkerJob {
+	out := map[uuid.UUID][]storiface.WorkerJob{}
+	calls := map[storiface.CallID]struct{}{}
 
-	out := map[uint64][]storiface.WorkerJob{}
+	for _, t := range m.sched.workTracker.Running() {
+		out[uuid.UUID(t.worker)] = append(out[uuid.UUID(t.worker)], t.job)
+		calls[t.job.ID] = struct{}{}
+	}
+
+	m.sched.workersLk.RLock()
 
 	for id, handle := range m.sched.workers {
-		out[uint64(id)] = handle.wt.Running()
-
 		handle.wndLk.Lock()
 		for wi, window := range handle.activeWindows {
 			for _, request := range window.todo {
-				out[uint64(id)] = append(out[uint64(id)], storiface.WorkerJob{
-					ID:      0,
+				out[uuid.UUID(id)] = append(out[uuid.UUID(id)], storiface.WorkerJob{
+					ID:      storiface.UndefCall,
 					Sector:  request.sector,
 					Task:    request.taskType,
 					RunWait: wi + 1,
@@ -47,5 +56,25 @@ func (m *Manager) WorkerJobs() map[uint64][]storiface.WorkerJob {
 		handle.wndLk.Unlock()
 	}
 
+	m.sched.workersLk.RUnlock()
+
+	m.workLk.Lock()
+	defer m.workLk.Unlock()
+
+	for id, work := range m.callToWork {
+		_, found := calls[id]
+		if found {
+			continue
+		}
+
+		out[uuid.UUID{}] = append(out[uuid.UUID{}], storiface.WorkerJob{
+			ID:      id,
+			Sector:  id.Sector,
+			Task:    work.Method,
+			RunWait: -1,
+			Start:   time.Time{},
+		})
+	}
+
 	return out
 }
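WorkerJobs is now keyed by the worker session UUID rather than a numeric ID, and it reports three kinds of entries: jobs tracked by workTracker, jobs still queued in an assigned window (ID storiface.UndefCall, RunWait greater than zero), and calls recorded in callToWork that are no longer running, which land under the zero UUID with RunWait -1. Below is a sketch of how a caller might render such a map; the job struct is a local stand-in mirroring only the fields used in this diff, and the exact RunWait conventions are an assumption drawn from the code above.

package main

import (
    "fmt"

    "github.com/google/uuid"
)

// job is a local stand-in for storiface.WorkerJob with simplified field types.
type job struct {
    ID      string
    Sector  uint64
    Task    string
    RunWait int // assumed: 0 = running, >0 = queue position in a window, <0 = returned, result not yet claimed
}

func describe(jobs map[uuid.UUID][]job) {
    for wid, js := range jobs {
        name := wid.String()
        if wid == (uuid.UUID{}) {
            // the zero-UUID bucket built from callToWork above
            name = "(returned, unclaimed)"
        }
        for _, j := range js {
            state := "running"
            switch {
            case j.RunWait > 0:
                state = fmt.Sprintf("assigned, queue position %d", j.RunWait)
            case j.RunWait < 0:
                state = "done, waiting for the result to be claimed"
            }
            fmt.Printf("worker %s: sector %d, task %s: %s\n", name, j.Sector, j.Task, state)
        }
    }
}

func main() {
    describe(map[uuid.UUID][]job{
        uuid.New(): {{ID: "call-1", Sector: 10, Task: "seal/v0/precommit/1", RunWait: 0}},
        {}:         {{ID: "call-2", Sector: 11, Task: "seal/v0/commit/2", RunWait: -1}},
    })
}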
41  extern/sector-storage/stores/http_handler.go  vendored

@@ -10,6 +10,7 @@ import (
 	logging "github.com/ipfs/go-log/v2"
 	"golang.org/x/xerrors"
 
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 	"github.com/filecoin-project/lotus/extern/sector-storage/tarutil"
 )
 
@@ -55,16 +56,16 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ
 	log.Infof("SERVE GET %s", r.URL)
 	vars := mux.Vars(r)
 
-	id, err := ParseSectorID(vars["id"])
+	id, err := storiface.ParseSectorID(vars["id"])
 	if err != nil {
-		log.Error("%+v", err)
+		log.Errorf("%+v", err)
 		w.WriteHeader(500)
 		return
 	}
 
 	ft, err := ftFromString(vars["type"])
 	if err != nil {
-		log.Error("%+v", err)
+		log.Errorf("%+v", err)
 		w.WriteHeader(500)
 		return
 	}
@@ -72,16 +73,16 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ
 	// The caller has a lock on this sector already, no need to get one here
 
 	// passing 0 spt because we don't allocate anything
-	paths, _, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, FTNone, PathStorage, AcquireMove)
+	paths, _, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
 	if err != nil {
-		log.Error("%+v", err)
+		log.Errorf("%+v", err)
 		w.WriteHeader(500)
 		return
 	}
 
 	// TODO: reserve local storage here
 
-	path := PathByType(paths, ft)
+	path := storiface.PathByType(paths, ft)
 	if path == "" {
 		log.Error("acquired path was empty")
 		w.WriteHeader(500)
@@ -90,7 +91,7 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ
 
 	stat, err := os.Stat(path)
 	if err != nil {
-		log.Error("%+v", err)
+		log.Errorf("%+v", err)
 		w.WriteHeader(500)
 		return
 	}
@@ -104,14 +105,14 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ
 		w.Header().Set("Content-Type", "application/octet-stream")
 	}
 	if err != nil {
-		log.Error("%+v", err)
+		log.Errorf("%+v", err)
 		w.WriteHeader(500)
 		return
 	}
 
 	w.WriteHeader(200)
 	if _, err := io.Copy(w, rd); err != nil { // TODO: default 32k buf may be too small
-		log.Error("%+v", err)
+		log.Errorf("%+v", err)
 		return
 	}
 }
@@ -120,35 +121,35 @@ func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.R
 	log.Infof("SERVE DELETE %s", r.URL)
 	vars := mux.Vars(r)
 
-	id, err := ParseSectorID(vars["id"])
+	id, err := storiface.ParseSectorID(vars["id"])
 	if err != nil {
-		log.Error("%+v", err)
+		log.Errorf("%+v", err)
 		w.WriteHeader(500)
 		return
 	}
 
 	ft, err := ftFromString(vars["type"])
 	if err != nil {
-		log.Error("%+v", err)
+		log.Errorf("%+v", err)
 		w.WriteHeader(500)
 		return
 	}
 
 	if err := handler.Remove(r.Context(), id, ft, false); err != nil {
-		log.Error("%+v", err)
+		log.Errorf("%+v", err)
 		w.WriteHeader(500)
 		return
 	}
 }
 
-func ftFromString(t string) (SectorFileType, error) {
+func ftFromString(t string) (storiface.SectorFileType, error) {
 	switch t {
-	case FTUnsealed.String():
-		return FTUnsealed, nil
-	case FTSealed.String():
-		return FTSealed, nil
-	case FTCache.String():
-		return FTCache, nil
+	case storiface.FTUnsealed.String():
+		return storiface.FTUnsealed, nil
+	case storiface.FTSealed.String():
+		return storiface.FTSealed, nil
+	case storiface.FTCache.String():
+		return storiface.FTCache, nil
 	default:
 		return 0, xerrors.Errorf("unknown sector file type: '%s'", t)
 	}
47  extern/sector-storage/stores/index.go  vendored

@@ -10,10 +10,11 @@ import (
 
 	"golang.org/x/xerrors"
 
-	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
-
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
+
+	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )
 
 var HeartbeatInterval = 10 * time.Second
@@ -53,20 +54,20 @@ type SectorIndex interface { // part of storage-miner api
 	StorageInfo(context.Context, ID) (StorageInfo, error)
 	StorageReportHealth(context.Context, ID, HealthReport) error
 
-	StorageDeclareSector(ctx context.Context, storageID ID, s abi.SectorID, ft SectorFileType, primary bool) error
-	StorageDropSector(ctx context.Context, storageID ID, s abi.SectorID, ft SectorFileType) error
-	StorageFindSector(ctx context.Context, sector abi.SectorID, ft SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]SectorStorageInfo, error)
+	StorageDeclareSector(ctx context.Context, storageID ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error
+	StorageDropSector(ctx context.Context, storageID ID, s abi.SectorID, ft storiface.SectorFileType) error
+	StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]SectorStorageInfo, error)
 
-	StorageBestAlloc(ctx context.Context, allocate SectorFileType, ssize abi.SectorSize, pathType PathType) ([]StorageInfo, error)
+	StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]StorageInfo, error)
 
 	// atomically acquire locks on all sector file types. close ctx to unlock
-	StorageLock(ctx context.Context, sector abi.SectorID, read SectorFileType, write SectorFileType) error
-	StorageTryLock(ctx context.Context, sector abi.SectorID, read SectorFileType, write SectorFileType) (bool, error)
+	StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error
+	StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error)
 }
 
 type Decl struct {
 	abi.SectorID
-	SectorFileType
+	storiface.SectorFileType
 }
 
 type declMeta struct {
@@ -104,10 +105,10 @@ func (i *Index) StorageList(ctx context.Context) (map[ID][]Decl, error) {
 	i.lk.RLock()
 	defer i.lk.RUnlock()
 
-	byID := map[ID]map[abi.SectorID]SectorFileType{}
+	byID := map[ID]map[abi.SectorID]storiface.SectorFileType{}
 
 	for id := range i.stores {
-		byID[id] = map[abi.SectorID]SectorFileType{}
+		byID[id] = map[abi.SectorID]storiface.SectorFileType{}
 	}
 	for decl, ids := range i.sectors {
 		for _, id := range ids {
@@ -180,12 +181,12 @@ func (i *Index) StorageReportHealth(ctx context.Context, id ID, report HealthRep
 	return nil
 }
 
-func (i *Index) StorageDeclareSector(ctx context.Context, storageID ID, s abi.SectorID, ft SectorFileType, primary bool) error {
+func (i *Index) StorageDeclareSector(ctx context.Context, storageID ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error {
 	i.lk.Lock()
 	defer i.lk.Unlock()
 
 loop:
-	for _, fileType := range PathTypes {
+	for _, fileType := range storiface.PathTypes {
 		if fileType&ft == 0 {
 			continue
 		}
@@ -212,11 +213,11 @@ loop:
 	return nil
 }
 
-func (i *Index) StorageDropSector(ctx context.Context, storageID ID, s abi.SectorID, ft SectorFileType) error {
+func (i *Index) StorageDropSector(ctx context.Context, storageID ID, s abi.SectorID, ft storiface.SectorFileType) error {
 	i.lk.Lock()
 	defer i.lk.Unlock()
 
-	for _, fileType := range PathTypes {
+	for _, fileType := range storiface.PathTypes {
 		if fileType&ft == 0 {
 			continue
 		}
@@ -246,14 +247,14 @@ func (i *Index) StorageDropSector(ctx context.Context, storageID ID, s abi.Secto
 	return nil
 }
 
-func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]SectorStorageInfo, error) {
+func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]SectorStorageInfo, error) {
 	i.lk.RLock()
 	defer i.lk.RUnlock()
 
 	storageIDs := map[ID]uint64{}
 	isprimary := map[ID]bool{}
 
-	for _, pathType := range PathTypes {
+	for _, pathType := range storiface.PathTypes {
 		if ft&pathType == 0 {
 			continue
 		}
@@ -280,7 +281,7 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft Sector
 				return nil, xerrors.Errorf("failed to parse url: %w", err)
 			}
 
-			rl.Path = gopath.Join(rl.Path, ft.String(), SectorName(s))
+			rl.Path = gopath.Join(rl.Path, ft.String(), storiface.SectorName(s))
 			urls[k] = rl.String()
 		}
 
@@ -333,7 +334,7 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft Sector
 				return nil, xerrors.Errorf("failed to parse url: %w", err)
 			}
 
-			rl.Path = gopath.Join(rl.Path, ft.String(), SectorName(s))
+			rl.Path = gopath.Join(rl.Path, ft.String(), storiface.SectorName(s))
 			urls[k] = rl.String()
 		}
 
@@ -365,7 +366,7 @@ func (i *Index) StorageInfo(ctx context.Context, id ID) (StorageInfo, error) {
 	return *si.info, nil
 }
 
-func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, ssize abi.SectorSize, pathType PathType) ([]StorageInfo, error) {
+func (i *Index) StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]StorageInfo, error) {
 	i.lk.RLock()
 	defer i.lk.RUnlock()
 
@@ -377,10 +378,10 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, s
 	}
 
 	for _, p := range i.stores {
-		if (pathType == PathSealing) && !p.info.CanSeal {
+		if (pathType == storiface.PathSealing) && !p.info.CanSeal {
 			continue
 		}
-		if (pathType == PathStorage) && !p.info.CanStore {
+		if (pathType == storiface.PathStorage) && !p.info.CanStore {
 			continue
 		}
 
@@ -421,7 +422,7 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, s
 	return out, nil
 }
 
-func (i *Index) FindSector(id abi.SectorID, typ SectorFileType) ([]ID, error) {
+func (i *Index) FindSector(id abi.SectorID, typ storiface.SectorFileType) ([]ID, error) {
 	i.lk.RLock()
 	defer i.lk.RUnlock()
 
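Throughout index.go a SectorFileType value is treated as a bitmask: the code walks storiface.PathTypes and skips any type that is not set in the requested ft. A small self-contained sketch of that flag pattern follows; the constants are locally defined for illustration, while the real FTUnsealed/FTSealed/FTCache live in the storiface package.

package main

import "fmt"

// sectorFileType is a local stand-in for storiface.SectorFileType: one bit
// per sector file kind, so a single value can describe any combination.
type sectorFileType int

const (
    ftUnsealed sectorFileType = 1 << iota
    ftSealed
    ftCache
)

var pathTypes = []sectorFileType{ftUnsealed, ftSealed, ftCache}

func (t sectorFileType) String() string {
    switch t {
    case ftUnsealed:
        return "unsealed"
    case ftSealed:
        return "sealed"
    case ftCache:
        return "cache"
    }
    return "unknown"
}

func main() {
    // The same shape as StorageDeclareSector/StorageDropSector above:
    // walk all known types, skip the ones not set in the request.
    ft := ftSealed | ftCache
    for _, fileType := range pathTypes {
        if fileType&ft == 0 {
            continue
        }
        fmt.Println("declaring", fileType)
    }
}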
26  extern/sector-storage/stores/index_locks.go  vendored

@@ -7,18 +7,20 @@ import (
 	"golang.org/x/xerrors"
 
 	"github.com/filecoin-project/go-state-types/abi"
+
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )
 
 type sectorLock struct {
 	cond *ctxCond
 
-	r [FileTypes]uint
-	w SectorFileType
+	r [storiface.FileTypes]uint
+	w storiface.SectorFileType
 
 	refs uint // access with indexLocks.lk
 }
 
-func (l *sectorLock) canLock(read SectorFileType, write SectorFileType) bool {
+func (l *sectorLock) canLock(read storiface.SectorFileType, write storiface.SectorFileType) bool {
 	for i, b := range write.All() {
 		if b && l.r[i] > 0 {
 			return false
@@ -29,7 +31,7 @@ func (l *sectorLock) canLock(read SectorFileType, write SectorFileType) bool {
 	return l.w&read == 0 && l.w&write == 0
 }
 
-func (l *sectorLock) tryLock(read SectorFileType, write SectorFileType) bool {
+func (l *sectorLock) tryLock(read storiface.SectorFileType, write storiface.SectorFileType) bool {
 	if !l.canLock(read, write) {
 		return false
 	}
@@ -45,16 +47,16 @@ func (l *sectorLock) tryLock(read SectorFileType, write SectorFileType) bool {
 	return true
 }
 
-type lockFn func(l *sectorLock, ctx context.Context, read SectorFileType, write SectorFileType) (bool, error)
+type lockFn func(l *sectorLock, ctx context.Context, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error)
 
-func (l *sectorLock) tryLockSafe(ctx context.Context, read SectorFileType, write SectorFileType) (bool, error) {
+func (l *sectorLock) tryLockSafe(ctx context.Context, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) {
 	l.cond.L.Lock()
 	defer l.cond.L.Unlock()
 
 	return l.tryLock(read, write), nil
 }
 
-func (l *sectorLock) lock(ctx context.Context, read SectorFileType, write SectorFileType) (bool, error) {
+func (l *sectorLock) lock(ctx context.Context, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) {
 	l.cond.L.Lock()
 	defer l.cond.L.Unlock()
 
@@ -67,7 +69,7 @@ func (l *sectorLock) lock(ctx context.Context, read SectorFileType, write Sector
 	return true, nil
 }
 
-func (l *sectorLock) unlock(read SectorFileType, write SectorFileType) {
+func (l *sectorLock) unlock(read storiface.SectorFileType, write storiface.SectorFileType) {
 	l.cond.L.Lock()
 	defer l.cond.L.Unlock()
 
@@ -88,12 +90,12 @@ type indexLocks struct {
 	locks map[abi.SectorID]*sectorLock
 }
 
-func (i *indexLocks) lockWith(ctx context.Context, lockFn lockFn, sector abi.SectorID, read SectorFileType, write SectorFileType) (bool, error) {
+func (i *indexLocks) lockWith(ctx context.Context, lockFn lockFn, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) {
 	if read|write == 0 {
 		return false, nil
 	}
 
-	if read|write > (1<<FileTypes)-1 {
+	if read|write > (1<<storiface.FileTypes)-1 {
 		return false, xerrors.Errorf("unknown file types specified")
 	}
 
@@ -136,7 +138,7 @@ func (i *indexLocks) lockWith(ctx context.Context, lockFn lockFn, sector abi.Sec
 	return true, nil
 }
 
-func (i *indexLocks) StorageLock(ctx context.Context, sector abi.SectorID, read SectorFileType, write SectorFileType) error {
+func (i *indexLocks) StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error {
 	ok, err := i.lockWith(ctx, (*sectorLock).lock, sector, read, write)
 	if err != nil {
 		return err
@@ -149,6 +151,6 @@ func (i *indexLocks) StorageLock(ctx context.Context, sector abi.SectorID, read
 	return nil
 }
 
-func (i *indexLocks) StorageTryLock(ctx context.Context, sector abi.SectorID, read SectorFileType, write SectorFileType) (bool, error) {
+func (i *indexLocks) StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) {
 	return i.lockWith(ctx, (*sectorLock).tryLockSafe, sector, read, write)
 }
66  extern/sector-storage/stores/index_locks_test.go  vendored

@@ -8,6 +8,8 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"github.com/filecoin-project/go-state-types/abi"
+
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )
 
 var aSector = abi.SectorID{
@@ -17,41 +19,41 @@ var aSector = abi.SectorID{
 
 func TestCanLock(t *testing.T) {
 	lk := sectorLock{
-		r: [FileTypes]uint{},
-		w: FTNone,
+		r: [storiface.FileTypes]uint{},
+		w: storiface.FTNone,
 	}
 
-	require.Equal(t, true, lk.canLock(FTUnsealed, FTNone))
-	require.Equal(t, true, lk.canLock(FTNone, FTUnsealed))
+	require.Equal(t, true, lk.canLock(storiface.FTUnsealed, storiface.FTNone))
+	require.Equal(t, true, lk.canLock(storiface.FTNone, storiface.FTUnsealed))
 
-	ftAll := FTUnsealed | FTSealed | FTCache
+	ftAll := storiface.FTUnsealed | storiface.FTSealed | storiface.FTCache
 
-	require.Equal(t, true, lk.canLock(ftAll, FTNone))
-	require.Equal(t, true, lk.canLock(FTNone, ftAll))
+	require.Equal(t, true, lk.canLock(ftAll, storiface.FTNone))
+	require.Equal(t, true, lk.canLock(storiface.FTNone, ftAll))
 
 	lk.r[0] = 1 // unsealed read taken
 
-	require.Equal(t, true, lk.canLock(FTUnsealed, FTNone))
-	require.Equal(t, false, lk.canLock(FTNone, FTUnsealed))
+	require.Equal(t, true, lk.canLock(storiface.FTUnsealed, storiface.FTNone))
+	require.Equal(t, false, lk.canLock(storiface.FTNone, storiface.FTUnsealed))
 
-	require.Equal(t, true, lk.canLock(ftAll, FTNone))
-	require.Equal(t, false, lk.canLock(FTNone, ftAll))
+	require.Equal(t, true, lk.canLock(ftAll, storiface.FTNone))
+	require.Equal(t, false, lk.canLock(storiface.FTNone, ftAll))
 
-	require.Equal(t, true, lk.canLock(FTNone, FTSealed|FTCache))
-	require.Equal(t, true, lk.canLock(FTUnsealed, FTSealed|FTCache))
+	require.Equal(t, true, lk.canLock(storiface.FTNone, storiface.FTSealed|storiface.FTCache))
+	require.Equal(t, true, lk.canLock(storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache))
 
 	lk.r[0] = 0
 
-	lk.w = FTSealed
+	lk.w = storiface.FTSealed
 
-	require.Equal(t, true, lk.canLock(FTUnsealed, FTNone))
-	require.Equal(t, true, lk.canLock(FTNone, FTUnsealed))
+	require.Equal(t, true, lk.canLock(storiface.FTUnsealed, storiface.FTNone))
+	require.Equal(t, true, lk.canLock(storiface.FTNone, storiface.FTUnsealed))
 
-	require.Equal(t, false, lk.canLock(FTSealed, FTNone))
-	require.Equal(t, false, lk.canLock(FTNone, FTSealed))
+	require.Equal(t, false, lk.canLock(storiface.FTSealed, storiface.FTNone))
+	require.Equal(t, false, lk.canLock(storiface.FTNone, storiface.FTSealed))
 
-	require.Equal(t, false, lk.canLock(ftAll, FTNone))
-	require.Equal(t, false, lk.canLock(FTNone, ftAll))
+	require.Equal(t, false, lk.canLock(ftAll, storiface.FTNone))
+	require.Equal(t, false, lk.canLock(storiface.FTNone, ftAll))
 }
 
 func TestIndexLocksSeq(t *testing.T) {
@@ -61,32 +63,32 @@ func TestIndexLocksSeq(t *testing.T) {
 		locks: map[abi.SectorID]*sectorLock{},
 	}
 
-	require.NoError(t, ilk.StorageLock(ctx, aSector, FTNone, FTUnsealed))
+	require.NoError(t, ilk.StorageLock(ctx, aSector, storiface.FTNone, storiface.FTUnsealed))
 	cancel()
 
 	ctx, cancel = context.WithTimeout(context.Background(), time.Second)
-	require.NoError(t, ilk.StorageLock(ctx, aSector, FTNone, FTUnsealed))
+	require.NoError(t, ilk.StorageLock(ctx, aSector, storiface.FTNone, storiface.FTUnsealed))
 	cancel()
 
 	ctx, cancel = context.WithTimeout(context.Background(), time.Second)
-	require.NoError(t, ilk.StorageLock(ctx, aSector, FTNone, FTUnsealed))
+	require.NoError(t, ilk.StorageLock(ctx, aSector, storiface.FTNone, storiface.FTUnsealed))
 	cancel()
 
 	ctx, cancel = context.WithTimeout(context.Background(), time.Second)
-	require.NoError(t, ilk.StorageLock(ctx, aSector, FTUnsealed, FTNone))
+	require.NoError(t, ilk.StorageLock(ctx, aSector, storiface.FTUnsealed, storiface.FTNone))
 	cancel()
 
 	ctx, cancel = context.WithTimeout(context.Background(), time.Second)
-	require.NoError(t, ilk.StorageLock(ctx, aSector, FTNone, FTUnsealed))
+	require.NoError(t, ilk.StorageLock(ctx, aSector, storiface.FTNone, storiface.FTUnsealed))
 	cancel()
 
 	ctx, cancel = context.WithTimeout(context.Background(), time.Second)
-	require.NoError(t, ilk.StorageLock(ctx, aSector, FTNone, FTUnsealed))
+	require.NoError(t, ilk.StorageLock(ctx, aSector, storiface.FTNone, storiface.FTUnsealed))
 	cancel()
 }
 
 func TestIndexLocksBlockOn(t *testing.T) {
-	test := func(r1 SectorFileType, w1 SectorFileType, r2 SectorFileType, w2 SectorFileType) func(t *testing.T) {
+	test := func(r1 storiface.SectorFileType, w1 storiface.SectorFileType, r2 storiface.SectorFileType, w2 storiface.SectorFileType) func(t *testing.T) {
 		return func(t *testing.T) {
 			ctx, cancel := context.WithCancel(context.Background())
 
@@ -126,9 +128,9 @@ func TestIndexLocksBlockOn(t *testing.T) {
 		}
 	}
 
-	t.Run("readBlocksWrite", test(FTUnsealed, FTNone, FTNone, FTUnsealed))
-	t.Run("writeBlocksRead", test(FTNone, FTUnsealed, FTUnsealed, FTNone))
-	t.Run("writeBlocksWrite", test(FTNone, FTUnsealed, FTNone, FTUnsealed))
+	t.Run("readBlocksWrite", test(storiface.FTUnsealed, storiface.FTNone, storiface.FTNone, storiface.FTUnsealed))
+	t.Run("writeBlocksRead", test(storiface.FTNone, storiface.FTUnsealed, storiface.FTUnsealed, storiface.FTNone))
+	t.Run("writeBlocksWrite", test(storiface.FTNone, storiface.FTUnsealed, storiface.FTNone, storiface.FTUnsealed))
 }
 
 func TestIndexLocksBlockWonR(t *testing.T) {
@@ -138,7 +140,7 @@ func TestIndexLocksBlockWonR(t *testing.T) {
 		locks: map[abi.SectorID]*sectorLock{},
 	}
 
-	require.NoError(t, ilk.StorageLock(ctx, aSector, FTUnsealed, FTNone))
+	require.NoError(t, ilk.StorageLock(ctx, aSector, storiface.FTUnsealed, storiface.FTNone))
 
 	sch := make(chan struct{})
 	go func() {
@@ -146,7 +148,7 @@ func TestIndexLocksBlockWonR(t *testing.T) {
 
 		sch <- struct{}{}
 
-		require.NoError(t, ilk.StorageLock(ctx, aSector, FTNone, FTUnsealed))
+		require.NoError(t, ilk.StorageLock(ctx, aSector, storiface.FTNone, storiface.FTUnsealed))
 		cancel()
 
 		sch <- struct{}{}
24  extern/sector-storage/stores/interface.go  vendored

@@ -4,33 +4,21 @@ import (
 	"context"
 
 	"github.com/filecoin-project/go-state-types/abi"
 
 	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
-)
-
-type PathType string
-
-const (
-	PathStorage PathType = "storage"
-	PathSealing PathType = "sealing"
-)
-
-type AcquireMode string
-
-const (
-	AcquireMove AcquireMode = "move"
-	AcquireCopy AcquireMode = "copy"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )
 
 type Store interface {
-	AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, existing SectorFileType, allocate SectorFileType, sealing PathType, op AcquireMode) (paths SectorPaths, stores SectorPaths, err error)
-	Remove(ctx context.Context, s abi.SectorID, types SectorFileType, force bool) error
+	AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode) (paths storiface.SectorPaths, stores storiface.SectorPaths, err error)
+	Remove(ctx context.Context, s abi.SectorID, types storiface.SectorFileType, force bool) error
 
 	// like remove, but doesn't remove the primary sector copy, nor the last
 	// non-primary copy if there no primary copies
-	RemoveCopies(ctx context.Context, s abi.SectorID, types SectorFileType) error
+	RemoveCopies(ctx context.Context, s abi.SectorID, types storiface.SectorFileType) error
 
 	// move sectors into storage
-	MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types SectorFileType) error
+	MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types storiface.SectorFileType) error
 
 	FsStat(ctx context.Context, id ID) (fsutil.FsStat, error)
 }
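The PathType and AcquireMode declarations deleted here are referenced elsewhere in this diff as storiface.PathStorage, storiface.AcquireMove and so on, so they presumably moved to the storiface package in essentially this form:

package storiface

type PathType string

const (
    PathStorage PathType = "storage"
    PathSealing PathType = "sealing"
)

type AcquireMode string

const (
    AcquireMove AcquireMode = "move"
    AcquireCopy AcquireMode = "copy"
)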
183  extern/sector-storage/stores/local.go  vendored

@@ -16,6 +16,7 @@ import (
 	"github.com/filecoin-project/go-state-types/abi"
 
 	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )
 
 type StoragePath struct {
@@ -64,8 +65,6 @@ type LocalStorage interface {
 
 const MetaFile = "sectorstore.json"
 
-var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache}
-
 type Local struct {
 	localStorage LocalStorage
 	index        SectorIndex
@@ -80,7 +79,7 @@ type path struct {
 	local string // absolute local path
 
 	reserved     int64
-	reservations map[abi.SectorID]SectorFileType
+	reservations map[abi.SectorID]storiface.SectorFileType
 }
 
 func (p *path) stat(ls LocalStorage) (fsutil.FsStat, error) {
@@ -92,7 +91,7 @@ func (p *path) stat(ls LocalStorage) (fsutil.FsStat, error) {
 	stat.Reserved = p.reserved
 
 	for id, ft := range p.reservations {
-		for _, fileType := range PathTypes {
+		for _, fileType := range storiface.PathTypes {
 			if fileType&ft == 0 {
 				continue
 			}
@@ -130,8 +129,8 @@ func (p *path) stat(ls LocalStorage) (fsutil.FsStat, error) {
 	return stat, err
 }
 
-func (p *path) sectorPath(sid abi.SectorID, fileType SectorFileType) string {
-	return filepath.Join(p.local, fileType.String(), SectorName(sid))
+func (p *path) sectorPath(sid abi.SectorID, fileType storiface.SectorFileType) string {
+	return filepath.Join(p.local, fileType.String(), storiface.SectorName(sid))
 }
 
 func NewLocal(ctx context.Context, ls LocalStorage, index SectorIndex, urls []string) (*Local, error) {
@@ -165,7 +164,7 @@ func (st *Local) OpenPath(ctx context.Context, p string) error {
 		local: p,
 
 		reserved:     0,
-		reservations: map[abi.SectorID]SectorFileType{},
+		reservations: map[abi.SectorID]storiface.SectorFileType{},
 	}
 
 	fst, err := out.stat(st.localStorage)
@@ -184,33 +183,8 @@ func (st *Local) OpenPath(ctx context.Context, p string) error {
 		return xerrors.Errorf("declaring storage in index: %w", err)
 	}
 
-	for _, t := range PathTypes {
-		ents, err := ioutil.ReadDir(filepath.Join(p, t.String()))
-		if err != nil {
-			if os.IsNotExist(err) {
-				if err := os.MkdirAll(filepath.Join(p, t.String()), 0755); err != nil { // nolint
-					return xerrors.Errorf("openPath mkdir '%s': %w", filepath.Join(p, t.String()), err)
-				}
-
-				continue
-			}
-			return xerrors.Errorf("listing %s: %w", filepath.Join(p, t.String()), err)
-		}
-
-		for _, ent := range ents {
-			if ent.Name() == FetchTempSubdir {
-				continue
-			}
-
-			sid, err := ParseSectorID(ent.Name())
-			if err != nil {
-				return xerrors.Errorf("parse sector id %s: %w", ent.Name(), err)
-			}
-
-			if err := st.index.StorageDeclareSector(ctx, meta.ID, sid, t, meta.CanStore); err != nil {
-				return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", sid, t, meta.ID, err)
-			}
-		}
+	if err := st.declareSectors(ctx, p, meta.ID, meta.CanStore); err != nil {
+		return err
 	}
 
 	st.paths[meta.ID] = out
@@ -236,6 +210,83 @@ func (st *Local) open(ctx context.Context) error {
 	return nil
 }
 
+func (st *Local) Redeclare(ctx context.Context) error {
+	st.localLk.Lock()
+	defer st.localLk.Unlock()
+
+	for id, p := range st.paths {
+		mb, err := ioutil.ReadFile(filepath.Join(p.local, MetaFile))
+		if err != nil {
+			return xerrors.Errorf("reading storage metadata for %s: %w", p.local, err)
+		}
+
+		var meta LocalStorageMeta
+		if err := json.Unmarshal(mb, &meta); err != nil {
+			return xerrors.Errorf("unmarshalling storage metadata for %s: %w", p.local, err)
+		}
+
+		fst, err := p.stat(st.localStorage)
+		if err != nil {
+			return err
+		}
+
+		if id != meta.ID {
+			log.Errorf("storage path ID changed: %s; %s -> %s", p.local, id, meta.ID)
+			continue
+		}
+
+		err = st.index.StorageAttach(ctx, StorageInfo{
+			ID:       id,
+			URLs:     st.urls,
+			Weight:   meta.Weight,
+			CanSeal:  meta.CanSeal,
+			CanStore: meta.CanStore,
+		}, fst)
+		if err != nil {
+			return xerrors.Errorf("redeclaring storage in index: %w", err)
+		}
+
+		if err := st.declareSectors(ctx, p.local, meta.ID, meta.CanStore); err != nil {
+			return xerrors.Errorf("redeclaring sectors: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func (st *Local) declareSectors(ctx context.Context, p string, id ID, primary bool) error {
+	for _, t := range storiface.PathTypes {
+		ents, err := ioutil.ReadDir(filepath.Join(p, t.String()))
+		if err != nil {
+			if os.IsNotExist(err) {
+				if err := os.MkdirAll(filepath.Join(p, t.String()), 0755); err != nil { // nolint
+					return xerrors.Errorf("openPath mkdir '%s': %w", filepath.Join(p, t.String()), err)
+				}
+
+				continue
+			}
+			return xerrors.Errorf("listing %s: %w", filepath.Join(p, t.String()), err)
+		}
+
+		for _, ent := range ents {
+			if ent.Name() == FetchTempSubdir {
+				continue
+			}
+
+			sid, err := storiface.ParseSectorID(ent.Name())
+			if err != nil {
+				return xerrors.Errorf("parse sector id %s: %w", ent.Name(), err)
+			}
+
+			if err := st.index.StorageDeclareSector(ctx, id, sid, t, primary); err != nil {
+				return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", sid, t, id, err)
+			}
+		}
+	}
+
+	return nil
+}
+
 func (st *Local) reportHealth(ctx context.Context) {
 	// randomize interval by ~10%
 	interval := (HeartbeatInterval*100_000 + time.Duration(rand.Int63n(10_000))) / 100_000
@@ -263,13 +314,13 @@ func (st *Local) reportHealth(ctx context.Context) {
 
 		for id, report := range toReport {
 			if err := st.index.StorageReportHealth(ctx, id, report); err != nil {
-				log.Warnf("error reporting storage health for %s: %+v", id, report)
+				log.Warnf("error reporting storage health for %s (%+v): %+v", id, report, err)
 			}
 		}
 	}
 }
 
-func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, ssize abi.SectorSize, ft SectorFileType, storageIDs SectorPaths, overheadTab map[SectorFileType]int) (func(), error) {
+func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, ssize abi.SectorSize, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) {
 	st.localLk.Lock()
 
 	done := func() {}
@@ -279,12 +330,12 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, ssize abi.Sector
 		deferredDone()
 	}()
 
-	for _, fileType := range PathTypes {
+	for _, fileType := range storiface.PathTypes {
 		if fileType&ft == 0 {
 			continue
 		}
 
-		id := ID(PathByType(storageIDs, fileType))
+		id := ID(storiface.PathByType(storageIDs, fileType))
 
 		p, ok := st.paths[id]
 		if !ok {
@@ -296,7 +347,7 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, ssize abi.Sector
 			return nil, xerrors.Errorf("getting local storage stat: %w", err)
 		}
 
-		overhead := int64(overheadTab[fileType]) * int64(ssize) / FSOverheadDen
+		overhead := int64(overheadTab[fileType]) * int64(ssize) / storiface.FSOverheadDen
 
 		if stat.Available < overhead {
 			return nil, xerrors.Errorf("can't reserve %d bytes in '%s' (id:%s), only %d available", overhead, p.local, id, stat.Available)
@@ -319,18 +370,18 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, ssize abi.Sector
 	return done, nil
 }
 
-func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi.SectorSize, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, error) {
+func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi.SectorSize, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) {
 	if existing|allocate != existing^allocate {
-		return SectorPaths{}, SectorPaths{}, xerrors.New("can't both find and allocate a sector")
+		return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.New("can't both find and allocate a sector")
 	}
 
 	st.localLk.RLock()
 	defer st.localLk.RUnlock()
 
-	var out SectorPaths
-	var storageIDs SectorPaths
+	var out storiface.SectorPaths
+	var storageIDs storiface.SectorPaths
 
-	for _, fileType := range PathTypes {
+	for _, fileType := range storiface.PathTypes {
 		if fileType&existing == 0 {
 			continue
 		}
@@ -352,22 +403,22 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi.
 			}
 
 			spath := p.sectorPath(sid, fileType)
-			SetPathByType(&out, fileType, spath)
-			SetPathByType(&storageIDs, fileType, string(info.ID))
+			storiface.SetPathByType(&out, fileType, spath)
+			storiface.SetPathByType(&storageIDs, fileType, string(info.ID))
 
 			existing ^= fileType
 			break
 		}
 	}
 
-	for _, fileType := range PathTypes {
+	for _, fileType := range storiface.PathTypes {
 		if fileType&allocate == 0 {
 			continue
 		}
 
 		sis, err := st.index.StorageBestAlloc(ctx, fileType, ssize, pathType)
 		if err != nil {
-			return SectorPaths{}, SectorPaths{}, xerrors.Errorf("finding best storage for allocating : %w", err)
+			return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("finding best storage for allocating : %w", err)
 		}
 
 		var best string
@@ -383,11 +434,11 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi.
 				continue
 			}
 
-			if (pathType == PathSealing) && !si.CanSeal {
+			if (pathType == storiface.PathSealing) && !si.CanSeal {
 				continue
 			}
 
-			if (pathType == PathStorage) && !si.CanStore {
+			if (pathType == storiface.PathStorage) && !si.CanStore {
 				continue
 			}
 
@@ -399,11 +450,11 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi.
 		}
 
 		if best == "" {
-			return SectorPaths{}, SectorPaths{}, xerrors.Errorf("couldn't find a suitable path for a sector")
+			return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("couldn't find a suitable path for a sector")
 		}
 
-		SetPathByType(&out, fileType, best)
-		SetPathByType(&storageIDs, fileType, string(bestID))
+		storiface.SetPathByType(&out, fileType, best)
+		storiface.SetPathByType(&storageIDs, fileType, string(bestID))
 		allocate ^= fileType
 	}
 
@@ -437,7 +488,7 @@ func (st *Local) Local(ctx context.Context) ([]StoragePath, error) {
 	return out, nil
 }
 
-func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileType, force bool) error {
+func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, force bool) error {
 	if bits.OnesCount(uint(typ)) != 1 {
 		return xerrors.New("delete expects one file type")
 	}
@@ -460,7 +511,7 @@ func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileTyp
 	return nil
 }
 
-func (st *Local) RemoveCopies(ctx context.Context, sid abi.SectorID, typ SectorFileType) error {
+func (st *Local) RemoveCopies(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType) error {
 	if bits.OnesCount(uint(typ)) != 1 {
 		return xerrors.New("delete expects one file type")
 	}
@@ -496,7 +547,7 @@ func (st *Local) RemoveCopies(ctx context.Context, sid abi.SectorID, typ SectorF
 	return nil
 }
 
-func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ SectorFileType, storage ID) error {
+func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, storage ID) error {
 	p, ok := st.paths[storage]
 	if !ok {
 		return nil
@@ -520,28 +571,28 @@ func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ SectorF
 	return nil
 }
 
-func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types SectorFileType) error {
-	dest, destIds, err := st.AcquireSector(ctx, s, ssize, FTNone, types, PathStorage, AcquireMove)
+func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types storiface.SectorFileType) error {
+	dest, destIds, err := st.AcquireSector(ctx, s, ssize, storiface.FTNone, types, storiface.PathStorage, storiface.AcquireMove)
 	if err != nil {
 		return xerrors.Errorf("acquire dest storage: %w", err)
 	}
 
-	src, srcIds, err := st.AcquireSector(ctx, s, ssize, types, FTNone, PathStorage, AcquireMove)
+	src, srcIds, err := st.AcquireSector(ctx, s, ssize, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
 	if err != nil {
 		return xerrors.Errorf("acquire src storage: %w", err)
 	}
 
-	for _, fileType := range PathTypes {
+	for _, fileType := range storiface.PathTypes {
 		if fileType&types == 0 {
 			continue
 		}
 
-		sst, err := st.index.StorageInfo(ctx, ID(PathByType(srcIds, fileType)))
+		sst, err := st.index.StorageInfo(ctx, ID(storiface.PathByType(srcIds, fileType)))
 		if err != nil {
 			return xerrors.Errorf("failed to get source storage info: %w", err)
 		}
 
-		dst, err := st.index.StorageInfo(ctx, ID(PathByType(destIds, fileType)))
+		dst, err := st.index.StorageInfo(ctx, ID(storiface.PathByType(destIds, fileType)))
 		if err != nil {
 			return xerrors.Errorf("failed to get source storage info: %w", err)
 		}
 
@@ -558,17 +609,17 @@ func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.Sect
 
 		log.Debugf("moving %v(%d) to storage: %s(se:%t; st:%t) -> %s(se:%t; st:%t)", s, fileType, sst.ID, sst.CanSeal, sst.CanStore, dst.ID, dst.CanSeal, dst.CanStore)
 
-		if err := st.index.StorageDropSector(ctx, ID(PathByType(srcIds, fileType)), s, fileType); err != nil {
+		if err := st.index.StorageDropSector(ctx, ID(storiface.PathByType(srcIds, fileType)), s, fileType); err != nil {
 			return xerrors.Errorf("dropping source sector from index: %w", err)
 		}
 
-		if err := move(PathByType(src, fileType), PathByType(dest, fileType)); err != nil {
+		if err := move(storiface.PathByType(src, fileType), storiface.PathByType(dest, fileType)); err != nil {
 			// TODO: attempt some recovery (check if src is still there, re-declare)
 			return xerrors.Errorf("moving sector %v(%d): %w", s, fileType, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := st.index.StorageDeclareSector(ctx, ID(PathByType(destIds, fileType)), s, fileType, true); err != nil {
|
if err := st.index.StorageDeclareSector(ctx, ID(storiface.PathByType(destIds, fileType)), s, fileType, true); err != nil {
|
||||||
return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", s, fileType, ID(PathByType(destIds, fileType)), err)
|
return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", s, fileType, ID(storiface.PathByType(destIds, fileType)), err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
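The hunks above finish moving the path helpers and file-type constants from the stores package into storiface. As a quick orientation (not part of the diff), a minimal sketch of how callers use the relocated helpers; the path string is made up:

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

func main() {
	// Address one file type of a sector generically, the way the hunks above
	// do with SetPathByType/PathByType instead of touching struct fields.
	var sp storiface.SectorPaths
	storiface.SetPathByType(&sp, storiface.FTSealed, "/storage/sealed/s-t01000-1")

	fmt.Println(storiface.PathByType(sp, storiface.FTSealed))
}
```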
 54  extern/sector-storage/stores/remote.go (vendored)
@@ -38,7 +38,7 @@ type Remote struct {
 	fetching map[abi.SectorID]chan struct{}
 }
 
-func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, types SectorFileType) error {
+func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, types storiface.SectorFileType) error {
 	// TODO: do this on remotes too
 	//  (not that we really need to do that since it's always called by the
 	//  worker which pulled the copy)
@@ -58,9 +58,9 @@ func NewRemote(local *Local, index SectorIndex, auth http.Header, fetchLimit int
 	}
 }
 
-func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, error) {
+func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) {
 	if existing|allocate != existing^allocate {
-		return SectorPaths{}, SectorPaths{}, xerrors.New("can't both find and allocate a sector")
+		return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.New("can't both find and allocate a sector")
 	}
 
 	for {
@@ -79,7 +79,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se
 		case <-c:
 			continue
 		case <-ctx.Done():
-			return SectorPaths{}, SectorPaths{}, ctx.Err()
+			return storiface.SectorPaths{}, storiface.SectorPaths{}, ctx.Err()
 		}
 	}
 
@@ -92,62 +92,62 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se
 
 	paths, stores, err := r.local.AcquireSector(ctx, s, ssize, existing, allocate, pathType, op)
 	if err != nil {
-		return SectorPaths{}, SectorPaths{}, xerrors.Errorf("local acquire error: %w", err)
+		return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("local acquire error: %w", err)
 	}
 
-	var toFetch SectorFileType
-	for _, fileType := range PathTypes {
+	var toFetch storiface.SectorFileType
+	for _, fileType := range storiface.PathTypes {
 		if fileType&existing == 0 {
 			continue
 		}
 
-		if PathByType(paths, fileType) == "" {
+		if storiface.PathByType(paths, fileType) == "" {
 			toFetch |= fileType
 		}
 	}
 
-	apaths, ids, err := r.local.AcquireSector(ctx, s, ssize, FTNone, toFetch, pathType, op)
+	apaths, ids, err := r.local.AcquireSector(ctx, s, ssize, storiface.FTNone, toFetch, pathType, op)
 	if err != nil {
-		return SectorPaths{}, SectorPaths{}, xerrors.Errorf("allocate local sector for fetching: %w", err)
+		return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("allocate local sector for fetching: %w", err)
 	}
 
-	odt := FSOverheadSeal
-	if pathType == PathStorage {
-		odt = FsOverheadFinalized
+	odt := storiface.FSOverheadSeal
+	if pathType == storiface.PathStorage {
+		odt = storiface.FsOverheadFinalized
 	}
 
 	releaseStorage, err := r.local.Reserve(ctx, s, ssize, toFetch, ids, odt)
 	if err != nil {
-		return SectorPaths{}, SectorPaths{}, xerrors.Errorf("reserving storage space: %w", err)
+		return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("reserving storage space: %w", err)
 	}
 	defer releaseStorage()
 
-	for _, fileType := range PathTypes {
+	for _, fileType := range storiface.PathTypes {
 		if fileType&existing == 0 {
 			continue
 		}
 
-		if PathByType(paths, fileType) != "" {
+		if storiface.PathByType(paths, fileType) != "" {
 			continue
 		}
 
-		dest := PathByType(apaths, fileType)
-		storageID := PathByType(ids, fileType)
+		dest := storiface.PathByType(apaths, fileType)
+		storageID := storiface.PathByType(ids, fileType)
 
 		url, err := r.acquireFromRemote(ctx, s, fileType, dest)
 		if err != nil {
-			return SectorPaths{}, SectorPaths{}, err
+			return storiface.SectorPaths{}, storiface.SectorPaths{}, err
 		}
 
-		SetPathByType(&paths, fileType, dest)
-		SetPathByType(&stores, fileType, storageID)
+		storiface.SetPathByType(&paths, fileType, dest)
+		storiface.SetPathByType(&stores, fileType, storageID)
 
-		if err := r.index.StorageDeclareSector(ctx, ID(storageID), s, fileType, op == AcquireMove); err != nil {
+		if err := r.index.StorageDeclareSector(ctx, ID(storageID), s, fileType, op == storiface.AcquireMove); err != nil {
 			log.Warnf("declaring sector %v in %s failed: %+v", s, storageID, err)
 			continue
 		}
 
-		if op == AcquireMove {
+		if op == storiface.AcquireMove {
 			if err := r.deleteFromRemote(ctx, url); err != nil {
 				log.Warnf("deleting sector %v from %s (delete %s): %+v", s, storageID, url, err)
 			}
@@ -169,7 +169,7 @@ func tempFetchDest(spath string, create bool) (string, error) {
 	return filepath.Join(tempdir, b), nil
 }
 
-func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType SectorFileType, dest string) (string, error) {
+func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType storiface.SectorFileType, dest string) (string, error) {
 	si, err := r.index.StorageFindSector(ctx, s, fileType, 0, false)
 	if err != nil {
 		return "", err
@@ -281,9 +281,9 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error {
 	}
 }
 
-func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types SectorFileType) error {
+func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types storiface.SectorFileType) error {
 	// Make sure we have the data local
-	_, _, err := r.AcquireSector(ctx, s, ssize, types, FTNone, PathStorage, AcquireMove)
+	_, _, err := r.AcquireSector(ctx, s, ssize, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
 	if err != nil {
 		return xerrors.Errorf("acquire src storage (remote): %w", err)
 	}
@@ -291,7 +291,7 @@ func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.Sect
 	return r.local.MoveStorage(ctx, s, ssize, types)
 }
 
-func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileType, force bool) error {
+func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, force bool) error {
 	if bits.OnesCount(uint(typ)) != 1 {
 		return xerrors.New("delete expects one file type")
 	}
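To see the new storiface-typed signature from the caller side, here is a hedged sketch (not part of the diff) of fetching a sector's sealed files through Remote; the wrapper function and its arguments are illustrative:

```go
package example

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

// fetchSealed pulls a sector's sealed+cache files to a local storage path.
// AcquireMove asks the store to drop the remote copy once the fetch landed.
func fetchSealed(ctx context.Context, r *stores.Remote, sid abi.SectorID, ssize abi.SectorSize) (storiface.SectorPaths, error) {
	paths, _, err := r.AcquireSector(ctx, sid, ssize,
		storiface.FTSealed|storiface.FTCache, // existing: files we want locally
		storiface.FTNone,                     // allocate: nothing new
		storiface.PathStorage,
		storiface.AcquireMove,
	)
	return paths, err
}
```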
 142  extern/sector-storage/storiface/cbor_gen.go (vendored, new file)
@@ -0,0 +1,142 @@
+// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
+
+package storiface
+
+import (
+	"fmt"
+	"io"
+
+	cbg "github.com/whyrusleeping/cbor-gen"
+	xerrors "golang.org/x/xerrors"
+)
+
+var _ = xerrors.Errorf
+
+func (t *CallID) MarshalCBOR(w io.Writer) error {
+	if t == nil {
+		_, err := w.Write(cbg.CborNull)
+		return err
+	}
+	if _, err := w.Write([]byte{162}); err != nil {
+		return err
+	}
+
+	scratch := make([]byte, 9)
+
+	// t.Sector (abi.SectorID) (struct)
+	if len("Sector") > cbg.MaxLength {
+		return xerrors.Errorf("Value in field \"Sector\" was too long")
+	}
+
+	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Sector"))); err != nil {
+		return err
+	}
+	if _, err := io.WriteString(w, string("Sector")); err != nil {
+		return err
+	}
+
+	if err := t.Sector.MarshalCBOR(w); err != nil {
+		return err
+	}
+
+	// t.ID (uuid.UUID) (array)
+	if len("ID") > cbg.MaxLength {
+		return xerrors.Errorf("Value in field \"ID\" was too long")
+	}
+
+	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ID"))); err != nil {
+		return err
+	}
+	if _, err := io.WriteString(w, string("ID")); err != nil {
+		return err
+	}
+
+	if len(t.ID) > cbg.ByteArrayMaxLen {
+		return xerrors.Errorf("Byte array in field t.ID was too long")
+	}
+
+	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.ID))); err != nil {
+		return err
+	}
+
+	if _, err := w.Write(t.ID[:]); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (t *CallID) UnmarshalCBOR(r io.Reader) error {
+	*t = CallID{}
+
+	br := cbg.GetPeeker(r)
+	scratch := make([]byte, 8)
+
+	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+	if err != nil {
+		return err
+	}
+	if maj != cbg.MajMap {
+		return fmt.Errorf("cbor input should be of type map")
+	}
+
+	if extra > cbg.MaxLength {
+		return fmt.Errorf("CallID: map struct too large (%d)", extra)
+	}
+
+	var name string
+	n := extra
+
+	for i := uint64(0); i < n; i++ {
+
+		{
+			sval, err := cbg.ReadStringBuf(br, scratch)
+			if err != nil {
+				return err
+			}
+
+			name = string(sval)
+		}
+
+		switch name {
+		// t.Sector (abi.SectorID) (struct)
+		case "Sector":
+
+			{
+
+				if err := t.Sector.UnmarshalCBOR(br); err != nil {
+					return xerrors.Errorf("unmarshaling t.Sector: %w", err)
+				}
+
+			}
+		// t.ID (uuid.UUID) (array)
+		case "ID":
+
+			maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+			if err != nil {
+				return err
+			}
+
+			if extra > cbg.ByteArrayMaxLen {
+				return fmt.Errorf("t.ID: byte array too large (%d)", extra)
+			}
+			if maj != cbg.MajByteString {
+				return fmt.Errorf("expected byte array")
+			}
+
+			if extra != 16 {
+				return fmt.Errorf("expected array to have 16 elements")
+			}
+
+			t.ID = [16]uint8{}
+
+			if _, err := io.ReadFull(br, t.ID[:]); err != nil {
+				return err
+			}
+
+		default:
+			return fmt.Errorf("unknown struct field %d: '%s'", i, name)
+		}
+	}
+
+	return nil
+}
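CallID now has to survive a worker restart, hence the generated CBOR codec above. A small round-trip sketch (test-style, not part of the diff; the sector values are made up):

```go
package storiface_test

import (
	"bytes"
	"testing"

	"github.com/google/uuid"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

func TestCallIDRoundTrip(t *testing.T) {
	in := storiface.CallID{
		Sector: abi.SectorID{Miner: 1000, Number: 1},
		ID:     uuid.New(),
	}

	var buf bytes.Buffer
	if err := in.MarshalCBOR(&buf); err != nil {
		t.Fatal(err)
	}

	var out storiface.CallID
	if err := out.UnmarshalCBOR(&buf); err != nil {
		t.Fatal(err)
	}

	if out != in { // CallID is comparable: SectorID plus a [16]byte UUID
		t.Fatalf("got %s, want %s", out, in)
	}
}
```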
@@ -1,4 +1,4 @@
-package stores
+package storiface
 
 import (
 	"fmt"
@@ -16,6 +16,8 @@ const (
 	FileTypes = iota
 )
 
+var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache}
+
 const (
 	FTNone SectorFileType = 0
 )
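SectorFileType remains a bitmask, and the newly exported PathTypes slice is what the loops in local.go and remote.go range over. A tiny sketch of that idiom (illustrative only, not part of the diff):

```go
package example

import "github.com/filecoin-project/lotus/extern/sector-storage/storiface"

// expand lists the individual file types contained in a combined mask, the
// same way MoveStorage and AcquireSector walk storiface.PathTypes above.
func expand(mask storiface.SectorFileType) []storiface.SectorFileType {
	var out []storiface.SectorFileType
	for _, ft := range storiface.PathTypes { // FTUnsealed, FTSealed, FTCache
		if ft&mask == 0 {
			continue
		}
		out = append(out, ft)
	}
	return out
}
```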
 15  extern/sector-storage/storiface/storage.go (vendored, new file)
@@ -0,0 +1,15 @@
+package storiface
+
+type PathType string
+
+const (
+	PathStorage PathType = "storage"
+	PathSealing PathType = "sealing"
+)
+
+type AcquireMode string
+
+const (
+	AcquireMove AcquireMode = "move"
+	AcquireCopy AcquireMode = "copy"
+)
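These two string enums drive the placement decisions in the stores code above: PathSealing prefers paths with CanSeal, PathStorage prefers paths with CanStore, and AcquireMove allows the source copy to be dropped after a successful transfer, while AcquireCopy keeps it. A purely illustrative helper (not how lotus itself decides):

```go
package example

import "github.com/filecoin-project/lotus/extern/sector-storage/storiface"

// acquireModeFor: long-term storage placement moves files off the sealing
// scratch space, while sealing-time access only copies them. Illustrative.
func acquireModeFor(pt storiface.PathType) storiface.AcquireMode {
	if pt == storiface.PathStorage {
		return storiface.AcquireMove
	}
	return storiface.AcquireCopy
}
```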
 56  extern/sector-storage/storiface/worker.go (vendored)
@@ -1,9 +1,17 @@
 package storiface
 
 import (
+	"context"
+	"fmt"
+	"io"
 	"time"
 
+	"github.com/google/uuid"
+	"github.com/ipfs/go-cid"
+
 	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/specs-storage/storage"
 
 	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
 )
 
@@ -24,7 +32,8 @@ type WorkerResources struct {
 }
 
 type WorkerStats struct {
 	Info WorkerInfo
+	Enabled bool
 
 	MemUsedMin uint64
 	MemUsedMax uint64
@@ -33,10 +42,51 @@ type WorkerStats struct {
 }
 
 type WorkerJob struct {
-	ID     uint64
+	ID     CallID
 	Sector abi.SectorID
 	Task   sealtasks.TaskType
 
-	RunWait int // 0 - running, 1+ - assigned
+	RunWait int // -1 - ret-wait, 0 - running, 1+ - assigned
 	Start   time.Time
 }
+
+type CallID struct {
+	Sector abi.SectorID
+	ID     uuid.UUID
+}
+
+func (c CallID) String() string {
+	return fmt.Sprintf("%d-%d-%s", c.Sector.Miner, c.Sector.Number, c.ID)
+}
+
+var _ fmt.Stringer = &CallID{}
+
+var UndefCall CallID
+
+type WorkerCalls interface {
+	AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (CallID, error)
+	SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (CallID, error)
+	SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (CallID, error)
+	SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (CallID, error)
+	SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (CallID, error)
+	FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (CallID, error)
+	ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (CallID, error)
+	MoveStorage(ctx context.Context, sector abi.SectorID, types SectorFileType) (CallID, error)
+	UnsealPiece(context.Context, abi.SectorID, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (CallID, error)
+	ReadPiece(context.Context, io.Writer, abi.SectorID, UnpaddedByteIndex, abi.UnpaddedPieceSize) (CallID, error)
+	Fetch(context.Context, abi.SectorID, SectorFileType, PathType, AcquireMode) (CallID, error)
+}
+
+type WorkerReturn interface {
+	ReturnAddPiece(ctx context.Context, callID CallID, pi abi.PieceInfo, err string) error
+	ReturnSealPreCommit1(ctx context.Context, callID CallID, p1o storage.PreCommit1Out, err string) error
+	ReturnSealPreCommit2(ctx context.Context, callID CallID, sealed storage.SectorCids, err string) error
+	ReturnSealCommit1(ctx context.Context, callID CallID, out storage.Commit1Out, err string) error
+	ReturnSealCommit2(ctx context.Context, callID CallID, proof storage.Proof, err string) error
+	ReturnFinalizeSector(ctx context.Context, callID CallID, err string) error
+	ReturnReleaseUnsealed(ctx context.Context, callID CallID, err string) error
+	ReturnMoveStorage(ctx context.Context, callID CallID, err string) error
+	ReturnUnsealPiece(ctx context.Context, callID CallID, err string) error
+	ReturnReadPiece(ctx context.Context, callID CallID, ok bool, err string) error
+	ReturnFetch(ctx context.Context, callID CallID, err string) error
+}
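This is the core of the change: every sealing method in WorkerCalls now answers with a CallID immediately, and the actual result is delivered later through the matching WorkerReturn method, so a call can outlive both the RPC connection and the worker process. A hedged sketch (not part of the diff) of how a caller could pair the two sides; the channel-based waiter here is illustrative only, the real manager persists this state in its work tracker:

```go
package example

import (
	"context"
	"sync"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

// addPieceResult pairs the value and error string delivered via WorkerReturn.
type addPieceResult struct {
	pi  abi.PieceInfo
	err string
}

// returns is a toy, single-method WorkerReturn-style sink: it routes
// ReturnAddPiece callbacks to whoever is waiting on that CallID.
type returns struct {
	lk      sync.Mutex
	waiters map[storiface.CallID]chan addPieceResult
}

func (r *returns) chanFor(ci storiface.CallID) chan addPieceResult {
	r.lk.Lock()
	defer r.lk.Unlock()
	if r.waiters == nil {
		r.waiters = map[storiface.CallID]chan addPieceResult{}
	}
	ch, ok := r.waiters[ci]
	if !ok {
		ch = make(chan addPieceResult, 1) // buffered: the return side never blocks
		r.waiters[ci] = ch
	}
	return ch
}

// ReturnAddPiece mirrors the WorkerReturn method shape above.
func (r *returns) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error {
	r.chanFor(callID) <- addPieceResult{pi: pi, err: err}
	return nil
}

// addPiece shows the new call shape: the worker answers with a CallID right
// away, and the result arrives later through the return path.
func addPiece(ctx context.Context, w storiface.WorkerCalls, r *returns, sector abi.SectorID, sizes []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, data storage.Data) (abi.PieceInfo, string, error) {
	ci, err := w.AddPiece(ctx, sector, sizes, sz, data)
	if err != nil {
		return abi.PieceInfo{}, "", err
	}

	res := <-r.chanFor(ci)
	return res.pi, res.err, nil
}
```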
 81  extern/sector-storage/teststorage_test.go (vendored, new file)
@@ -0,0 +1,81 @@
+package sectorstorage
+
+import (
+	"context"
+	"io"
+
+	"github.com/ipfs/go-cid"
+
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/specs-actors/actors/runtime/proof"
+	"github.com/filecoin-project/specs-storage/storage"
+
+	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
+)
+
+type apres struct {
+	pi  abi.PieceInfo
+	err error
+}
+
+type testExec struct {
+	apch chan chan apres
+}
+
+func (t *testExec) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
+	panic("implement me")
+}
+
+func (t *testExec) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) (proof []proof.PoStProof, skipped []abi.SectorID, err error) {
+	panic("implement me")
+}
+
+func (t *testExec) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
+	panic("implement me")
+}
+
+func (t *testExec) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) {
+	panic("implement me")
+}
+
+func (t *testExec) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
+	panic("implement me")
+}
+
+func (t *testExec) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) {
+	panic("implement me")
+}
+
+func (t *testExec) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
+	panic("implement me")
+}
+
+func (t *testExec) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
+	panic("implement me")
+}
+
+func (t *testExec) Remove(ctx context.Context, sector abi.SectorID) error {
+	panic("implement me")
+}
+
+func (t *testExec) NewSector(ctx context.Context, sector abi.SectorID) error {
+	panic("implement me")
+}
+
+func (t *testExec) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
+	resp := make(chan apres)
+	t.apch <- resp
+	ar := <-resp
+	return ar.pi, ar.err
+}
+
+func (t *testExec) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error {
+	panic("implement me")
+}
+
+func (t *testExec) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
+	panic("implement me")
+}
+
+var _ ffiwrapper.Storage = &testExec{}
 82  extern/sector-storage/testworker_test.go (vendored)
@@ -3,7 +3,9 @@ package sectorstorage
 import (
 	"context"
 	"io"
+	"sync"
 
+	"github.com/google/uuid"
 	"github.com/ipfs/go-cid"
 
 	"github.com/filecoin-project/go-state-types/abi"
@@ -18,11 +20,18 @@ import (
 type testWorker struct {
 	acceptTasks map[sealtasks.TaskType]struct{}
 	lstor       *stores.Local
+	ret         storiface.WorkerReturn
 
 	mockSeal *mock.SectorMgr
+
+	pc1s    int
+	pc1lk   sync.Mutex
+	pc1wait *sync.WaitGroup
+
+	session uuid.UUID
 }
 
-func newTestWorker(wcfg WorkerConfig, lstor *stores.Local) *testWorker {
+func newTestWorker(wcfg WorkerConfig, lstor *stores.Local, ret storiface.WorkerReturn) *testWorker {
 	ssize, err := wcfg.SealProof.SectorSize()
 	if err != nil {
 		panic(err)
@@ -36,61 +45,98 @@ func newTestWorker(wcfg WorkerConfig, lstor *stores.Local) *testWorker {
 	return &testWorker{
 		acceptTasks: acceptTasks,
 		lstor:       lstor,
+		ret:         ret,
 
 		mockSeal: mock.NewMockSectorMgr(ssize, nil),
+
+		session: uuid.New(),
 	}
 }
 
-func (t *testWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
-	return t.mockSeal.SealPreCommit1(ctx, sector, ticket, pieces)
+func (t *testWorker) asyncCall(sector abi.SectorID, work func(ci storiface.CallID)) (storiface.CallID, error) {
+	ci := storiface.CallID{
+		Sector: sector,
+		ID:     uuid.New(),
+	}
+
+	go work(ci)
+
+	return ci, nil
 }
 
 func (t *testWorker) NewSector(ctx context.Context, sector abi.SectorID) error {
 	panic("implement me")
 }
 
-func (t *testWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error {
+func (t *testWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (t *testWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
+func (t *testWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (t *testWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
-	return t.mockSeal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)
+func (t *testWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
+	return t.asyncCall(sector, func(ci storiface.CallID) {
+		p, err := t.mockSeal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)
+		if err := t.ret.ReturnAddPiece(ctx, ci, p, errstr(err)); err != nil {
+			log.Error(err)
+		}
+	})
 }
 
-func (t *testWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) {
+func (t *testWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
+	return t.asyncCall(sector, func(ci storiface.CallID) {
+		t.pc1s++
+
+		if t.pc1wait != nil {
+			t.pc1wait.Done()
+		}
+
+		t.pc1lk.Lock()
+		defer t.pc1lk.Unlock()
+
+		p1o, err := t.mockSeal.SealPreCommit1(ctx, sector, ticket, pieces)
+		if err := t.ret.ReturnSealPreCommit1(ctx, ci, p1o, errstr(err)); err != nil {
+			log.Error(err)
+		}
+	})
+}
+
+func (t *testWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (t *testWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
+func (t *testWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (t *testWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) {
+func (t *testWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (t *testWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
+func (t *testWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (t *testWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
+func (t *testWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (t *testWorker) Remove(ctx context.Context, sector abi.SectorID) error {
+func (t *testWorker) Remove(ctx context.Context, sector abi.SectorID) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (t *testWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error {
+func (t *testWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) {
 	panic("implement me")
 }
 
-func (t *testWorker) Fetch(ctx context.Context, id abi.SectorID, fileType stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error {
-	return nil
+func (t *testWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
+	return t.asyncCall(sector, func(ci storiface.CallID) {
+		if err := t.ret.ReturnFetch(ctx, ci, ""); err != nil {
+			log.Error(err)
+		}
+	})
 }
 
 func (t *testWorker) TaskTypes(ctx context.Context) (map[sealtasks.TaskType]struct{}, error) {
@@ -116,8 +162,8 @@ func (t *testWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) {
 	}, nil
 }
 
-func (t *testWorker) Closing(ctx context.Context) (<-chan struct{}, error) {
-	return ctx.Done(), nil
+func (t *testWorker) Session(context.Context) (uuid.UUID, error) {
+	return t.session, nil
 }
 
 func (t *testWorker) Close() error {
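The pc1lk/pc1wait fields added above let a test keep PreCommit1 calls in flight while the manager is restarted. A sketch of how that gate might be driven from a test in the same package (assumed usage, not part of the diff):

```go
package sectorstorage

import "sync"

// holdPC1 is a sketch of how a test can park SealPreCommit1 calls inside the
// fake worker: grab pc1lk so the calls block, and get back a release func
// that waits for n calls to arrive before letting them finish.
func holdPC1(tw *testWorker, n int) func() {
	tw.pc1wait = &sync.WaitGroup{}
	tw.pc1wait.Add(n)

	tw.pc1lk.Lock() // SealPreCommit1 calls will now park on this lock

	return func() {
		tw.pc1wait.Wait() // all n calls entered SealPreCommit1 (Done runs before Lock)
		tw.pc1lk.Unlock() // let them run and report via ReturnSealPreCommit1
	}
}
```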
 129  extern/sector-storage/work_tracker.go (vendored, deleted)
@@ -1,129 +0,0 @@
-package sectorstorage
-
-import (
-	"context"
-	"io"
-	"sync"
-	"time"
-
-	"github.com/ipfs/go-cid"
-
-	"github.com/filecoin-project/go-state-types/abi"
-	"github.com/filecoin-project/specs-storage/storage"
-
-	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
-	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
-	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
-)
-
-type workTracker struct {
-	lk sync.Mutex
-
-	ctr     uint64
-	running map[uint64]storiface.WorkerJob
-
-	// TODO: done, aggregate stats, queue stats, scheduler feedback
-}
-
-func (wt *workTracker) track(sid abi.SectorID, task sealtasks.TaskType) func() {
-	wt.lk.Lock()
-	defer wt.lk.Unlock()
-
-	id := wt.ctr
-	wt.ctr++
-
-	wt.running[id] = storiface.WorkerJob{
-		ID:     id,
-		Sector: sid,
-		Task:   task,
-		Start:  time.Now(),
-	}
-
-	return func() {
-		wt.lk.Lock()
-		defer wt.lk.Unlock()
-
-		delete(wt.running, id)
-	}
-}
-
-func (wt *workTracker) worker(w Worker) Worker {
-	return &trackedWorker{
-		Worker:  w,
-		tracker: wt,
-	}
-}
-
-func (wt *workTracker) Running() []storiface.WorkerJob {
-	wt.lk.Lock()
-	defer wt.lk.Unlock()
-
-	out := make([]storiface.WorkerJob, 0, len(wt.running))
-	for _, job := range wt.running {
-		out = append(out, job)
-	}
-
-	return out
-}
-
-type trackedWorker struct {
-	Worker
-
-	tracker *workTracker
-}
-
-func (t *trackedWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
-	defer t.tracker.track(sector, sealtasks.TTPreCommit1)()
-
-	return t.Worker.SealPreCommit1(ctx, sector, ticket, pieces)
-}
-
-func (t *trackedWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) {
-	defer t.tracker.track(sector, sealtasks.TTPreCommit2)()
-
-	return t.Worker.SealPreCommit2(ctx, sector, pc1o)
-}
-
-func (t *trackedWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
-	defer t.tracker.track(sector, sealtasks.TTCommit1)()
-
-	return t.Worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
-}
-
-func (t *trackedWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) {
-	defer t.tracker.track(sector, sealtasks.TTCommit2)()
-
-	return t.Worker.SealCommit2(ctx, sector, c1o)
-}
-
-func (t *trackedWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
-	defer t.tracker.track(sector, sealtasks.TTFinalize)()
-
-	return t.Worker.FinalizeSector(ctx, sector, keepUnsealed)
-}
-
-func (t *trackedWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
-	defer t.tracker.track(sector, sealtasks.TTAddPiece)()
-
-	return t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)
-}
-
-func (t *trackedWorker) Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error {
-	defer t.tracker.track(s, sealtasks.TTFetch)()
-
-	return t.Worker.Fetch(ctx, s, ft, ptype, am)
-}
-
-func (t *trackedWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error {
-	defer t.tracker.track(id, sealtasks.TTUnseal)()
-
-	return t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid)
-}
-
-func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
-	defer t.tracker.track(id, sealtasks.TTReadUnsealed)()
-
-	return t.Worker.ReadPiece(ctx, writer, id, index, size)
-}
-
-var _ Worker = &trackedWorker{}
 117  extern/sector-storage/worker_calltracker.go (vendored, new file)
@@ -0,0 +1,117 @@
+package sectorstorage
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/filecoin-project/go-statestore"
+	cbg "github.com/whyrusleeping/cbor-gen"
+	"golang.org/x/xerrors"
+
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
+)
+
+type workerCallTracker struct {
+	st *statestore.StateStore // by CallID
+}
+
+type CallState uint64
+
+const (
+	CallStarted CallState = iota
+	CallDone
+	// returned -> remove
+)
+
+type Call struct {
+	ID      storiface.CallID
+	RetType ReturnType
+
+	State CallState
+
+	Result *ManyBytes // json bytes
+}
+
+func (wt *workerCallTracker) onStart(ci storiface.CallID, rt ReturnType) error {
+	return wt.st.Begin(ci, &Call{
+		ID:      ci,
+		RetType: rt,
+		State:   CallStarted,
+	})
+}
+
+func (wt *workerCallTracker) onDone(ci storiface.CallID, ret []byte) error {
+	st := wt.st.Get(ci)
+	return st.Mutate(func(cs *Call) error {
+		cs.State = CallDone
+		cs.Result = &ManyBytes{ret}
+		return nil
+	})
+}
+
+func (wt *workerCallTracker) onReturned(ci storiface.CallID) error {
+	st := wt.st.Get(ci)
+	return st.End()
+}
+
+func (wt *workerCallTracker) unfinished() ([]Call, error) {
+	var out []Call
+	return out, wt.st.List(&out)
+}
+
+// Ideally this would be a tag on the struct field telling cbor-gen to enforce higher max-len
+type ManyBytes struct {
+	b []byte
+}
+
+const many = 100 << 20
+
+func (t *ManyBytes) MarshalCBOR(w io.Writer) error {
+	if t == nil {
+		t = &ManyBytes{}
+	}
+
+	if len(t.b) > many {
+		return xerrors.Errorf("byte array in field t.Result was too long")
+	}
+
+	scratch := make([]byte, 9)
+
+	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.b))); err != nil {
+		return err
+	}
+
+	if _, err := w.Write(t.b[:]); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (t *ManyBytes) UnmarshalCBOR(r io.Reader) error {
+	*t = ManyBytes{}
+
+	br := cbg.GetPeeker(r)
+	scratch := make([]byte, 9)
+
+	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+	if err != nil {
+		return err
+	}
+
+	if extra > many {
+		return fmt.Errorf("byte array too large (%d)", extra)
+	}
+	if maj != cbg.MajByteString {
+		return fmt.Errorf("expected byte array")
+	}
+
+	if extra > 0 {
+		t.b = make([]uint8, extra)
+	}
+
+	if _, err := io.ReadFull(br, t.b[:]); err != nil {
+		return err
+	}
+
+	return nil
+}
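worker_calltracker.go is the persistence half of the feature: every async call is written to the worker's local statestore when it starts, updated with its (JSON-encoded) result when it finishes, and removed only after the return was delivered to the miner, so anything still present after a crash can be re-reported. A sketch of that lifecycle against an in-memory datastore (assumed wiring, written as if inside the sectorstorage package; the real worker passes in a datastore that survives restarts):

```go
package sectorstorage

import (
	"github.com/google/uuid"
	"github.com/ipfs/go-datastore"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-statestore"

	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

// exampleCallLifecycle walks one call through the tracker: persisted on
// start, result recorded on completion, and removed once the return was
// delivered to the miner.
func exampleCallLifecycle() error {
	ct := &workerCallTracker{
		st: statestore.New(datastore.NewMapDatastore()),
	}

	ci := storiface.CallID{
		Sector: abi.SectorID{Miner: 1000, Number: 1},
		ID:     uuid.New(),
	}

	if err := ct.onStart(ci, "SealPreCommit2"); err != nil {
		return err
	}

	// ... the task runs; its JSON-encoded result is recorded ...
	if err := ct.onDone(ci, []byte(`{"Unsealed":null,"Sealed":null}`)); err != nil {
		return err
	}

	// Once ReturnSealPreCommit2 succeeded on the miner side, drop the entry.
	return ct.onReturned(ci)
}
```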
550
extern/sector-storage/worker_local.go
vendored
Normal file
550
extern/sector-storage/worker_local.go
vendored
Normal file
@ -0,0 +1,550 @@
|
|||||||
|
package sectorstorage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"runtime"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/elastic/go-sysinfo"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"github.com/hashicorp/go-multierror"
|
||||||
|
"github.com/ipfs/go-cid"
|
||||||
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
|
ffi "github.com/filecoin-project/filecoin-ffi"
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/go-statestore"
|
||||||
|
storage2 "github.com/filecoin-project/specs-storage/storage"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||||
|
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
|
||||||
|
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||||
|
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||||
|
)
|
||||||
|
|
||||||
|
var pathTypes = []storiface.SectorFileType{storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache}
|
||||||
|
|
||||||
|
type WorkerConfig struct {
|
||||||
|
SealProof abi.RegisteredSealProof
|
||||||
|
TaskTypes []sealtasks.TaskType
|
||||||
|
NoSwap bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// used do provide custom proofs impl (mostly used in testing)
|
||||||
|
type ExecutorFunc func() (ffiwrapper.Storage, error)
|
||||||
|
|
||||||
|
type LocalWorker struct {
|
||||||
|
scfg *ffiwrapper.Config
|
||||||
|
storage stores.Store
|
||||||
|
localStore *stores.Local
|
||||||
|
sindex stores.SectorIndex
|
||||||
|
ret storiface.WorkerReturn
|
||||||
|
executor ExecutorFunc
|
||||||
|
noSwap bool
|
||||||
|
|
||||||
|
ct *workerCallTracker
|
||||||
|
acceptTasks map[sealtasks.TaskType]struct{}
|
||||||
|
running sync.WaitGroup
|
||||||
|
|
||||||
|
session uuid.UUID
|
||||||
|
closing chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex, ret storiface.WorkerReturn, cst *statestore.StateStore) *LocalWorker {
|
||||||
|
acceptTasks := map[sealtasks.TaskType]struct{}{}
|
||||||
|
for _, taskType := range wcfg.TaskTypes {
|
||||||
|
acceptTasks[taskType] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
w := &LocalWorker{
|
||||||
|
scfg: &ffiwrapper.Config{
|
||||||
|
SealProofType: wcfg.SealProof,
|
||||||
|
},
|
||||||
|
storage: store,
|
||||||
|
localStore: local,
|
||||||
|
sindex: sindex,
|
||||||
|
ret: ret,
|
||||||
|
|
||||||
|
ct: &workerCallTracker{
|
||||||
|
st: cst,
|
||||||
|
},
|
||||||
|
acceptTasks: acceptTasks,
|
||||||
|
executor: executor,
|
||||||
|
noSwap: wcfg.NoSwap,
|
||||||
|
|
||||||
|
session: uuid.New(),
|
||||||
|
closing: make(chan struct{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
if w.executor == nil {
|
||||||
|
w.executor = w.ffiExec
|
||||||
|
}
|
||||||
|
|
||||||
|
unfinished, err := w.ct.unfinished()
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("reading unfinished tasks: %+v", err)
|
||||||
|
return w
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
for _, call := range unfinished {
|
||||||
|
err := xerrors.Errorf("worker restarted")
|
||||||
|
|
||||||
|
// TODO: Handle restarting PC1 once support is merged
|
||||||
|
|
||||||
|
if doReturn(context.TODO(), call.RetType, call.ID, ret, nil, err) {
|
||||||
|
if err := w.ct.onReturned(call.ID); err != nil {
|
||||||
|
log.Errorf("marking call as returned failed: %s: %+v", call.RetType, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return w
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex, ret storiface.WorkerReturn, cst *statestore.StateStore) *LocalWorker {
|
||||||
|
return newLocalWorker(nil, wcfg, store, local, sindex, ret, cst)
|
||||||
|
}
|
||||||
|
|
||||||
|
type localWorkerPathProvider struct {
|
||||||
|
w *LocalWorker
|
||||||
|
op storiface.AcquireMode
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) {
|
||||||
|
ssize, err := l.w.scfg.SealProofType.SectorSize()
|
||||||
|
if err != nil {
|
||||||
|
return storiface.SectorPaths{}, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, ssize, existing, allocate, sealing, l.op)
|
||||||
|
if err != nil {
|
||||||
|
return storiface.SectorPaths{}, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
releaseStorage, err := l.w.localStore.Reserve(ctx, sector, ssize, allocate, storageIDs, storiface.FSOverheadSeal)
|
||||||
|
if err != nil {
|
||||||
|
return storiface.SectorPaths{}, nil, xerrors.Errorf("reserving storage space: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, paths)
|
||||||
|
|
||||||
|
return paths, func() {
|
||||||
|
releaseStorage()
|
||||||
|
|
||||||
|
for _, fileType := range pathTypes {
|
||||||
|
if fileType&allocate == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
sid := storiface.PathByType(storageIDs, fileType)
|
||||||
|
|
||||||
|
if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector, fileType, l.op == storiface.AcquireMove); err != nil {
|
||||||
|
log.Errorf("declare sector error: %+v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LocalWorker) ffiExec() (ffiwrapper.Storage, error) {
|
||||||
|
return ffiwrapper.New(&localWorkerPathProvider{w: l}, l.scfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
type ReturnType string
|
||||||
|
|
||||||
|
// in: func(WorkerReturn, context.Context, CallID, err string)
|
||||||
|
// in: func(WorkerReturn, context.Context, CallID, ret T, err string)
|
||||||
|
func rfunc(in interface{}) func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, error) error {
|
||||||
|
rf := reflect.ValueOf(in)
|
||||||
|
ft := rf.Type()
|
||||||
|
withRet := ft.NumIn() == 5
|
||||||
|
|
||||||
|
return func(ctx context.Context, ci storiface.CallID, wr storiface.WorkerReturn, i interface{}, err error) error {
|
||||||
|
rctx := reflect.ValueOf(ctx)
|
||||||
|
rwr := reflect.ValueOf(wr)
|
||||||
|
rerr := reflect.ValueOf(errstr(err))
|
||||||
|
rci := reflect.ValueOf(ci)
|
||||||
|
|
||||||
|
var ro []reflect.Value
|
||||||
|
|
||||||
|
if withRet {
|
||||||
|
ret := reflect.ValueOf(i)
|
||||||
|
if i == nil {
|
||||||
|
ret = reflect.Zero(rf.Type().In(3))
|
||||||
|
}
|
||||||
|
|
||||||
|
ro = rf.Call([]reflect.Value{rwr, rctx, rci, ret, rerr})
|
||||||
|
} else {
|
||||||
|
ro = rf.Call([]reflect.Value{rwr, rctx, rci, rerr})
|
||||||
|
}
|
||||||
|
|
||||||
|
if !ro[0].IsNil() {
|
||||||
|
return ro[0].Interface().(error)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, error) error{
|
||||||
|
"AddPiece": rfunc(storiface.WorkerReturn.ReturnAddPiece),
|
||||||
|
"SealPreCommit1": rfunc(storiface.WorkerReturn.ReturnSealPreCommit1),
|
||||||
|
"SealPreCommit2": rfunc(storiface.WorkerReturn.ReturnSealPreCommit2),
|
||||||
|
"SealCommit1": rfunc(storiface.WorkerReturn.ReturnSealCommit1),
|
||||||
|
"SealCommit2": rfunc(storiface.WorkerReturn.ReturnSealCommit2),
|
||||||
|
"FinalizeSector": rfunc(storiface.WorkerReturn.ReturnFinalizeSector),
|
||||||
|
"ReleaseUnsealed": rfunc(storiface.WorkerReturn.ReturnReleaseUnsealed),
|
||||||
|
"MoveStorage": rfunc(storiface.WorkerReturn.ReturnMoveStorage),
|
||||||
|
"UnsealPiece": rfunc(storiface.WorkerReturn.ReturnUnsealPiece),
|
||||||
|
"ReadPiece": rfunc(storiface.WorkerReturn.ReturnReadPiece),
|
||||||
|
"Fetch": rfunc(storiface.WorkerReturn.ReturnFetch),
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LocalWorker) asyncCall(ctx context.Context, sector abi.SectorID, rt ReturnType, work func(ctx context.Context, ci storiface.CallID) (interface{}, error)) (storiface.CallID, error) {
|
||||||
|
ci := storiface.CallID{
|
||||||
|
Sector: sector,
|
||||||
|
ID: uuid.New(),
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := l.ct.onStart(ci, rt); err != nil {
|
||||||
|
log.Errorf("tracking call (start): %+v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.running.Add(1)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
defer l.running.Done()
|
||||||
|
|
||||||
|
ctx := &wctx{
|
||||||
|
vals: ctx,
|
||||||
|
closing: l.closing,
|
||||||
|
}
|
||||||
|
|
||||||
|
res, err := work(ctx, ci)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
rb, err := json.Marshal(res)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("tracking call (marshaling results): %+v", err)
|
||||||
|
} else {
|
||||||
|
if err := l.ct.onDone(ci, rb); err != nil {
|
||||||
|
log.Errorf("tracking call (done): %+v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if doReturn(ctx, rt, ci, l.ret, res, err) {
|
||||||
|
if err := l.ct.onReturned(ci); err != nil {
|
||||||
|
log.Errorf("tracking call (done): %+v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return ci, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// doReturn tries to send the result to manager, returns true if successful
|
||||||
|
func doReturn(ctx context.Context, rt ReturnType, ci storiface.CallID, ret storiface.WorkerReturn, res interface{}, rerr error) bool {
|
||||||
|
for {
|
||||||
|
err := returnFunc[rt](ctx, ci, ret, res, rerr)
|
||||||
|
if err == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Errorf("return error, will retry in 5s: %s: %+v", rt, err)
|
||||||
|
select {
|
||||||
|
case <-time.After(5 * time.Second):
|
||||||
|
case <-ctx.Done():
|
||||||
|
log.Errorf("failed to return results: %s", ctx.Err())
|
||||||
|
|
||||||
|
// fine to just return, worker is most likely shutting down, and
|
||||||
|
// we didn't mark the result as returned yet, so we'll try to
|
||||||
|
// re-submit it on restart
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func errstr(err error) string {
|
||||||
|
if err != nil {
|
||||||
|
return err.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
func (l *LocalWorker) NewSector(ctx context.Context, sector abi.SectorID) error {
	sb, err := l.executor()
	if err != nil {
		return err
	}

	return sb.NewSector(ctx, sector)
}

func (l *LocalWorker) AddPiece(ctx context.Context, sector abi.SectorID, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (storiface.CallID, error) {
	sb, err := l.executor()
	if err != nil {
		return storiface.UndefCall, err
	}

	return l.asyncCall(ctx, sector, "AddPiece", func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
		return sb.AddPiece(ctx, sector, epcs, sz, r)
	})
}

func (l *LocalWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
	return l.asyncCall(ctx, sector, "Fetch", func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
		_, done, err := (&localWorkerPathProvider{w: l, op: am}).AcquireSector(ctx, sector, fileType, storiface.FTNone, ptype)
		if err == nil {
			done()
		}

		return nil, err
	})
}

func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
	return l.asyncCall(ctx, sector, "SealPreCommit1", func(ctx context.Context, ci storiface.CallID) (interface{}, error) {

		{
			// cleanup previous failed attempts if they exist
			if err := l.storage.Remove(ctx, sector, storiface.FTSealed, true); err != nil {
				return nil, xerrors.Errorf("cleaning up sealed data: %w", err)
			}

			if err := l.storage.Remove(ctx, sector, storiface.FTCache, true); err != nil {
				return nil, xerrors.Errorf("cleaning up cache data: %w", err)
			}
		}

		sb, err := l.executor()
		if err != nil {
			return nil, err
		}

		return sb.SealPreCommit1(ctx, sector, ticket, pieces)
	})
}

func (l *LocalWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.PreCommit1Out) (storiface.CallID, error) {
	sb, err := l.executor()
	if err != nil {
		return storiface.UndefCall, err
	}

	return l.asyncCall(ctx, sector, "SealPreCommit2", func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
		return sb.SealPreCommit2(ctx, sector, phase1Out)
	})
}

func (l *LocalWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage2.SectorCids) (storiface.CallID, error) {
	sb, err := l.executor()
	if err != nil {
		return storiface.UndefCall, err
	}

	return l.asyncCall(ctx, sector, "SealCommit1", func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
		return sb.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
	})
}

func (l *LocalWorker) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.Commit1Out) (storiface.CallID, error) {
	sb, err := l.executor()
	if err != nil {
		return storiface.UndefCall, err
	}

	return l.asyncCall(ctx, sector, "SealCommit2", func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
		return sb.SealCommit2(ctx, sector, phase1Out)
	})
}

func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage2.Range) (storiface.CallID, error) {
	sb, err := l.executor()
	if err != nil {
		return storiface.UndefCall, err
	}

	return l.asyncCall(ctx, sector, "FinalizeSector", func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
		if err := sb.FinalizeSector(ctx, sector, keepUnsealed); err != nil {
			return nil, xerrors.Errorf("finalizing sector: %w", err)
		}

		if len(keepUnsealed) == 0 {
			if err := l.storage.Remove(ctx, sector, storiface.FTUnsealed, true); err != nil {
				return nil, xerrors.Errorf("removing unsealed data: %w", err)
			}
		}

		return nil, err
	})
}

func (l *LocalWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage2.Range) (storiface.CallID, error) {
	return storiface.UndefCall, xerrors.Errorf("implement me")
}

func (l *LocalWorker) Remove(ctx context.Context, sector abi.SectorID) error {
	var err error

	if rerr := l.storage.Remove(ctx, sector, storiface.FTSealed, true); rerr != nil {
		err = multierror.Append(err, xerrors.Errorf("removing sector (sealed): %w", rerr))
	}
	if rerr := l.storage.Remove(ctx, sector, storiface.FTCache, true); rerr != nil {
		err = multierror.Append(err, xerrors.Errorf("removing sector (cache): %w", rerr))
	}
	if rerr := l.storage.Remove(ctx, sector, storiface.FTUnsealed, true); rerr != nil {
		err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr))
	}

	return err
}

func (l *LocalWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) {
	return l.asyncCall(ctx, sector, "MoveStorage", func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
		ssize, err := l.scfg.SealProofType.SectorSize()
		if err != nil {
			return nil, err
		}

		return nil, l.storage.MoveStorage(ctx, sector, ssize, types)
	})
}

func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) {
	sb, err := l.executor()
	if err != nil {
		return storiface.UndefCall, err
	}

	return l.asyncCall(ctx, sector, "UnsealPiece", func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
		if err = sb.UnsealPiece(ctx, sector, index, size, randomness, cid); err != nil {
			return nil, xerrors.Errorf("unsealing sector: %w", err)
		}

		if err = l.storage.RemoveCopies(ctx, sector, storiface.FTSealed); err != nil {
			return nil, xerrors.Errorf("removing source data: %w", err)
		}

		if err = l.storage.RemoveCopies(ctx, sector, storiface.FTCache); err != nil {
			return nil, xerrors.Errorf("removing source data: %w", err)
		}

		return nil, nil
	})
}

func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
	sb, err := l.executor()
	if err != nil {
		return storiface.UndefCall, err
	}

	return l.asyncCall(ctx, sector, "ReadPiece", func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
		return sb.ReadPiece(ctx, writer, sector, index, size)
	})
}

func (l *LocalWorker) TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) {
	return l.acceptTasks, nil
}

func (l *LocalWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) {
	return l.localStore.Local(ctx)
}

func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) {
	hostname, err := os.Hostname() // TODO: allow overriding from config
	if err != nil {
		panic(err)
	}

	gpus, err := ffi.GetGPUDevices()
	if err != nil {
		log.Errorf("getting gpu devices failed: %+v", err)
	}

	h, err := sysinfo.Host()
	if err != nil {
		return storiface.WorkerInfo{}, xerrors.Errorf("getting host info: %w", err)
	}

	mem, err := h.Memory()
	if err != nil {
		return storiface.WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err)
	}

	memSwap := mem.VirtualTotal
	if l.noSwap {
		memSwap = 0
	}

	return storiface.WorkerInfo{
		Hostname: hostname,
		Resources: storiface.WorkerResources{
			MemPhysical: mem.Total,
			MemSwap:     memSwap,
			MemReserved: mem.VirtualUsed + mem.Total - mem.Available, // TODO: sub this process
			CPUs:        uint64(runtime.NumCPU()),
			GPUs:        gpus,
		},
	}, nil
}
func (l *LocalWorker) Session(ctx context.Context) (uuid.UUID, error) {
	select {
	case <-l.closing:
		return ClosedWorkerID, nil
	default:
		return l.session, nil
	}
}
func (l *LocalWorker) Close() error {
	close(l.closing)
	return nil
}

// WaitQuiet blocks as long as there are tasks running
func (l *LocalWorker) WaitQuiet() {
	l.running.Wait()
}

type wctx struct {
	vals    context.Context
	closing chan struct{}
}

func (w *wctx) Deadline() (time.Time, bool) {
	return time.Time{}, false
}

func (w *wctx) Done() <-chan struct{} {
	return w.closing
}

func (w *wctx) Err() error {
	select {
	case <-w.closing:
		return context.Canceled
	default:
		return nil
	}
}

func (w *wctx) Value(key interface{}) interface{} {
	return w.vals.Value(key)
}

var _ context.Context = &wctx{}

var _ Worker = &LocalWorker{}
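The session UUID above is what lets the manager notice that a worker process has come back as a different instance: l.session is presumably generated when the LocalWorker is constructed (the constructor is not part of this excerpt), and ClosedWorkerID is reported once l.closing is closed. A minimal sketch of how a caller could use this, assuming only the Session signature shown above; the helper name and the structural interface are hypothetical, not part of this change:

func sessionChanged(ctx context.Context, w interface {
	Session(context.Context) (uuid.UUID, error)
}, known uuid.UUID) (bool, error) {
	// Compare the currently reported session against the one observed earlier;
	// a different UUID (or ClosedWorkerID) means the worker restarted or is shutting down.
	cur, err := w.Session(ctx)
	if err != nil {
		return false, err
	}
	return cur == ClosedWorkerID || cur != known, nil
}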
extern/sector-storage/worker_tracked.go (vendored, new file, 138 lines)
@ -0,0 +1,138 @@
package sectorstorage

import (
	"context"
	"io"
	"sync"
	"time"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

type trackedWork struct {
	job    storiface.WorkerJob
	worker WorkerID
}

type workTracker struct {
	lk sync.Mutex

	done    map[storiface.CallID]struct{}
	running map[storiface.CallID]trackedWork

	// TODO: done, aggregate stats, queue stats, scheduler feedback
}

func (wt *workTracker) onDone(callID storiface.CallID) {
	wt.lk.Lock()
	defer wt.lk.Unlock()

	_, ok := wt.running[callID]
	if !ok {
		wt.done[callID] = struct{}{}
		return
	}

	delete(wt.running, callID)
}

func (wt *workTracker) track(wid WorkerID, sid abi.SectorID, task sealtasks.TaskType) func(storiface.CallID, error) (storiface.CallID, error) {
	return func(callID storiface.CallID, err error) (storiface.CallID, error) {
		if err != nil {
			return callID, err
		}

		wt.lk.Lock()
		defer wt.lk.Unlock()

		_, done := wt.done[callID]
		if done {
			delete(wt.done, callID)
			return callID, err
		}

		wt.running[callID] = trackedWork{
			job: storiface.WorkerJob{
				ID:     callID,
				Sector: sid,
				Task:   task,
				Start:  time.Now(),
			},
			worker: wid,
		}

		return callID, err
	}
}

func (wt *workTracker) worker(wid WorkerID, w Worker) Worker {
	return &trackedWorker{
		Worker: w,
		wid:    wid,

		tracker: wt,
	}
}

func (wt *workTracker) Running() []trackedWork {
	wt.lk.Lock()
	defer wt.lk.Unlock()

	out := make([]trackedWork, 0, len(wt.running))
	for _, job := range wt.running {
		out = append(out, job)
	}

	return out
}

type trackedWorker struct {
	Worker
	wid WorkerID

	tracker *workTracker
}

func (t *trackedWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
	return t.tracker.track(t.wid, sector, sealtasks.TTPreCommit1)(t.Worker.SealPreCommit1(ctx, sector, ticket, pieces))
}

func (t *trackedWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
	return t.tracker.track(t.wid, sector, sealtasks.TTPreCommit2)(t.Worker.SealPreCommit2(ctx, sector, pc1o))
}

func (t *trackedWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
	return t.tracker.track(t.wid, sector, sealtasks.TTCommit1)(t.Worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids))
}

func (t *trackedWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) {
	return t.tracker.track(t.wid, sector, sealtasks.TTCommit2)(t.Worker.SealCommit2(ctx, sector, c1o))
}

func (t *trackedWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) {
	return t.tracker.track(t.wid, sector, sealtasks.TTFinalize)(t.Worker.FinalizeSector(ctx, sector, keepUnsealed))
}

func (t *trackedWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
	return t.tracker.track(t.wid, sector, sealtasks.TTAddPiece)(t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData))
}

func (t *trackedWorker) Fetch(ctx context.Context, s abi.SectorID, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
	return t.tracker.track(t.wid, s, sealtasks.TTFetch)(t.Worker.Fetch(ctx, s, ft, ptype, am))
}

func (t *trackedWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) {
	return t.tracker.track(t.wid, id, sealtasks.TTUnseal)(t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid))
}

func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
	return t.tracker.track(t.wid, id, sealtasks.TTReadUnsealed)(t.Worker.ReadPiece(ctx, writer, id, index, size))
}

var _ Worker = &trackedWorker{}
extern/storage-sealing/fsm.go (vendored, 14 changed lines)
@ -45,16 +45,22 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
		on(SectorAddPiece{}, WaitDeals),
		on(SectorStartPacking{}, Packing),
	),
-	Packing: planOne(on(SectorPacked{}, PreCommit1)),
+	Packing: planOne(on(SectorPacked{}, GetTicket)),
+	GetTicket: planOne(
+		on(SectorTicket{}, PreCommit1),
+		on(SectorCommitFailed{}, CommitFailed),
+	),
	PreCommit1: planOne(
		on(SectorPreCommit1{}, PreCommit2),
		on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
		on(SectorDealsExpired{}, DealsExpired),
		on(SectorInvalidDealIDs{}, RecoverDealIDs),
+		on(SectorOldTicket{}, GetTicket),
	),
	PreCommit2: planOne(
		on(SectorPreCommit2{}, PreCommitting),
		on(SectorSealPreCommit2Failed{}, SealPreCommit2Failed),
+		on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
	),
	PreCommitting: planOne(
		on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
@ -121,6 +127,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
		on(SectorRetryCommitWait{}, CommitWait),
		on(SectorDealsExpired{}, DealsExpired),
		on(SectorInvalidDealIDs{}, RecoverDealIDs),
+		on(SectorTicketExpired{}, Removing),
	),
	FinalizeFailed: planOne(
		on(SectorRetryFinalize{}, FinalizeSector),
@ -219,6 +226,9 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
	*<- Packing <- incoming committed capacity
	|   |
	|   v
+	|   GetTicket
+	|   |   ^
+	|   v   |
	*<- PreCommit1 <--> SealPreCommit1Failed
	|   |       ^          ^^
	|   |       *----------++----\
@ -267,6 +277,8 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
		log.Infof("Waiting for deals %d", state.SectorNumber)
	case Packing:
		return m.handlePacking, processed, nil
+	case GetTicket:
+		return m.handleGetTicket, processed, nil
	case PreCommit1:
		return m.handlePreCommit1, processed, nil
	case PreCommit2:
extern/storage-sealing/fsm_events.go (vendored, 23 changed lines)
@ -101,16 +101,26 @@ func (evt SectorPacked) apply(state *SectorInfo) {
	}
}

+type SectorTicket struct {
+	TicketValue abi.SealRandomness
+	TicketEpoch abi.ChainEpoch
+}
+
+func (evt SectorTicket) apply(state *SectorInfo) {
+	state.TicketEpoch = evt.TicketEpoch
+	state.TicketValue = evt.TicketValue
+}
+
+type SectorOldTicket struct{}
+
+func (evt SectorOldTicket) apply(*SectorInfo) {}
+
type SectorPreCommit1 struct {
	PreCommit1Out storage.PreCommit1Out
-	TicketValue   abi.SealRandomness
-	TicketEpoch   abi.ChainEpoch
}

func (evt SectorPreCommit1) apply(state *SectorInfo) {
	state.PreCommit1Out = evt.PreCommit1Out
-	state.TicketEpoch = evt.TicketEpoch
-	state.TicketValue = evt.TicketValue
	state.PreCommit2Fails = 0
}

@ -196,6 +206,11 @@ type SectorDealsExpired struct{ error }
func (evt SectorDealsExpired) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorDealsExpired) apply(*SectorInfo) {}

+type SectorTicketExpired struct{ error }
+
+func (evt SectorTicketExpired) FormatError(xerrors.Printer) (next error) { return evt.error }
+func (evt SectorTicketExpired) apply(*SectorInfo) {}
+
type SectorCommitted struct {
	Proof []byte
}
extern/storage-sealing/fsm_test.go (vendored, 8 changed lines)
@ -44,6 +44,9 @@ func TestHappyPath(t *testing.T) {
	}

	m.planSingle(SectorPacked{})
+	require.Equal(m.t, m.state.State, GetTicket)
+
+	m.planSingle(SectorTicket{})
	require.Equal(m.t, m.state.State, PreCommit1)

	m.planSingle(SectorPreCommit1{})
@ -73,7 +76,7 @@ func TestHappyPath(t *testing.T) {
	m.planSingle(SectorFinalized{})
	require.Equal(m.t, m.state.State, Proving)

-	expected := []SectorState{Packing, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector, Proving}
+	expected := []SectorState{Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector, Proving}
	for i, n := range notif {
		if n.before.State != expected[i] {
			t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State)
@ -98,6 +101,9 @@ func TestSeedRevert(t *testing.T) {
	}

	m.planSingle(SectorPacked{})
+	require.Equal(m.t, m.state.State, GetTicket)
+
+	m.planSingle(SectorTicket{})
	require.Equal(m.t, m.state.State, PreCommit1)

	m.planSingle(SectorPreCommit1{})
extern/storage-sealing/sealing.go (vendored, 1 changed line)
@ -53,6 +53,7 @@ type SealingAPI interface {
	StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok TipSetToken) (address.Address, error)
	StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error)
	StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error)
+	StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, TipSetToken) (bool, error)
	StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error)
	StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error)
	SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error)
extern/storage-sealing/sector_state.go (vendored, 1 changed line)
@ -41,6 +41,7 @@ const (
	Empty         SectorState = "Empty"
	WaitDeals     SectorState = "WaitDeals" // waiting for more pieces (deals) to be added to the sector
	Packing       SectorState = "Packing"   // sector not in sealStore, and not on chain
+	GetTicket     SectorState = "GetTicket" // generate ticket
	PreCommit1    SectorState = "PreCommit1" // do PreCommit1
	PreCommit2    SectorState = "PreCommit2" // do PreCommit2
	PreCommitting SectorState = "PreCommitting" // on chain pre-commit
extern/storage-sealing/states_failed.go (vendored, 2 changed lines)
@ -170,7 +170,7 @@ func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo
	case *ErrExpiredTicket:
		return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("ticket expired error: %w", err)})
	case *ErrBadTicket:
-		return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad ticket: %w", err)})
+		return ctx.Send(SectorTicketExpired{xerrors.Errorf("expired ticket: %w", err)})
	case *ErrInvalidDeals:
		log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err)
		return ctx.Send(SectorInvalidDealIDs{Return: RetCommitFailed})
extern/storage-sealing/states_sealing.go (vendored, 56 changed lines)
@ -4,10 +4,7 @@ import (
	"bytes"
	"context"

-	"github.com/filecoin-project/lotus/chain/actors"
-	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
-	"github.com/filecoin-project/lotus/chain/actors/policy"
+	"github.com/ipfs/go-cid"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"
@ -15,10 +12,16 @@ import (
	"github.com/filecoin-project/go-state-types/crypto"
	"github.com/filecoin-project/go-state-types/exitcode"
	"github.com/filecoin-project/go-statemachine"
+	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
	"github.com/filecoin-project/specs-storage/storage"
+
+	"github.com/filecoin-project/lotus/chain/actors"
+	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+	"github.com/filecoin-project/lotus/chain/actors/policy"
)

var DealSectorPriority = 1024
+var MaxTicketAge = abi.ChainEpoch(builtin0.EpochsInDay * 2)

func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) error {
	log.Infow("performing filling up rest of the sector...", "sector", sector.SectorNumber)
@ -81,6 +84,33 @@ func (m *Sealing) getTicket(ctx statemachine.Context, sector SectorInfo) (abi.Se
	return abi.SealRandomness(rand), ticketEpoch, nil
}

+func (m *Sealing) handleGetTicket(ctx statemachine.Context, sector SectorInfo) error {
+	ticketValue, ticketEpoch, err := m.getTicket(ctx, sector)
+	if err != nil {
+		allocated, aerr := m.api.StateMinerSectorAllocated(ctx.Context(), m.maddr, sector.SectorNumber, nil)
+		if aerr == nil {
+			log.Errorf("error checking if sector is allocated: %+v", err)
+		}
+
+		if allocated {
+			if sector.CommitMessage != nil {
+				// Some recovery paths with unfortunate timing lead here
+				return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector %s is committed but got into the GetTicket state", sector.SectorNumber)})
+			}
+
+			log.Errorf("Sector %s precommitted but expired", sector.SectorNumber)
+			return ctx.Send(SectorRemove{})
+		}
+
+		return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("getting ticket failed: %w", err)})
+	}
+
+	return ctx.Send(SectorTicket{
+		TicketValue: ticketValue,
+		TicketEpoch: ticketEpoch,
+	})
+}
+
func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) error {
	if err := checkPieces(ctx.Context(), m.maddr, sector, m.api); err != nil { // Sanity check state
		switch err.(type) {
@ -97,21 +127,23 @@ func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo)
		}
	}

-	log.Infow("performing sector replication...", "sector", sector.SectorNumber)
-	ticketValue, ticketEpoch, err := m.getTicket(ctx, sector)
+	_, height, err := m.api.ChainHead(ctx.Context())
	if err != nil {
-		return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("getting ticket failed: %w", err)})
+		log.Errorf("handlePreCommit1: api error, not proceeding: %+v", err)
+		return nil
	}

-	pc1o, err := m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), ticketValue, sector.pieceInfos())
+	if height-sector.TicketEpoch > MaxTicketAge {
+		return ctx.Send(SectorOldTicket{})
+	}
+
+	pc1o, err := m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.TicketValue, sector.pieceInfos())
	if err != nil {
		return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("seal pre commit(1) failed: %w", err)})
	}

	return ctx.Send(SectorPreCommit1{
		PreCommit1Out: pc1o,
-		TicketValue:   ticketValue,
-		TicketEpoch:   ticketEpoch,
	})
}

@ -121,6 +153,10 @@ func (m *Sealing) handlePreCommit2(ctx statemachine.Context, sector SectorInfo)
		return ctx.Send(SectorSealPreCommit2Failed{xerrors.Errorf("seal pre commit(2) failed: %w", err)})
	}

+	if cids.Unsealed == cid.Undef {
+		return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("seal pre commit(2) returned undefined CommD")})
+	}
+
	return ctx.Send(SectorPreCommit2{
		Unsealed: cids.Unsealed,
		Sealed:   cids.Sealed,
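A worked example of the ticket-expiry rule added above (illustrative arithmetic, not from the diff): builtin0.EpochsInDay is 2880 at 30-second epochs, so MaxTicketAge works out to 5760 epochs, roughly 48 hours; a sector whose TicketEpoch lags the chain head by more than that is sent back to GetTicket via SectorOldTicket instead of sealing with a ticket the chain would reject.

// Hedged sketch of the comparison performed in handlePreCommit1 above.
func ticketTooOld(head, ticketEpoch abi.ChainEpoch) bool {
	return head-ticketEpoch > MaxTicketAge // 2 * 2880 = 5760 epochs at 30s per epoch
}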
gen/main.go (20 changed lines)
@ -9,6 +9,8 @@ import (
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/exchange"
	"github.com/filecoin-project/lotus/chain/types"
+	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
	"github.com/filecoin-project/lotus/node/hello"
	"github.com/filecoin-project/lotus/paychmgr"
)
@ -75,4 +77,22 @@ func main() {
		fmt.Println(err)
		os.Exit(1)
	}
+
+	err = gen.WriteMapEncodersToFile("./extern/sector-storage/storiface/cbor_gen.go", "storiface",
+		storiface.CallID{},
+	)
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+
+	err = gen.WriteMapEncodersToFile("./extern/sector-storage/cbor_gen.go", "sectorstorage",
+		sectorstorage.Call{},
+		sectorstorage.WorkState{},
+		sectorstorage.WorkID{},
+	)
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
}
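These generated CBOR encoders are what make call and work state persistable in the miner's datastore, which is the mechanism that lets sealing calls survive a restart. A hedged sketch of what the generated code provides (the Marshal/Unmarshal method shapes follow the usual cbor-gen output for Go structs; CallID's fields are not shown in this hunk, so the zero value is used):

package main

import (
	"bytes"
	"fmt"

	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

func main() {
	var ci storiface.CallID // real callers fill in the sector and the call UUID

	var buf bytes.Buffer
	if err := ci.MarshalCBOR(&buf); err != nil { // method emitted by gen/main.go
		panic(err)
	}

	var out storiface.CallID
	if err := out.UnmarshalCBOR(&buf); err != nil {
		panic(err)
	}
	fmt.Printf("round-tripped: %+v\n", out)
}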
|
@ -49,6 +49,7 @@ import (
|
|||||||
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
|
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||||
|
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||||
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
||||||
"github.com/filecoin-project/lotus/journal"
|
"github.com/filecoin-project/lotus/journal"
|
||||||
"github.com/filecoin-project/lotus/lib/blockstore"
|
"github.com/filecoin-project/lotus/lib/blockstore"
|
||||||
@ -340,6 +341,7 @@ func Online() Option {
|
|||||||
|
|
||||||
Override(new(sectorstorage.SectorManager), From(new(*sectorstorage.Manager))),
|
Override(new(sectorstorage.SectorManager), From(new(*sectorstorage.Manager))),
|
||||||
Override(new(storage2.Prover), From(new(sectorstorage.SectorManager))),
|
Override(new(storage2.Prover), From(new(sectorstorage.SectorManager))),
|
||||||
|
Override(new(storiface.WorkerReturn), From(new(sectorstorage.SectorManager))),
|
||||||
|
|
||||||
Override(new(*sectorblocks.SectorBlocks), sectorblocks.NewSectorBlocks),
|
Override(new(*sectorblocks.SectorBlocks), sectorblocks.NewSectorBlocks),
|
||||||
Override(new(*storage.Miner), modules.StorageMiner(config.DefaultStorageMiner().Fees)),
|
Override(new(*storage.Miner), modules.StorageMiner(config.DefaultStorageMiner().Fees)),
|
||||||
|
@ -5,9 +5,9 @@ import (
	"sort"
	"strings"

-	logging "github.com/ipfs/go-log/v2"
-
	"github.com/gbrlsnchs/jwt/v3"
+	"github.com/google/uuid"
+	logging "github.com/ipfs/go-log/v2"
	"github.com/libp2p/go-libp2p-core/host"
	metrics "github.com/libp2p/go-libp2p-core/metrics"
	"github.com/libp2p/go-libp2p-core/network"
@ -27,6 +27,8 @@ import (
	"github.com/filecoin-project/lotus/node/modules/lp2p"
)

+var session = uuid.New()
+
type CommonAPI struct {
	fx.In

@ -202,6 +204,10 @@ func (a *CommonAPI) Shutdown(ctx context.Context) error {
	return nil
}

+func (a *CommonAPI) Session(ctx context.Context) (uuid.UUID, error) {
+	return session, nil
+}
+
func (a *CommonAPI) Closing(ctx context.Context) (<-chan struct{}, error) {
	return make(chan struct{}), nil // relies on jsonrpc closing
}
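Because session is a package-level uuid.New(), every process start yields a fresh value, so an RPC client can poll Session and treat a changed UUID as "the API provider restarted". A hedged sketch of such a watcher (the function name, callback and structural interface are hypothetical; only the Session signature above is assumed):

func watchSession(ctx context.Context, api interface {
	Session(context.Context) (uuid.UUID, error)
}, onRestart func()) error {
	known, err := api.Session(ctx)
	if err != nil {
		return err
	}
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(30 * time.Second):
		}
		cur, err := api.Session(ctx)
		if err != nil {
			continue // transient RPC trouble; try again on the next tick
		}
		if cur != known {
			onRestart() // provider restarted; re-establish any tracked state
			known = cur
		}
	}
}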
@ -1134,6 +1134,25 @@ func (a *StateAPI) StateMinerAvailableBalance(ctx context.Context, maddr address
	return types.BigAdd(abal, vested), nil
}

+func (a *StateAPI) StateMinerSectorAllocated(ctx context.Context, maddr address.Address, s abi.SectorNumber, tsk types.TipSetKey) (bool, error) {
+	ts, err := a.Chain.GetTipSetFromKey(tsk)
+	if err != nil {
+		return false, xerrors.Errorf("loading tipset %s: %w", tsk, err)
+	}
+
+	act, err := a.StateManager.LoadActor(ctx, maddr, ts)
+	if err != nil {
+		return false, xerrors.Errorf("failed to load miner actor: %w", err)
+	}
+
+	mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act)
+	if err != nil {
+		return false, xerrors.Errorf("failed to load miner actor state: %w", err)
+	}
+
+	return mas.IsAllocated(s)
+}
+
// StateVerifiedClientStatus returns the data cap for the given address.
// Returns zero if there is no entry in the data cap table for the
// address.
@ -8,6 +8,7 @@ import (
	"strconv"
	"time"

+	"github.com/google/uuid"
	"github.com/ipfs/go-cid"
	"github.com/libp2p/go-libp2p-core/host"
	"github.com/libp2p/go-libp2p-core/peer"
@ -54,6 +55,7 @@ type StorageMinerAPI struct {
	StorageMgr  *sectorstorage.Manager `optional:"true"`
	IStorageMgr sectorstorage.SectorManager
	*stores.Index
+	storiface.WorkerReturn
	DataTransfer dtypes.ProviderDataTransfer
	Host         host.Host

@ -85,11 +87,11 @@ func (sm *StorageMinerAPI) ServeRemote(w http.ResponseWriter, r *http.Request) {
	sm.StorageMgr.ServeHTTP(w, r)
}

-func (sm *StorageMinerAPI) WorkerStats(context.Context) (map[uint64]storiface.WorkerStats, error) {
+func (sm *StorageMinerAPI) WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) {
	return sm.StorageMgr.WorkerStats(), nil
}

-func (sm *StorageMinerAPI) WorkerJobs(ctx context.Context) (map[uint64][]storiface.WorkerJob, error) {
+func (sm *StorageMinerAPI) WorkerJobs(ctx context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) {
	return sm.StorageMgr.WorkerJobs(), nil
}
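With worker maps keyed by the session UUID rather than a uint64 counter, a worker keeps one identity for the lifetime of its process and reappears under a new key after a restart. A hedged caller-side sketch (the structural interface stands in for the storage-miner API client; only the WorkerStats signature above is assumed):

func printWorkerStats(ctx context.Context, sm interface {
	WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error)
}) error {
	stats, err := sm.WorkerStats(ctx)
	if err != nil {
		return err
	}
	for id, st := range stats {
		fmt.Printf("worker %s: %+v\n", id, st) // keys are per-process session UUIDs
	}
	return nil
}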
@ -43,6 +43,7 @@ import (
	"github.com/filecoin-project/go-multistore"
	paramfetch "github.com/filecoin-project/go-paramfetch"
	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-statestore"
	"github.com/filecoin-project/go-storedcounter"

	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
@ -50,8 +51,6 @@ import (
	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
-	"github.com/filecoin-project/lotus/journal"
-	"github.com/filecoin-project/lotus/markets"

	lapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
@ -59,7 +58,9 @@ import (
	"github.com/filecoin-project/lotus/chain/gen"
	"github.com/filecoin-project/lotus/chain/gen/slashfilter"
	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/journal"
	"github.com/filecoin-project/lotus/lib/blockstore"
+	"github.com/filecoin-project/lotus/markets"
	marketevents "github.com/filecoin-project/lotus/markets/loggers"
	"github.com/filecoin-project/lotus/markets/retrievaladapter"
	"github.com/filecoin-project/lotus/miner"
@ -558,10 +559,16 @@ func RetrievalProvider(h host.Host,
	return retrievalimpl.NewProvider(maddr, adapter, netwk, pieceStore, mds, dt, namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")), opt)
}

-func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc sectorstorage.SealerConfig, urls sectorstorage.URLs, sa sectorstorage.StorageAuth) (*sectorstorage.Manager, error) {
+var WorkerCallsPrefix = datastore.NewKey("/worker/calls")
+var ManagerWorkPrefix = datastore.NewKey("/stmgr/calls")
+
+func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc sectorstorage.SealerConfig, urls sectorstorage.URLs, sa sectorstorage.StorageAuth, ds dtypes.MetadataDS) (*sectorstorage.Manager, error) {
	ctx := helpers.LifecycleCtx(mctx, lc)

-	sst, err := sectorstorage.New(ctx, ls, si, cfg, sc, urls, sa)
+	wsts := statestore.New(namespace.Wrap(ds, WorkerCallsPrefix))
+	smsts := statestore.New(namespace.Wrap(ds, ManagerWorkPrefix))
+
+	sst, err := sectorstorage.New(ctx, ls, si, cfg, sc, urls, sa, wsts, smsts)
	if err != nil {
		return nil, err
	}
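The two statestore prefixes above carve separate key ranges out of the miner's metadata datastore: persisted worker call results under /worker/calls and manager work state under /stmgr/calls. A small illustration of the namespacing semantics this relies on (go-datastore behavior of this vintage; the in-memory datastore and key names below are only for the example and are not code from the PR):

ds := datastore.NewMapDatastore()
wrapped := namespace.Wrap(ds, datastore.NewKey("/worker/calls"))
// A key written through the wrapped store lands under the prefix,
// i.e. this value is stored at /worker/calls/example.
_ = wrapped.Put(datastore.NewKey("example"), []byte("call result"))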
|
@ -93,6 +93,15 @@ func (s SealingAPIAdapter) StateMinerDeadlines(ctx context.Context, maddr addres
|
|||||||
return s.delegate.StateMinerDeadlines(ctx, maddr, tsk)
|
return s.delegate.StateMinerDeadlines(ctx, maddr, tsk)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s SealingAPIAdapter) StateMinerSectorAllocated(ctx context.Context, maddr address.Address, sid abi.SectorNumber, tok sealing.TipSetToken) (bool, error) {
|
||||||
|
tsk, err := types.TipSetKeyFromBytes(tok)
|
||||||
|
if err != nil {
|
||||||
|
return false, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.delegate.StateMinerSectorAllocated(ctx, maddr, sid, tsk)
|
||||||
|
}
|
||||||
|
|
||||||
func (s SealingAPIAdapter) StateWaitMsg(ctx context.Context, mcid cid.Cid) (sealing.MsgLookup, error) {
|
func (s SealingAPIAdapter) StateWaitMsg(ctx context.Context, mcid cid.Cid) (sealing.MsgLookup, error) {
|
||||||
wmsg, err := s.delegate.StateWaitMsg(ctx, mcid, build.MessageConfidence)
|
wmsg, err := s.delegate.StateWaitMsg(ctx, mcid, build.MessageConfidence)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -81,6 +81,7 @@ type storageMinerApi interface {
	StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error)
	StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error)
	StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error)
+	StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error)
	StateSearchMsg(context.Context, cid.Cid) (*api.MsgLookup, error)
	StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) // TODO: removeme eventually
	StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
@ -32,6 +32,7 @@ import (
type mockStorageMinerAPI struct {
	partitions     []api.Partition
	pushedMessages chan *types.Message
+	storageMinerApi
}

func newMockStorageMinerAPI() *mockStorageMinerAPI {
@ -47,10 +48,6 @@ func (m *mockStorageMinerAPI) StateMinerInfo(ctx context.Context, a address.Addr
	}, nil
}

-func (m *mockStorageMinerAPI) StateNetworkVersion(ctx context.Context, key types.TipSetKey) (network.Version, error) {
-	return build.NewestNetworkVersion, nil
-}
-
func (m *mockStorageMinerAPI) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
	return abi.Randomness("ticket rand"), nil
}
@ -96,6 +93,10 @@ func (m *mockStorageMinerAPI) StateWaitMsg(ctx context.Context, cid cid.Cid, con
	}, nil
}

+func (m *mockStorageMinerAPI) StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) {
+	return build.NewestNetworkVersion, nil
+}
+
type mockProver struct {
}