lpseal: Wire up sdr task

commit 5d26c3a9dc
parent 4eb7bc91d9
@@ -3,6 +3,8 @@ package tasks
 import (
 	"context"
+	"github.com/filecoin-project/lotus/provider/lpffi"
+	"github.com/filecoin-project/lotus/provider/lpseal"
 
 	logging "github.com/ipfs/go-log/v2"
 	"github.com/samber/lo"
@@ -25,6 +27,7 @@ func StartTasks(ctx context.Context, dependencies *deps.Deps) (*harmonytask.Task
 	as := dependencies.As
 	maddrs := dependencies.Maddrs
 	stor := dependencies.Stor
+	lstor := dependencies.LocalStore
 	si := dependencies.Si
 	var activeTasks []harmonytask.TaskInterface
 
@@ -35,6 +38,7 @@ func StartTasks(ctx context.Context, dependencies *deps.Deps) (*harmonytask.Task
 	///// Task Selection
 	///////////////////////////////////////////////////////////////////////
 	{
+		// PoSt
 
 		if cfg.Subsystems.EnableWindowPost {
 			wdPostTask, wdPoStSubmitTask, derlareRecoverTask, err := provider.WindowPostScheduler(ctx, cfg.Fees, cfg.Proving, full, verif, lw, sender,
@@ -50,6 +54,25 @@ func StartTasks(ctx context.Context, dependencies *deps.Deps) (*harmonytask.Task
 			activeTasks = append(activeTasks, winPoStTask)
 		}
 	}
+
+	{
+		// Sealing
+		hasAnySealingTask := cfg.Subsystems.EnableSealSDR
+
+		var sp *lpseal.SealPoller
+		var slr *lpffi.SealCalls
+		if hasAnySealingTask {
+			sp = lpseal.NewPoller(db)
+			go sp.RunPoller(ctx)
+
+			slr = lpffi.NewSealCalls(stor, lstor, si)
+		}
+
+		if cfg.Subsystems.EnableSealSDR {
+			sdrTask := lpseal.NewSDRTask(full, db, sp, slr, cfg.Subsystems.SealSDRMaxTasks)
+			activeTasks = append(activeTasks, sdrTask)
+		}
+	}
 	log.Infow("This lotus_provider instance handles",
 		"miner_addresses", maddrs,
 		"tasks", lo.Map(activeTasks, func(t harmonytask.TaskInterface, _ int) string { return t.TypeDetails().Name }))
@@ -45,3 +45,9 @@ func (p *Promise[T]) Val(ctx context.Context) T {
 		return val
 	}
 }
+
+func (p *Promise[T]) IsSet() bool {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	return p.done != nil
+}
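Note on the new IsSet accessor: Promise.Val blocks until the promise has been fulfilled, so a caller that may run before any AddTaskFunc is registered (the seal poller further down) needs a non-blocking check first. Below is a minimal, self-contained sketch of that guard pattern; it is illustrative only, not the lotus promise package, and its Val is simplified to a non-blocking getter.

package main

import (
	"fmt"
	"sync"
)

// addTaskOnce is an illustrative stand-in for promise.Promise[AddTaskFunc]:
// a value set at most once by the task engine and read by a poller.
type addTaskOnce struct {
	mu sync.Mutex
	fn func(sector int) // hypothetical AddTaskFunc stand-in
}

func (p *addTaskOnce) Set(fn func(int)) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.fn = fn
}

// IsSet reports whether a task adder was registered, without blocking.
func (p *addTaskOnce) IsSet() bool {
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.fn != nil
}

// Val returns the registered adder (simplified: the real Val blocks until set).
func (p *addTaskOnce) Val() func(int) {
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.fn
}

func main() {
	var adder addTaskOnce

	// A poll loop must not assume the SDR task type was registered: if the
	// node runs without EnableSealSDR, no AddTaskFunc is ever set.
	poll := func(sector int) {
		if !adder.IsSet() {
			fmt.Println("sdr task not registered, skipping sector", sector)
			return
		}
		adder.Val()(sector)
	}

	poll(1) // skipped: nothing registered yet

	adder.Set(func(sector int) { fmt.Println("scheduling SDR for sector", sector) })
	poll(2) // now schedules
}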
@@ -101,6 +101,11 @@ type ProviderSubsystemsConfig struct {
 	EnableWebGui bool
 	// The address that should listen for Web GUI requests.
 	GuiAddress string
+
+	// EnableSealSDR enables SDR tasks to run. SDR is the long sequential computation
+	// creating layers. In lotus-miner this was run as part of PreCommit1.
+	EnableSealSDR bool
+	SealSDRMaxTasks int
 }
 
 type DAGStoreConfig struct {
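For orientation, the two new fields map directly onto the task wiring above: EnableSealSDR decides whether the SDR task is registered at all, and SealSDRMaxTasks is passed to NewSDRTask as maxSDR. The snippet below is a hypothetical, trimmed-down mirror of the struct, assuming (not confirmed by this diff) that a zero max means no explicit per-node cap.

package main

import "fmt"

// providerSubsystems is a hypothetical, trimmed-down mirror of the new fields;
// the real struct lives in the lotus config package and has many more knobs.
type providerSubsystems struct {
	EnableSealSDR   bool
	SealSDRMaxTasks int
}

func main() {
	cfg := providerSubsystems{
		EnableSealSDR:   true, // register the SDR task on this node
		SealSDRMaxTasks: 1,    // assumption: caps concurrent SDR tasks; 0 = no explicit cap
	}

	if cfg.EnableSealSDR {
		fmt.Printf("SDR sealing enabled, max parallel SDR tasks: %d\n", cfg.SealSDRMaxTasks)
	}
}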
@@ -5,23 +5,72 @@ import (
 	ffi "github.com/filecoin-project/filecoin-ffi"
 	commcid "github.com/filecoin-project/go-fil-commcid"
 	"github.com/filecoin-project/go-state-types/abi"
-	"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
+	"github.com/filecoin-project/lotus/storage/paths"
 	"github.com/filecoin-project/lotus/storage/sealer/storiface"
 	"github.com/ipfs/go-cid"
+	logging "github.com/ipfs/go-log/v2"
 	"golang.org/x/xerrors"
 )
 
+var log = logging.Logger("lpffi")
+
+/*
 type ExternPrecommit2 func(ctx context.Context, sector storiface.SectorRef, cache, sealed string, pc1out storiface.PreCommit1Out) (sealedCID cid.Cid, unsealedCID cid.Cid, err error)
 
 type ExternalSealer struct {
 	PreCommit2 ExternPrecommit2
 }
+*/
 type SealCalls struct {
-	sectors ffiwrapper.SectorProvider
+	sectors *storageProvider
 
-	// externCalls cointain overrides for calling alternative sealing logic
-	externCalls ExternalSealer
+	/*// externCalls cointain overrides for calling alternative sealing logic
+	externCalls ExternalSealer*/
+}
+
+func NewSealCalls(st paths.Store, ls *paths.Local, si paths.SectorIndex) *SealCalls {
+	return &SealCalls{
+		sectors: &storageProvider{
+			storage:    st,
+			localStore: ls,
+			sindex:     si,
+		},
+	}
+}
+
+type storageProvider struct {
+	storage    paths.Store
+	localStore *paths.Local
+	sindex     paths.SectorIndex
+}
+
+func (l *storageProvider) AcquireSector(ctx context.Context, sector storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) {
+	paths, storageIDs, err := l.storage.AcquireSector(ctx, sector, existing, allocate, sealing, storiface.AcquireMove)
+	if err != nil {
+		return storiface.SectorPaths{}, nil, err
+	}
+
+	releaseStorage, err := l.localStore.Reserve(ctx, sector, allocate, storageIDs, storiface.FSOverheadSeal)
+	if err != nil {
+		return storiface.SectorPaths{}, nil, xerrors.Errorf("reserving storage space: %w", err)
+	}
+
+	log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, paths)
+
+	return paths, func() {
+		releaseStorage()
+
+		for _, fileType := range storiface.PathTypes {
+			if fileType&allocate == 0 {
+				continue
+			}
+
+			sid := storiface.PathByType(storageIDs, fileType)
+			if err := l.sindex.StorageDeclareSector(ctx, storiface.ID(sid), sector.ID, fileType, true); err != nil {
+				log.Errorf("declare sector error: %+v", err)
+			}
+		}
+	}, nil
 }
 
 func (sb *SealCalls) GenerateSDR(ctx context.Context, sector storiface.SectorRef, ticket abi.SealRandomness, commKcid cid.Cid) error {
@@ -26,16 +26,24 @@ type SealPoller struct {
 	pollers [numPollers]promise.Promise[harmonytask.AddTaskFunc]
 }
 
-func (s *SealPoller) RunPoller(ctx context.Context) error {
+func NewPoller(db *harmonydb.DB) *SealPoller {
+	return &SealPoller{
+		db: db,
+	}
+}
+
+func (s *SealPoller) RunPoller(ctx context.Context) {
 	ticker := time.NewTicker(sealPollerInterval)
 	defer ticker.Stop()
 
 	for {
 		select {
 		case <-ctx.Done():
-			return nil
+			return
 		case <-ticker.C:
-			s.poll(ctx)
+			if err := s.poll(ctx); err != nil {
+				log.Errorf("polling sdr sector pipeline: %w", err)
+			}
 		}
 	}
 }
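Design note: RunPoller is now started with go sp.RunPoller(ctx) in StartTasks, so there is no caller left to receive a return value; per-tick poll failures are logged and the loop keeps running until the context is cancelled. The following is a self-contained sketch of that ticker-plus-context loop; the names are illustrative, not the lotus code.

package main

import (
	"context"
	"fmt"
	"time"
)

// runPoller mirrors the shape of the loop above: poll on a fixed interval,
// log (rather than return) per-tick errors, stop only on context cancellation.
func runPoller(ctx context.Context, interval time.Duration, poll func(context.Context) error) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if err := poll(ctx); err != nil {
				// A transient poll failure should not kill the background loop.
				fmt.Println("poll error:", err)
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()

	runPoller(ctx, 100*time.Millisecond, func(context.Context) error {
		fmt.Println("polling sectors_sdr_pipeline")
		return nil
	})
}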
@@ -86,7 +94,7 @@ func (s *SealPoller) poll(ctx context.Context) error {
 			continue
 		}
 
-		if task.TaskSDR == nil {
+		if task.TaskSDR == nil && s.pollers[pollerSDR].IsSet() {
 			s.pollers[pollerSDR].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
 				n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_sdr = $1 WHERE sp_id = $2 AND sector_number = $3`, id, task.SpID, task.SectorNumber)
 				if err != nil {
@@ -32,6 +32,16 @@ type SDRTask struct {
 	maxSDR int
 }
 
+func NewSDRTask(api SDRAPI, db *harmonydb.DB, sp *SealPoller, sc *lpffi.SealCalls, maxSDR int) *SDRTask {
+	return &SDRTask{
+		api:    api,
+		db:     db,
+		sp:     sp,
+		sc:     sc,
+		maxSDR: maxSDR,
+	}
+}
+
 func (s *SDRTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
 	ctx := context.Background()
 
@@ -28,8 +28,6 @@ import (
 	"github.com/filecoin-project/lotus/storage/sealer/storiface"
 )
 
-var pathTypes = []storiface.SectorFileType{storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache, storiface.FTUpdate, storiface.FTUpdateCache}
-
 type WorkerConfig struct {
 	TaskTypes []sealtasks.TaskType
 	NoSwap    bool
@@ -167,7 +165,7 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector stor
 	return paths, func() {
 		releaseStorage()
 
-		for _, fileType := range pathTypes {
+		for _, fileType := range storiface.PathTypes {
 			if fileType&allocate == 0 {
 				continue
 			}
@@ -180,16 +178,16 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector stor
 	}, nil
 }
 
+func (l *localWorkerPathProvider) AcquireSectorCopy(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
+	return (&localWorkerPathProvider{w: l.w, op: storiface.AcquireCopy}).AcquireSector(ctx, id, existing, allocate, ptype)
+}
+
 func FFIExec(opts ...ffiwrapper.FFIWrapperOpt) func(l *LocalWorker) (storiface.Storage, error) {
 	return func(l *LocalWorker) (storiface.Storage, error) {
 		return ffiwrapper.New(&localWorkerPathProvider{w: l}, opts...)
 	}
 }
 
-func (l *localWorkerPathProvider) AcquireSectorCopy(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
-	return (&localWorkerPathProvider{w: l.w, op: storiface.AcquireCopy}).AcquireSector(ctx, id, existing, allocate, ptype)
-}
-
 type ReturnType string
 
 const (