2022-06-14 18:03:38 +00:00
|
|
|
package sealer
|
2020-10-28 12:39:28 +00:00
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"golang.org/x/xerrors"
|
2020-10-28 13:23:38 +00:00
|
|
|
|
2022-06-14 18:25:52 +00:00
|
|
|
"github.com/filecoin-project/lotus/storage/paths"
|
2022-06-14 18:03:38 +00:00
|
|
|
"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
|
|
|
|
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
2020-10-28 12:39:28 +00:00
|
|
|
)
|
|
|
|
|
2020-10-28 13:14:38 +00:00
|
|
|
// schedWorker is the per-worker control loop state. One schedWorker is
// created per registered worker by runWorker and is driven by handleWorker
// in its own goroutine.
type schedWorker struct {
	// sched is the owning scheduler; worker is the shared handle that the
	// scheduler's other goroutines also see (guarded by sched.workersLk).
	sched  *Scheduler
	worker *WorkerHandle

	// wid identifies this worker; compared against the worker's reported
	// session ID in checkSession to detect restarts.
	wid storiface.WorkerID

	// heartbeatTimer paces session checks (paths.HeartbeatInterval).
	heartbeatTimer *time.Ticker
	// scheduledWindows delivers windows assigned by the main scheduler loop.
	scheduledWindows chan *SchedWindow
	// taskDone is a capacity-1 notification channel signalling that a task
	// (or its prepare step) finished, freeing resources.
	taskDone chan struct{}

	// windowsRequested counts window requests currently outstanding with
	// the scheduler; kept at most SchedWindows by requestWindows.
	windowsRequested int
}
|
|
|
|
|
2022-05-18 13:47:08 +00:00
|
|
|
func newWorkerHandle(ctx context.Context, w Worker) (*WorkerHandle, error) {
|
2020-10-28 12:39:28 +00:00
|
|
|
info, err := w.Info(ctx)
|
|
|
|
if err != nil {
|
2022-01-14 13:11:04 +00:00
|
|
|
return nil, xerrors.Errorf("getting worker info: %w", err)
|
2021-07-27 03:15:53 +00:00
|
|
|
}
|
2020-10-28 12:39:28 +00:00
|
|
|
|
2023-02-28 08:02:18 +00:00
|
|
|
tc := newTaskCounter()
|
|
|
|
|
2022-05-18 13:47:08 +00:00
|
|
|
worker := &WorkerHandle{
|
2020-10-28 13:23:38 +00:00
|
|
|
workerRpc: w,
|
2022-05-18 13:47:08 +00:00
|
|
|
Info: info,
|
2020-10-28 12:39:28 +00:00
|
|
|
|
2023-02-28 08:02:18 +00:00
|
|
|
preparing: NewActiveResources(tc),
|
|
|
|
active: NewActiveResources(tc),
|
2022-05-18 13:47:08 +00:00
|
|
|
Enabled: true,
|
2020-10-28 12:39:28 +00:00
|
|
|
|
|
|
|
closingMgr: make(chan struct{}),
|
|
|
|
closedMgr: make(chan struct{}),
|
|
|
|
}
|
|
|
|
|
2022-01-14 13:11:04 +00:00
|
|
|
return worker, nil
|
|
|
|
}
|
2021-07-27 03:15:53 +00:00
|
|
|
|
2022-01-14 13:11:04 +00:00
|
|
|
// context only used for startup
|
2022-05-18 13:47:08 +00:00
|
|
|
func (sh *Scheduler) runWorker(ctx context.Context, wid storiface.WorkerID, worker *WorkerHandle) error {
|
2020-10-28 12:39:28 +00:00
|
|
|
sh.workersLk.Lock()
|
2022-05-18 13:47:08 +00:00
|
|
|
_, exist := sh.Workers[wid]
|
2020-10-28 12:39:28 +00:00
|
|
|
if exist {
|
|
|
|
log.Warnw("duplicated worker added", "id", wid)
|
|
|
|
|
|
|
|
// this is ok, we're already handling this worker in a different goroutine
|
2020-12-25 07:05:01 +00:00
|
|
|
sh.workersLk.Unlock()
|
2020-10-28 12:39:28 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-05-18 13:47:08 +00:00
|
|
|
sh.Workers[wid] = worker
|
2020-10-28 12:39:28 +00:00
|
|
|
sh.workersLk.Unlock()
|
|
|
|
|
2020-10-28 13:14:38 +00:00
|
|
|
sw := &schedWorker{
|
2020-10-28 13:23:38 +00:00
|
|
|
sched: sh,
|
2020-10-28 13:14:38 +00:00
|
|
|
worker: worker,
|
2020-10-28 12:39:28 +00:00
|
|
|
|
2020-10-28 13:14:38 +00:00
|
|
|
wid: wid,
|
2020-10-28 12:39:28 +00:00
|
|
|
|
2022-06-14 18:25:52 +00:00
|
|
|
heartbeatTimer: time.NewTicker(paths.HeartbeatInterval),
|
2022-05-18 13:47:08 +00:00
|
|
|
scheduledWindows: make(chan *SchedWindow, SchedWindows),
|
2020-10-28 13:14:38 +00:00
|
|
|
taskDone: make(chan struct{}, 1),
|
2020-10-28 12:39:28 +00:00
|
|
|
|
2020-10-28 13:14:38 +00:00
|
|
|
windowsRequested: 0,
|
|
|
|
}
|
2020-10-28 12:39:28 +00:00
|
|
|
|
2020-10-28 13:14:38 +00:00
|
|
|
go sw.handleWorker()
|
2020-10-28 12:39:28 +00:00
|
|
|
|
2020-10-28 13:14:38 +00:00
|
|
|
return nil
|
|
|
|
}
|
2020-10-28 12:39:28 +00:00
|
|
|
|
2020-10-28 13:14:38 +00:00
|
|
|
// handleWorker is the main control loop for a single worker. It runs in its
// own goroutine until the worker session is lost, the scheduler closes, or
// workerCleanup closes worker.closingMgr. On exit it disables the worker in
// the scheduler (returning any unprocessed windows) and removes it from
// sched.Workers.
func (sw *schedWorker) handleWorker() {
	worker, sched := sw.worker, sw.sched

	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	// closedMgr tells workerCleanup that this goroutine has fully exited.
	defer close(worker.closedMgr)

	defer func() {
		log.Warnw("Worker closing", "workerid", sw.wid)

		if err := sw.disable(ctx); err != nil {
			log.Warnw("failed to disable worker", "worker", sw.wid, "error", err)
		}

		sched.workersLk.Lock()
		delete(sched.Workers, sw.wid)
		sched.workersLk.Unlock()
	}()

	defer sw.heartbeatTimer.Stop()

	for {
		{
			sched.workersLk.Lock()
			enabled := worker.Enabled
			sched.workersLk.Unlock()

			// ask for more windows if we need them (non-blocking)
			if enabled {
				if !sw.requestWindows() {
					return // graceful shutdown
				}
			}
		}

		// wait for more windows to come in, or for tasks to get finished (blocking)
		for {
			// ping the worker and check session
			if !sw.checkSession(ctx) {
				return // invalid session / exiting
			}

			// session looks good
			{
				sched.workersLk.Lock()
				enabled := worker.Enabled
				// re-enable: the session check succeeded, so if disable had
				// marked us off we come back online here.
				worker.Enabled = true
				sched.workersLk.Unlock()

				if !enabled {
					// go send window requests
					break
				}
			}

			// wait for more tasks to be assigned by the main scheduler or for the worker
			// to finish processing a task
			update, pokeSched, ok := sw.waitForUpdates()
			if !ok {
				return
			}
			if pokeSched {
				// a task has finished preparing, which can mean that we've freed some space on some worker
				select {
				case sched.workerChange <- struct{}{}:
				default: // workerChange is buffered, and scheduling is global, so it's ok if we don't send here
				}
			}
			if update {
				break
			}
		}

		// process assigned windows (non-blocking)
		sched.workersLk.RLock()
		worker.wndLk.Lock()

		sw.workerCompactWindows()

		// send tasks to the worker
		sw.processAssignedWindows()

		worker.wndLk.Unlock()
		sched.workersLk.RUnlock()
	}
}
|
2020-10-28 12:39:28 +00:00
|
|
|
|
2020-10-28 13:14:38 +00:00
|
|
|
// disable asks the main scheduler goroutine to take this worker out of
// rotation, handing back the worker's unprocessed windows so their tasks can
// be rescheduled elsewhere. It blocks until the scheduler acknowledges (or
// ctx is cancelled / the scheduler is closing), then resets local window
// bookkeeping so the loop can start fresh if the worker comes back.
func (sw *schedWorker) disable(ctx context.Context) error {
	done := make(chan struct{})

	// request cleanup in the main scheduler goroutine
	select {
	case sw.sched.workerDisable <- workerDisableReq{
		activeWindows: sw.worker.activeWindows,
		wid:           sw.wid,
		done: func() {
			close(done)
		},
	}:
	case <-ctx.Done():
		return ctx.Err()
	case <-sw.sched.closing:
		// scheduler shutting down anyway; nothing to clean up
		return nil
	}

	// wait for cleanup to complete
	select {
	case <-done:
	case <-ctx.Done():
		return ctx.Err()
	case <-sw.sched.closing:
		return nil
	}

	// windows were handed to the scheduler above; drop our references and
	// reset the request counter so requestWindows starts over.
	sw.worker.activeWindows = sw.worker.activeWindows[:0]
	sw.windowsRequested = 0
	return nil
}
|
2020-10-28 12:39:28 +00:00
|
|
|
|
2020-10-28 13:14:38 +00:00
|
|
|
// checkSession pings the worker's Session RPC and verifies the reported
// session ID still matches sw.wid. It returns true when the session is
// healthy, and false when the worker restarted (session changed), announced
// shutdown (ClosedWorkerID), or the scheduler/worker manager is closing.
// Transient RPC errors disable the worker and retry on the heartbeat timer
// rather than giving up.
func (sw *schedWorker) checkSession(ctx context.Context) bool {
	for {
		// bound the RPC well under the heartbeat interval so retries keep pace
		sctx, scancel := context.WithTimeout(ctx, paths.HeartbeatInterval/2)
		curSes, err := sw.worker.workerRpc.Session(sctx)
		scancel()
		if err != nil {
			// Likely temporary error

			log.Warnw("failed to check worker session", "error", err)

			if err := sw.disable(ctx); err != nil {
				log.Warnw("failed to disable worker with session error", "worker", sw.wid, "error", err)
			}

			select {
			case <-sw.heartbeatTimer.C:
				continue
			case w := <-sw.scheduledWindows:
				// was in flight when initially disabled, return
				sw.worker.wndLk.Lock()
				sw.worker.activeWindows = append(sw.worker.activeWindows, w)
				sw.worker.wndLk.Unlock()

				// disable again so the just-received window is handed back too
				if err := sw.disable(ctx); err != nil {
					log.Warnw("failed to disable worker with session error", "worker", sw.wid, "error", err)
				}
			case <-sw.sched.closing:
				return false
			case <-sw.worker.closingMgr:
				return false
			}
			continue
		}

		if storiface.WorkerID(curSes) != sw.wid {
			if curSes != ClosedWorkerID {
				// worker restarted
				log.Warnw("worker session changed (worker restarted?)", "initial", sw.wid, "current", curSes)
			}

			return false
		}

		return true
	}
}
|
|
|
|
|
|
|
|
func (sw *schedWorker) requestWindows() bool {
|
|
|
|
for ; sw.windowsRequested < SchedWindows; sw.windowsRequested++ {
|
|
|
|
select {
|
2022-05-18 13:47:08 +00:00
|
|
|
case sw.sched.windowRequests <- &SchedWindowRequest{
|
|
|
|
Worker: sw.wid,
|
|
|
|
Done: sw.scheduledWindows,
|
2020-10-28 13:14:38 +00:00
|
|
|
}:
|
2020-10-28 13:23:38 +00:00
|
|
|
case <-sw.sched.closing:
|
2020-10-28 13:14:38 +00:00
|
|
|
return false
|
|
|
|
case <-sw.worker.closingMgr:
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2020-11-09 20:44:28 +00:00
|
|
|
// waitForUpdates blocks until something interesting happens:
//   - heartbeat tick: update=false (just re-check the session)
//   - a new window arrives: it is queued on activeWindows, update=true
//   - a task finished: update=true, sched=true (caller should poke the
//     global scheduler since resources were freed)
//
// ok=false means the scheduler or worker manager is closing and the caller
// should exit.
func (sw *schedWorker) waitForUpdates() (update bool, sched bool, ok bool) {
	select {
	case <-sw.heartbeatTimer.C:
		return false, false, true
	case w := <-sw.scheduledWindows:
		sw.worker.wndLk.Lock()
		sw.worker.activeWindows = append(sw.worker.activeWindows, w)
		sw.worker.wndLk.Unlock()
		return true, false, true
	case <-sw.taskDone:
		log.Debugw("task done", "workerid", sw.wid)
		return true, true, true
	case <-sw.sched.closing:
	case <-sw.worker.closingMgr:
	}

	return false, false, false
}
|
|
|
|
|
2020-10-28 13:23:38 +00:00
|
|
|
// workerCompactWindows repacks this worker's queued windows: tasks from
// newer windows are moved into older windows that still have spare
// resources, and windows left empty are dropped. windowsRequested is
// decremented for each dropped window so requestWindows will ask for
// replacements. Caller must hold worker.wndLk.
func (sw *schedWorker) workerCompactWindows() {
	worker := sw.worker

	// move tasks from newer windows into older windows whenever the older
	// window's resource allocation can still fit them
	if len(worker.activeWindows) > 1 {
		for wi, window := range worker.activeWindows[1:] {
			// wi indexes activeWindows[0:], i.e. the window just before
			// `window`, since we range over the [1:] sub-slice
			lower := worker.activeWindows[wi]
			var moved []int

			for ti, todo := range window.Todo {
				needRes := worker.Info.Resources.ResourceSpec(todo.Sector.ProofType, todo.TaskType)
				if !lower.Allocated.CanHandleRequest(todo.SchedId, todo.SealTask(), needRes, sw.wid, "compactWindows", worker.Info) {
					continue
				}

				moved = append(moved, ti)
				lower.Todo = append(lower.Todo, todo)
				lower.Allocated.Add(todo.SchedId, todo.SealTask(), worker.Info.Resources, needRes)
				window.Allocated.Free(todo.SchedId, todo.SealTask(), worker.Info.Resources, needRes)
			}

			if len(moved) > 0 {
				// rebuild window.Todo without the moved entries; `moved` is in
				// ascending order, so we consume it front-to-back
				newTodo := make([]*WorkerRequest, 0, len(window.Todo)-len(moved))
				for i, t := range window.Todo {
					if len(moved) > 0 && moved[0] == i {
						moved = moved[1:]
						continue
					}

					newTodo = append(newTodo, t)
				}
				window.Todo = newTodo
			}
		}
	}

	// drop windows that are now empty
	var compacted int
	var newWindows []*SchedWindow

	for _, window := range worker.activeWindows {
		if len(window.Todo) == 0 {
			compacted++
			continue
		}

		newWindows = append(newWindows, window)
	}

	worker.activeWindows = newWindows
	sw.windowsRequested -= compacted
}
|
|
|
|
|
2020-10-28 13:14:38 +00:00
|
|
|
// processAssignedWindows dispatches queued window tasks to the worker.
// Ready tasks (C1/C2, no prepare step) are assigned first; they take
// priority and assignReadyWork bails out early if prepared work is already
// waiting for 'active' resources.
func (sw *schedWorker) processAssignedWindows() {
	sw.assignReadyWork()
	sw.assignPreparingWork()
}
|
|
|
|
|
|
|
|
// assignPreparingWork walks the queued windows in order and starts any task
// whose prepare-phase resources fit in worker.preparing, via
// startProcessingTask (prepare, then run). It stops at the first window with
// a task that doesn't fit. Caller must hold worker.wndLk (and sched.workersLk
// read lock); worker.lk is taken internally around the resource check.
func (sw *schedWorker) assignPreparingWork() {
	worker := sw.worker

assignLoop:
	// process windows in order
	for len(worker.activeWindows) > 0 {
		firstWindow := worker.activeWindows[0]

		// process tasks within a window, preferring tasks at lower indexes
		for len(firstWindow.Todo) > 0 {
			tidx := -1

			worker.lk.Lock()
			for t, todo := range firstWindow.Todo {
				needResPrep := worker.Info.Resources.PrepResourceSpec(todo.Sector.ProofType, todo.TaskType, todo.prepare.PrepType)
				if worker.preparing.CanHandleRequest(todo.SchedId, todo.PrepSealTask(), needResPrep, sw.wid, "startPreparing", worker.Info) {
					tidx = t
					break
				}
			}
			worker.lk.Unlock()

			if tidx == -1 {
				// nothing in this window fits right now; later windows would
				// only jump the queue, so stop entirely
				break assignLoop
			}

			todo := firstWindow.Todo[tidx]

			log.Debugf("assign worker sector %d to %s", todo.Sector.ID.Number, worker.Info.Hostname)
			err := sw.startProcessingTask(todo)

			if err != nil {
				log.Errorf("startProcessingTask error: %+v", err)
				go todo.respond(xerrors.Errorf("startProcessingTask error: %w", err))
			}

			// Note: we're not freeing window.allocated resources here very much on purpose
			// remove the task from the window, preserving order
			copy(firstWindow.Todo[tidx:], firstWindow.Todo[tidx+1:])
			firstWindow.Todo[len(firstWindow.Todo)-1] = nil
			firstWindow.Todo = firstWindow.Todo[:len(firstWindow.Todo)-1]
		}

		// window fully drained; remove it and account for it so a new
		// window gets requested
		copy(worker.activeWindows, worker.activeWindows[1:])
		worker.activeWindows[len(worker.activeWindows)-1] = nil
		worker.activeWindows = worker.activeWindows[:len(worker.activeWindows)-1]

		sw.windowsRequested--
	}
}
|
|
|
|
|
|
|
|
func (sw *schedWorker) assignReadyWork() {
|
|
|
|
worker := sw.worker
|
|
|
|
|
|
|
|
worker.lk.Lock()
|
|
|
|
defer worker.lk.Unlock()
|
|
|
|
|
|
|
|
if worker.active.hasWorkWaiting() {
|
|
|
|
// prepared tasks have priority
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
assignLoop:
|
|
|
|
// process windows in order
|
|
|
|
for len(worker.activeWindows) > 0 {
|
|
|
|
firstWindow := worker.activeWindows[0]
|
|
|
|
|
|
|
|
// process tasks within a window, preferring tasks at lower indexes
|
2022-05-18 13:47:08 +00:00
|
|
|
for len(firstWindow.Todo) > 0 {
|
2021-09-15 14:37:27 +00:00
|
|
|
tidx := -1
|
|
|
|
|
2022-05-18 13:47:08 +00:00
|
|
|
for t, todo := range firstWindow.Todo {
|
|
|
|
if todo.TaskType != sealtasks.TTCommit1 && todo.TaskType != sealtasks.TTCommit2 { // todo put in task
|
2021-09-15 14:37:27 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2022-05-27 14:01:32 +00:00
|
|
|
needRes := worker.Info.Resources.ResourceSpec(todo.Sector.ProofType, todo.TaskType)
|
2023-05-10 19:43:42 +00:00
|
|
|
if worker.active.CanHandleRequest(todo.SchedId, todo.SealTask(), needRes, sw.wid, "startPreparing", worker.Info) {
|
2021-09-15 14:37:27 +00:00
|
|
|
tidx = t
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if tidx == -1 {
|
|
|
|
break assignLoop
|
|
|
|
}
|
|
|
|
|
2022-05-18 13:47:08 +00:00
|
|
|
todo := firstWindow.Todo[tidx]
|
2021-09-15 14:37:27 +00:00
|
|
|
|
2022-05-18 13:47:08 +00:00
|
|
|
log.Debugf("assign worker sector %d (ready)", todo.Sector.ID.Number)
|
2021-09-15 14:37:27 +00:00
|
|
|
err := sw.startProcessingReadyTask(todo)
|
2020-10-28 13:14:38 +00:00
|
|
|
|
|
|
|
if err != nil {
|
2020-11-24 11:09:48 +00:00
|
|
|
log.Errorf("startProcessingTask error: %+v", err)
|
2020-10-28 13:14:38 +00:00
|
|
|
go todo.respond(xerrors.Errorf("startProcessingTask error: %w", err))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Note: we're not freeing window.allocated resources here very much on purpose
|
2022-05-18 13:47:08 +00:00
|
|
|
copy(firstWindow.Todo[tidx:], firstWindow.Todo[tidx+1:])
|
|
|
|
firstWindow.Todo[len(firstWindow.Todo)-1] = nil
|
|
|
|
firstWindow.Todo = firstWindow.Todo[:len(firstWindow.Todo)-1]
|
2020-10-28 13:14:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
copy(worker.activeWindows, worker.activeWindows[1:])
|
|
|
|
worker.activeWindows[len(worker.activeWindows)-1] = nil
|
|
|
|
worker.activeWindows = worker.activeWindows[:len(worker.activeWindows)-1]
|
|
|
|
|
|
|
|
sw.windowsRequested--
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-05-18 13:47:08 +00:00
|
|
|
// startProcessingTask runs a two-phase task: the prepare step (e.g. fetching
// sector data) under the 'preparing' resource window, then the main work
// under the 'active' window. Prepare resources are reserved synchronously
// here (caller holds no worker.lk); everything else runs in a goroutine.
// The returned error is always nil in practice — failures are delivered to
// the requester via req.ret.
func (sw *schedWorker) startProcessingTask(req *WorkerRequest) error {
	w, sh := sw.worker, sw.sched

	needRes := w.Info.Resources.ResourceSpec(req.Sector.ProofType, req.TaskType)
	needResPrep := w.Info.Resources.PrepResourceSpec(req.Sector.ProofType, req.TaskType, req.prepare.PrepType)

	// reserve prepare resources before spawning so the scheduler can't
	// over-commit this worker in the meantime
	w.lk.Lock()
	w.preparing.Add(req.SchedId, req.PrepSealTask(), w.Info.Resources, needResPrep)
	w.lk.Unlock()

	go func() {
		// first run the prepare step (e.g. fetching sector data from other worker)
		tw := sh.workTracker.worker(sw.wid, w.Info, w.workerRpc)
		tw.start()
		err := req.prepare.Action(req.Ctx, tw)
		w.lk.Lock()

		if err != nil {
			// prepare failed: release the reservation and report the error
			w.preparing.Free(req.SchedId, req.PrepSealTask(), w.Info.Resources, needResPrep)
			w.lk.Unlock()

			select {
			case sw.taskDone <- struct{}{}:
			case <-sh.closing:
				log.Warnf("scheduler closed while sending response (prepare error: %+v)", err)
			default: // there is a notification pending already
			}

			select {
			case req.ret <- workerResponse{err: err}:
			case <-req.Ctx.Done():
				log.Warnf("request got cancelled before we could respond (prepare error: %+v)", err)
			case <-sh.closing:
				log.Warnf("scheduler closed while sending response (prepare error: %+v)", err)
			}
			return
		}

		// fresh tracked-worker wrapper for the main work phase
		tw = sh.workTracker.worker(sw.wid, w.Info, w.workerRpc)

		// start tracking work first early in case we need to wait for resources
		werr := make(chan error, 1)
		go func() {
			werr <- req.work(req.Ctx, tw)
		}()

		// wait (if needed) for resources in the 'active' window
		err = w.active.withResources(req.SchedId, sw.wid, w.Info, req.SealTask(), needRes, &w.lk, func() error {
			// active resources acquired: the prepare reservation can go
			w.preparing.Free(req.SchedId, req.PrepSealTask(), w.Info.Resources, needResPrep)
			w.lk.Unlock()
			defer w.lk.Lock() // we MUST return locked from this function

			// make sure the worker loop sees that the prepare task has finished
			select {
			case sw.taskDone <- struct{}{}:
			case <-sh.closing:
			default: // there is a notification pending already
			}

			// Do the work!
			tw.start()
			err = <-werr

			select {
			case req.ret <- workerResponse{err: err}:
			case <-req.Ctx.Done():
				log.Warnf("request got cancelled before we could respond")
			case <-sh.closing:
				log.Warnf("scheduler closed while sending response")
			}

			return nil
		})

		w.lk.Unlock()

		// make sure the worker loop sees that the task has finished
		select {
		case sw.taskDone <- struct{}{}:
		default: // there is a notification pending already
		}

		// This error should always be nil, since nothing is setting it, but just to be safe:
		if err != nil {
			log.Errorf("error executing worker (withResources): %+v", err)
		}
	}()

	return nil
}
|
|
|
|
|
2022-05-18 13:47:08 +00:00
|
|
|
func (sw *schedWorker) startProcessingReadyTask(req *WorkerRequest) error {
|
2021-09-15 14:37:27 +00:00
|
|
|
w, sh := sw.worker, sw.sched
|
|
|
|
|
2022-05-18 13:47:08 +00:00
|
|
|
needRes := w.Info.Resources.ResourceSpec(req.Sector.ProofType, req.TaskType)
|
2021-09-15 14:37:27 +00:00
|
|
|
|
2023-05-10 19:43:42 +00:00
|
|
|
w.active.Add(req.SchedId, req.SealTask(), w.Info.Resources, needRes)
|
2021-09-15 14:37:27 +00:00
|
|
|
|
|
|
|
go func() {
|
|
|
|
// Do the work!
|
2022-05-18 13:47:08 +00:00
|
|
|
tw := sh.workTracker.worker(sw.wid, w.Info, w.workerRpc)
|
2021-10-15 19:04:03 +00:00
|
|
|
tw.start()
|
2022-05-18 13:47:08 +00:00
|
|
|
err := req.work(req.Ctx, tw)
|
2021-09-15 14:37:27 +00:00
|
|
|
|
|
|
|
select {
|
|
|
|
case req.ret <- workerResponse{err: err}:
|
2022-05-18 13:47:08 +00:00
|
|
|
case <-req.Ctx.Done():
|
2021-09-15 14:37:27 +00:00
|
|
|
log.Warnf("request got cancelled before we could respond")
|
|
|
|
case <-sh.closing:
|
|
|
|
log.Warnf("scheduler closed while sending response")
|
|
|
|
}
|
|
|
|
|
|
|
|
w.lk.Lock()
|
|
|
|
|
2023-05-10 19:43:42 +00:00
|
|
|
w.active.Free(req.SchedId, req.SealTask(), w.Info.Resources, needRes)
|
2021-09-15 14:37:27 +00:00
|
|
|
|
|
|
|
select {
|
|
|
|
case sw.taskDone <- struct{}{}:
|
|
|
|
case <-sh.closing:
|
|
|
|
log.Warnf("scheduler closed while sending response (prepare error: %+v)", err)
|
2021-10-03 15:09:43 +00:00
|
|
|
default: // there is a notification pending already
|
2021-09-15 14:37:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
w.lk.Unlock()
|
|
|
|
|
|
|
|
// This error should always be nil, since nothing is setting it, but just to be safe:
|
|
|
|
if err != nil {
|
|
|
|
log.Errorf("error executing worker (ready): %+v", err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-05-18 13:47:08 +00:00
|
|
|
// workerCleanup shuts down a worker's management goroutine and removes the
// worker's open window requests from the scheduler. Called with
// sh.workersLk held; the lock is temporarily released while waiting (up to
// one second) for handleWorker to exit. Safe to call more than once —
// closingMgr is only closed the first time and the window purge is guarded
// by cleanupStarted.
func (sh *Scheduler) workerCleanup(wid storiface.WorkerID, w *WorkerHandle) {
	select {
	case <-w.closingMgr:
		// already closed by an earlier call
	default:
		close(w.closingMgr)
	}

	// drop the lock while waiting: handleWorker needs it to exit
	sh.workersLk.Unlock()
	select {
	case <-w.closedMgr:
	case <-time.After(time.Second):
		log.Errorf("timeout closing worker manager goroutine %d", wid)
	}
	sh.workersLk.Lock()

	if !w.cleanupStarted {
		w.cleanupStarted = true

		// purge this worker's outstanding window requests so the scheduler
		// doesn't try to assign work to a dead worker
		newWindows := make([]*SchedWindowRequest, 0, len(sh.OpenWindows))
		for _, window := range sh.OpenWindows {
			if window.Worker != wid {
				newWindows = append(newWindows, window)
			}
		}
		sh.OpenWindows = newWindows

		log.Debugf("worker %s dropped", wid)
	}
}
|