// Scheduler resource accounting for lotus-worker task assignment.
package sealer
import (
	"sync"

	"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
// ActiveResources tracks the resources currently committed on a single
// worker, plus a condition variable used to park withResources callers
// until enough capacity frees up. All fields are guarded by an external
// lock (the locker handed to withResources).
type ActiveResources struct {
	memUsedMin uint64 // sum of MinMemory (+BaseMinMemory at admission) over reserved tasks
	memUsedMax uint64 // sum of MaxMemory over reserved tasks
	gpuUsed float64 // sum of fractional GPU utilization; may exceed 1.0 with multiple GPUs
	cpuUse uint64 // CPU threads reserved
	taskCounters map[sealtasks.SealTaskType]int // running count per task type, for MaxConcurrent limits
	cond *sync.Cond // created lazily on first contention in withResources
	waiting int // number of goroutines blocked in withResources on cond
}
2022-05-27 14:15:52 +00:00
func NewActiveResources() *ActiveResources {
return &ActiveResources{
taskCounters: map[sealtasks.SealTaskType]int{},
}
}
2022-05-27 14:15:52 +00:00
func (a *ActiveResources) withResources(id storiface.WorkerID, wr storiface.WorkerInfo, tt sealtasks.SealTaskType, r storiface.Resources, locker sync.Locker, cb func() error) error {
for !a.CanHandleRequest(tt, r, id, "withResources", wr) {
2020-07-09 11:49:01 +00:00
if a.cond == nil {
a.cond = sync.NewCond(locker)
}
2021-09-15 14:37:27 +00:00
a.waiting++
2020-07-09 11:49:01 +00:00
a.cond.Wait()
2021-09-15 14:37:27 +00:00
a.waiting--
2020-07-09 11:49:01 +00:00
}
a.Add(tt, wr.Resources, r)
2020-07-09 11:49:01 +00:00
err := cb()
a.Free(tt, wr.Resources, r)
2020-07-09 11:49:01 +00:00
return err
}
2021-09-15 14:37:27 +00:00
// must be called with the same lock as the one passed to withResources
2022-05-27 14:15:52 +00:00
func (a *ActiveResources) hasWorkWaiting() bool {
2021-09-15 14:37:27 +00:00
return a.waiting > 0
}
2022-05-27 14:15:52 +00:00
// add task resources to ActiveResources and return utilization difference
func (a *ActiveResources) Add(tt sealtasks.SealTaskType, wr storiface.WorkerResources, r storiface.Resources) float64 {
startUtil := a.utilization(wr)
if r.GPUUtilization > 0 {
a.gpuUsed += r.GPUUtilization
}
a.cpuUse += r.Threads(wr.CPUs, len(wr.GPUs))
2020-07-09 11:49:01 +00:00
a.memUsedMin += r.MinMemory
a.memUsedMax += r.MaxMemory
2022-05-25 15:30:00 +00:00
a.taskCounters[tt]++
return a.utilization(wr) - startUtil
2020-07-09 11:49:01 +00:00
}
2022-05-27 14:15:52 +00:00
func (a *ActiveResources) Free(tt sealtasks.SealTaskType, wr storiface.WorkerResources, r storiface.Resources) {
if r.GPUUtilization > 0 {
a.gpuUsed -= r.GPUUtilization
2020-07-09 11:49:01 +00:00
}
a.cpuUse -= r.Threads(wr.CPUs, len(wr.GPUs))
2020-07-09 11:49:01 +00:00
a.memUsedMin -= r.MinMemory
a.memUsedMax -= r.MaxMemory
a.taskCounters[tt]--
2021-09-15 14:37:27 +00:00
if a.cond != nil {
a.cond.Broadcast()
}
2020-07-09 11:49:01 +00:00
}
// CanHandleRequest evaluates if the worker has enough available resources to
2021-06-21 19:49:16 +00:00
// handle the request.
2022-05-27 14:15:52 +00:00
func (a *ActiveResources) CanHandleRequest(tt sealtasks.SealTaskType, needRes storiface.Resources, wid storiface.WorkerID, caller string, info storiface.WorkerInfo) bool {
if needRes.MaxConcurrent > 0 {
if a.taskCounters[tt] >= needRes.MaxConcurrent {
log.Debugf("sched: not scheduling on worker %s for %s; at task limit tt=%s, curcount=%d", wid, caller, tt, a.taskCounters[tt])
return false
}
}
2021-06-21 19:49:16 +00:00
if info.IgnoreResources {
// shortcircuit; if this worker is ignoring resources, it can always handle the request.
return true
}
2020-07-09 11:49:01 +00:00
2021-06-21 19:49:16 +00:00
res := info.Resources
2020-07-09 11:49:01 +00:00
// TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running)
memNeeded := needRes.MinMemory + needRes.BaseMinMemory
memUsed := a.memUsedMin
// assume that MemUsed can be swapped, so only check it in the vmem Check
memAvail := res.MemPhysical - memUsed
if memNeeded > memAvail {
log.Debugf("sched: not scheduling on worker %s for %s; not enough physical memory - need: %dM, have %dM available", wid, caller, memNeeded/mib, memAvail/mib)
2020-07-09 11:49:01 +00:00
return false
}
vmemNeeded := needRes.MaxMemory + needRes.BaseMinMemory
vmemUsed := a.memUsedMax
workerMemoryReserved := res.MemUsed + res.MemSwapUsed // memory used outside lotus-worker (used by the OS, etc.)
if vmemUsed < workerMemoryReserved {
vmemUsed = workerMemoryReserved
}
vmemAvail := (res.MemPhysical + res.MemSwap) - vmemUsed
2020-07-09 11:49:01 +00:00
if vmemNeeded > vmemAvail {
log.Debugf("sched: not scheduling on worker %s for %s; not enough virtual memory - need: %dM, have %dM available", wid, caller, vmemNeeded/mib, vmemAvail/mib)
2020-07-09 11:49:01 +00:00
return false
}
if a.cpuUse+needRes.Threads(res.CPUs, len(res.GPUs)) > res.CPUs {
log.Debugf("sched: not scheduling on worker %s for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads(res.CPUs, len(res.GPUs)), a.cpuUse, res.CPUs)
return false
2020-07-09 11:49:01 +00:00
}
if len(res.GPUs) > 0 && needRes.GPUUtilization > 0 {
if a.gpuUsed+needRes.GPUUtilization > float64(len(res.GPUs)) {
log.Debugf("sched: not scheduling on worker %s for %s; GPU(s) in use", wid, caller)
2020-07-09 11:49:01 +00:00
return false
}
}
return true
}
// utilization returns a number in 0..1 range indicating fraction of used resources
2022-05-27 14:15:52 +00:00
func (a *ActiveResources) utilization(wr storiface.WorkerResources) float64 { // todo task type
2020-07-09 11:49:01 +00:00
var max float64
cpu := float64(a.cpuUse) / float64(wr.CPUs)
max = cpu
memUsed := a.memUsedMin
if memUsed < wr.MemUsed {
memUsed = wr.MemUsed
}
memMin := float64(memUsed) / float64(wr.MemPhysical)
2020-07-09 11:49:01 +00:00
if memMin > max {
max = memMin
}
vmemUsed := a.memUsedMax
if a.memUsedMax < wr.MemUsed+wr.MemSwapUsed {
vmemUsed = wr.MemUsed + wr.MemSwapUsed
}
memMax := float64(vmemUsed) / float64(wr.MemPhysical+wr.MemSwap)
2020-07-09 11:49:01 +00:00
if memMax > max {
max = memMax
}
if len(wr.GPUs) > 0 {
gpuMax := a.gpuUsed / float64(len(wr.GPUs))
if gpuMax > max {
max = gpuMax
}
}
2020-07-09 11:49:01 +00:00
return max
}
// Utilization returns the aggregate utilization of this worker: the sum of
// per-bucket utilization fractions for active tasks, tasks in the
// preparing phase, and resources already allocated to scheduling windows.
// Being a sum of fractions, the result can exceed 1.
//
// Takes wh.lk and wh.wndLk sequentially (never both at once); reads of
// wh.Info.Resources happen under whichever lock guards that bucket.
func (wh *WorkerHandle) Utilization() float64 {
	wh.lk.Lock()
	u := wh.active.utilization(wh.Info.Resources)
	u += wh.preparing.utilization(wh.Info.Resources)
	wh.lk.Unlock()
	wh.wndLk.Lock()
	for _, window := range wh.activeWindows {
		u += window.Allocated.utilization(wh.Info.Resources)
	}
	wh.wndLk.Unlock()
	return u
}