Merge remote-tracking branch 'origin/master' into next

Łukasz Magiera 2020-07-01 11:21:05 +02:00
commit a2de752a33
6 changed files with 22 additions and 25 deletions

extern/filecoin-ffi vendored

@@ -1 +1 @@
-Subproject commit 5bb4a309bce9d446ac618f34a8b9e2883af2002f
+Subproject commit 5342c7c97d1a1df4650629d14f2823d52889edd9


@@ -459,6 +459,10 @@ func openFDs(t *testing.T) int {
 		if strings.HasPrefix(l, "/dev/nvidia") {
 			skip++
 		}
+
+		if strings.HasPrefix(l, "/var/tmp/filecoin-proof-parameters/") {
+			skip++
+		}
 	}
 
 	return len(dent) - skip
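For context, openFDs is a test helper that counts the file descriptors currently held by the test process and subtracts the ones expected to stay open; this hunk adds cached proof-parameter files under /var/tmp/filecoin-proof-parameters/ to the skip list, alongside the existing /dev/nvidia case. The rest of the helper is not shown in the diff, so the following is only an assumed sketch of how such a helper is typically written (listing /proc/self/fd and resolving each link), not the file's actual contents:

package sectorstorage_test // hypothetical package name, not taken from the diff

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"testing"
)

// openFDs-style sketch: count open descriptors, skipping ones the test
// environment is expected to hold (GPU devices, cached proof parameters).
func openFDs(t *testing.T) int {
	dent, err := ioutil.ReadDir("/proc/self/fd")
	if err != nil {
		t.Fatal(err)
	}

	var skip int
	for _, info := range dent {
		// Resolve the symlink to see what the descriptor points at.
		l, err := os.Readlink(filepath.Join("/proc/self/fd", info.Name()))
		if err != nil {
			continue
		}

		if strings.HasPrefix(l, "/dev/nvidia") {
			skip++
		}

		if strings.HasPrefix(l, "/var/tmp/filecoin-proof-parameters/") {
			skip++
		}
	}

	return len(dent) - skip
}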

go.mod

@@ -9,7 +9,7 @@ require (
 	github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e
 	github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5
 	github.com/filecoin-project/go-paramfetch v0.0.1
-	github.com/filecoin-project/specs-actors v0.6.0
+	github.com/filecoin-project/specs-actors v0.6.1
 	github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea
 	github.com/google/uuid v1.1.1
 	github.com/gorilla/mux v1.7.4

go.sum

@@ -43,8 +43,8 @@ github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go
 github.com/filecoin-project/go-paramfetch v0.0.1 h1:gV7bs5YaqlgpGFMiLxInGK2L1FyCXUE0rimz4L7ghoE=
 github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
 github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y=
-github.com/filecoin-project/specs-actors v0.6.0 h1:IepUsmDGY60QliENVTkBTAkwqGWw9kNbbHOcU/9oiC0=
-github.com/filecoin-project/specs-actors v0.6.0/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY=
+github.com/filecoin-project/specs-actors v0.6.1 h1:rhHlEzqcuuQU6oKc4csuq+/kQBDZ4EXtSomoN2XApCA=
+github.com/filecoin-project/specs-actors v0.6.1/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY=
 github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea h1:iixjULRQFPn7Q9KlIqfwLJnlAXO10bbkI+xy5GKGdLY=
 github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=


@@ -20,8 +20,6 @@ func (r Resources) MultiThread() bool {
 	return r.Threads == -1
 }
 
-const MaxCachingOverhead = 32 << 30
-
 var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources{
 	sealtasks.TTAddPiece: {
 		abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ // This is probably a bit conservative
@@ -68,27 +66,27 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
 	sealtasks.TTPreCommit1: {
 		abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{
 			MaxMemory: 128 << 30,
-			MinMemory: 96 << 30,
+			MinMemory: 112 << 30,
 			Threads: 1,
-			BaseMinMemory: 60 << 30,
+			BaseMinMemory: 10 << 20,
 		},
 		abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{
 			MaxMemory: 64 << 30,
-			MinMemory: 48 << 30,
+			MinMemory: 56 << 30,
 			Threads: 1,
-			BaseMinMemory: 30 << 30,
+			BaseMinMemory: 10 << 20,
 		},
 		abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{
-			MaxMemory: 3 << 29, // 1.5G
-			MinMemory: 1 << 30,
+			MaxMemory: 1 << 30,
+			MinMemory: 768 << 20,
 			Threads: 1,
-			BaseMinMemory: 1 << 30,
+			BaseMinMemory: 1 << 20,
 		},
 		abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{
 			MaxMemory: 2 << 10,
@@ -195,7 +193,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
 	},
 	sealtasks.TTCommit2: {
 		abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{
-			MaxMemory: 260 << 30, // TODO: Confirm
+			MaxMemory: 190 << 30, // TODO: Confirm
 			MinMemory: 60 << 30,
 			Threads: -1,
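The memory values in the ResourceTable use Go's shift notation for byte sizes: N << 30 is N GiB, N << 20 is N MiB, N << 10 is N KiB. So the hunks above raise PreCommit1 MinMemory for 64 GiB and 32 GiB sectors to 112 GiB and 56 GiB, drop BaseMinMemory from whole-sector sizes down to 10 MiB, and replace the 512 MiB sector's 1.5 GiB MaxMemory with 1 GiB. A minimal illustration of the arithmetic (standalone example, not code from this repo):

package main

import "fmt"

func main() {
	// Shift notation used by the ResourceTable entries above, in bytes.
	fmt.Println(int64(112) << 30) // 120259084288 = 112 GiB (new 64 GiB PreCommit1 MinMemory)
	fmt.Println(int64(56) << 30)  // 60129542144  = 56 GiB  (new 32 GiB PreCommit1 MinMemory)
	fmt.Println(int64(10) << 20)  // 10485760     = 10 MiB  (new BaseMinMemory)
	fmt.Println(int64(768) << 20) // 805306368    = 768 MiB (new 512 MiB PreCommit1 MinMemory)
	fmt.Println(int64(3) << 29)   // 1610612736   = 1.5 GiB (old 512 MiB PreCommit1 MaxMemory)
}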


@@ -250,7 +250,7 @@ func (sh *scheduler) maybeSchedRequest(req *workerRequest) (bool, error) {
 		}
 		tried++
 
-		if !canHandleRequest(needRes, sh.spt, wid, worker.info.Resources, worker.preparing) {
+		if !canHandleRequest(needRes, wid, worker.info.Resources, worker.preparing) {
 			continue
 		}
@@ -316,7 +316,7 @@ func (sh *scheduler) assignWorker(wid WorkerID, w *workerHandle, req *workerRequ
 			return
 		}
 
-		err = w.active.withResources(sh.spt, wid, w.info.Resources, needRes, &sh.workersLk, func() error {
+		err = w.active.withResources(wid, w.info.Resources, needRes, &sh.workersLk, func() error {
 			w.preparing.free(w.info.Resources, needRes)
 			sh.workersLk.Unlock()
 			defer sh.workersLk.Lock() // we MUST return locked from this function
@@ -350,8 +350,8 @@ func (sh *scheduler) assignWorker(wid WorkerID, w *workerHandle, req *workerRequ
 	return nil
 }
 
-func (a *activeResources) withResources(spt abi.RegisteredSealProof, id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error {
-	for !canHandleRequest(r, spt, id, wr, a) {
+func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error {
+	for !canHandleRequest(r, id, wr, a) {
 		if a.cond == nil {
 			a.cond = sync.NewCond(locker)
 		}
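The withResources change above only drops the seal proof type from the signature; the concurrency pattern stays the same: the caller holds a lock, waits on a sync.Cond until canHandleRequest says the request fits, runs the callback (which, per the previous hunk, temporarily releases the scheduler lock), and frees the resources afterwards. A self-contained illustration of that wait/broadcast pattern, with generic names rather than this repository's types:

package main

import (
	"fmt"
	"sync"
)

// budget is a toy stand-in for activeResources: a capacity plus the amount
// currently committed.
type budget struct {
	mu   sync.Mutex
	cond *sync.Cond
	used int
	cap  int
}

// withBudget waits (under mu) until need fits into the remaining budget,
// runs cb, returns the budget, and wakes other waiters. This mirrors the
// cond-wait structure of withResources above.
func (b *budget) withBudget(need int, cb func() error) error {
	b.mu.Lock()
	defer b.mu.Unlock()

	if b.cond == nil {
		b.cond = sync.NewCond(&b.mu)
	}
	for b.used+need > b.cap {
		b.cond.Wait()
	}

	b.used += need
	err := cb()
	b.used -= need

	b.cond.Broadcast()
	return err
}

func main() {
	b := &budget{cap: 2}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			_ = b.withBudget(1, func() error {
				fmt.Println("task", i, "running")
				return nil
			})
		}(i)
	}
	wg.Wait()
}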
@@ -396,7 +396,7 @@ func (a *activeResources) free(wr storiface.WorkerResources, r Resources) {
 	a.memUsedMax -= r.MaxMemory
 }
 
-func canHandleRequest(needRes Resources, spt abi.RegisteredSealProof, wid WorkerID, res storiface.WorkerResources, active *activeResources) bool {
+func canHandleRequest(needRes Resources, wid WorkerID, res storiface.WorkerResources, active *activeResources) bool {
 	// TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running)
 	minNeedMem := res.MemReserved + active.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory
@@ -406,12 +406,7 @@ func canHandleRequest(needRes Resources, spt abi.RegisteredSealProof, wid Worker
 	}
 
 	maxNeedMem := res.MemReserved + active.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory
-	if spt == abi.RegisteredSealProof_StackedDrg32GiBV1 {
-		maxNeedMem += MaxCachingOverhead
-	}
-	if spt == abi.RegisteredSealProof_StackedDrg64GiBV1 {
-		maxNeedMem += MaxCachingOverhead * 2 // ewwrhmwh
-	}
 	if maxNeedMem > res.MemSwap+res.MemPhysical {
 		log.Debugf("sched: not scheduling on worker %d; not enough virtual memory - need: %dM, have %dM", wid, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib)
 		return false
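After this change canHandleRequest no longer needs the seal proof type: instead of padding maxNeedMem with a per-proof MaxCachingOverhead, it checks the task's declared memory needs directly, on top of the worker's reserved and already-committed memory. A minimal self-contained sketch of that check, with generic names standing in for the repository's types; the min-memory-vs-physical-RAM comparison is only implied by the lines visible in the hunk and is assumed here:

package main

import "fmt"

// workerRes mirrors the storiface.WorkerResources fields used by the check.
type workerRes struct {
	MemPhysical uint64
	MemSwap     uint64
	MemReserved uint64
}

// activeRes mirrors the scheduler's per-worker accounting of committed memory.
type activeRes struct {
	memUsedMin uint64
	memUsedMax uint64
}

// need mirrors the MinMemory/MaxMemory/BaseMinMemory fields of Resources.
type need struct {
	MinMemory     uint64
	MaxMemory     uint64
	BaseMinMemory uint64
}

// canHandle follows the post-change shape of the logic: the minimum
// requirement must fit in physical RAM, the maximum requirement must fit in
// RAM plus swap, with no proof-type-specific overhead added.
func canHandle(n need, res workerRes, active activeRes) bool {
	minNeedMem := res.MemReserved + active.memUsedMin + n.MinMemory + n.BaseMinMemory
	if minNeedMem > res.MemPhysical {
		return false
	}

	maxNeedMem := res.MemReserved + active.memUsedMax + n.MaxMemory + n.BaseMinMemory
	if maxNeedMem > res.MemSwap+res.MemPhysical {
		return false
	}

	return true
}

func main() {
	w := workerRes{MemPhysical: 128 << 30, MemSwap: 64 << 30, MemReserved: 2 << 30}
	// 32 GiB PreCommit1 requirements after this change.
	pc1 := need{MinMemory: 56 << 30, MaxMemory: 64 << 30, BaseMinMemory: 10 << 20}
	fmt.Println(canHandle(pc1, w, activeRes{})) // true: fits on an idle worker
	fmt.Println(canHandle(pc1, w, activeRes{memUsedMin: 80 << 30, memUsedMax: 80 << 30})) // false: min no longer fits in RAM
}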