sched: Fix worker reenabling

Łukasz Magiera 2020-10-30 18:01:37 +01:00
parent af1d45d969
commit 774e2ecebf
3 changed files with 78 additions and 16 deletions
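
Note: read together, the hunks below point at a re-enable path that keyed off an enabled value captured once per outer scheduler loop, so a worker disabled after a failed heartbeat could stay disabled even after its session recovered. A minimal sketch of that stale-snapshot pattern (sessionOK() is invented for illustration and is not in the diff):

	enabled := worker.enabled // captured once, before entering the inner loop
	for {
		if sessionOK() {
			if !enabled { // stale: still true from before the worker was disabled
				worker.enabled = true // so the re-enable branch is never taken
			}
		}
	}

The change appears to re-read worker.enabled under sched.workersLk on every pass of the inner loop, adds a testDisable hook to the local worker so a test can make it look unreachable, and adds TestReenableWorker to drive the disable/re-enable cycle.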


@@ -10,6 +10,7 @@ import (
	"path/filepath"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
@@ -376,3 +377,59 @@ func TestRestartWorker(t *testing.T) {
	require.NoError(t, err)
	require.Empty(t, uf)
}

func TestReenableWorker(t *testing.T) {
	logging.SetAllLoggers(logging.LevelDebug)
	stores.HeartbeatInterval = 5 * time.Millisecond

	ctx, done := context.WithCancel(context.Background())
	defer done()

	ds := datastore.NewMapDatastore()

	m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, ds)
	defer cleanup()

	localTasks := []sealtasks.TaskType{
		sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch,
	}

	wds := datastore.NewMapDatastore()

	arch := make(chan chan apres)
	w := newLocalWorker(func() (ffiwrapper.Storage, error) {
		return &testExec{apch: arch}, nil
	}, WorkerConfig{
		SealProof: 0,
		TaskTypes: localTasks,
	}, stor, lstor, idx, m, statestore.New(wds))

	err := m.AddWorker(ctx, w)
	require.NoError(t, err)

	time.Sleep(time.Millisecond * 100)

	// disable
	atomic.StoreInt64(&w.testDisable, 1)

	for i := 0; i < 100; i++ {
		if !m.WorkerStats()[w.session].Enabled {
			break
		}

		time.Sleep(time.Millisecond * 3)
	}
	require.False(t, m.WorkerStats()[w.session].Enabled)

	// reenable
	atomic.StoreInt64(&w.testDisable, 0)

	for i := 0; i < 100; i++ {
		if m.WorkerStats()[w.session].Enabled {
			break
		}

		time.Sleep(time.Millisecond * 3)
	}
	require.True(t, m.WorkerStats()[w.session].Enabled)
}
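
Note: the two bounded poll loops above wait up to roughly 300ms for WorkerStats to reflect the change. If the testify version in use has it, require.Eventually expresses the same wait; this is only an alternative phrasing, not part of the commit:

	require.Eventually(t, func() bool {
		return !m.WorkerStats()[w.session].Enabled
	}, time.Second, 3*time.Millisecond)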


@@ -104,6 +104,7 @@ func (sw *schedWorker) handleWorker() {
	defer sw.heartbeatTimer.Stop()

	for {
		{
			sched.workersLk.Lock()
			enabled := worker.enabled
			sched.workersLk.Unlock()
@@ -114,6 +115,7 @@ func (sw *schedWorker) handleWorker() {
					return // graceful shutdown
				}
			}
		}

		// wait for more windows to come in, or for tasks to get finished (blocking)
		for {
@@ -123,13 +125,10 @@ func (sw *schedWorker) handleWorker() {
			}

			// session looks good
			if !enabled {
				sched.workersLk.Lock()
				worker.enabled = true
				sched.workersLk.Unlock()

				// we'll send window requests on the next loop
			}
			sched.workersLk.Unlock()

			// wait for more tasks to be assigned by the main scheduler or for the worker
			// to finish processing a task
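
Note: the +/- markers of this hunk did not survive, so removed and added lines are interleaved above, which is why sched.workersLk.Unlock() appears twice. Combined with the braces added in the previous two hunks, the patched inner loop plausibly re-checks the flag under the lock along these lines (an inferred shape, not the verbatim patch):

	// session looks good
	sched.workersLk.Lock()
	if !worker.enabled {
		worker.enabled = true
		// we'll send window requests on the next loop
	}
	sched.workersLk.Unlock()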


@@ -8,6 +8,7 @@ import (
	"reflect"
	"runtime"
	"sync"
	"sync/atomic"
	"time"

	"github.com/elastic/go-sysinfo"
@@ -52,6 +53,7 @@ type LocalWorker struct {
	running sync.WaitGroup

	session     uuid.UUID
	testDisable int64
	closing     chan struct{}
}
@@ -501,6 +503,10 @@ func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) {
}

func (l *LocalWorker) Session(ctx context.Context) (uuid.UUID, error) {
	if atomic.LoadInt64(&l.testDisable) == 1 {
		return uuid.UUID{}, xerrors.Errorf("disabled")
	}

	select {
	case <-l.closing:
		return ClosedWorkerID, nil
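
Note: setting testDisable to 1 makes Session fail, which the scheduler's session check treats as the worker being unreachable, and clearing it lets the session report succeed again. A minimal caller-side sketch (illustrative only; the scheduler reaches this through its own heartbeat path, not literally this code):

	atomic.StoreInt64(&w.testDisable, 1) // worker now reports itself as unreachable
	if _, err := w.Session(ctx); err != nil {
		// the scheduler disables the worker when the session check errors
	}
	atomic.StoreInt64(&w.testDisable, 0) // subsequent Session calls succeed again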