lotus/sched_test.go

package sectorstorage

import (
	"context"
	"io"
	"sync"
	"testing"
	"time"

	"github.com/ipfs/go-cid"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/sector-storage/fsutil"
	"github.com/filecoin-project/sector-storage/sealtasks"
	"github.com/filecoin-project/sector-storage/stores"
	"github.com/filecoin-project/sector-storage/storiface"
)

func TestWithPriority(t *testing.T) {
	ctx := context.Background()

	require.Equal(t, DefaultSchedPriority, getPriority(ctx))

	ctx = WithPriority(ctx, 2222)

	require.Equal(t, 2222, getPriority(ctx))
}
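
// schedTestWorker is a minimal Worker stub for scheduler tests. It reports a
// fixed set of task types, storage paths, and resources, and panics on any
// sealing call, since these tests exercise scheduling only, never execution.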
type schedTestWorker struct {
	name      string
	taskTypes map[sealtasks.TaskType]struct{}
	paths     []stores.StoragePath

	closed  bool
	closing chan struct{}
}

func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
	panic("implement me")
}

func (s *schedTestWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) {
	panic("implement me")
}

func (s *schedTestWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
	panic("implement me")
}

func (s *schedTestWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) {
	panic("implement me")
}

func (s *schedTestWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
	panic("implement me")
}

func (s *schedTestWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
	panic("implement me")
}

func (s *schedTestWorker) Remove(ctx context.Context, sector abi.SectorID) error {
	panic("implement me")
}

func (s *schedTestWorker) NewSector(ctx context.Context, sector abi.SectorID) error {
	panic("implement me")
}

func (s *schedTestWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
	panic("implement me")
}

func (s *schedTestWorker) MoveStorage(ctx context.Context, sector abi.SectorID) error {
	panic("implement me")
}

func (s *schedTestWorker) Fetch(ctx context.Context, id abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error {
	panic("implement me")
}

func (s *schedTestWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error {
	panic("implement me")
}

func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error {
	panic("implement me")
}
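
// Only the informational methods below have real implementations; they are
// what the scheduler consults when assigning work.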
func (s *schedTestWorker) TaskTypes(ctx context.Context) (map[sealtasks.TaskType]struct{}, error) {
	return s.taskTypes, nil
}

func (s *schedTestWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) {
	return s.paths, nil
}
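
// Info advertises generous, fixed resources (128 GiB RAM, 32 CPUs, one GPU).
// The batching and priority cases below depend on these numbers to decide how
// many tasks fit on the worker at once.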
func (s *schedTestWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) {
	return storiface.WorkerInfo{
		Hostname: s.name,
		Resources: storiface.WorkerResources{
			MemPhysical: 128 << 30,
			MemSwap:     200 << 30,
			MemReserved: 2 << 30,
			CPUs:        32,
			GPUs:        []string{"a GPU"},
		},
	}, nil
}

func (s *schedTestWorker) Closing(ctx context.Context) (<-chan struct{}, error) {
	return s.closing, nil
}

func (s *schedTestWorker) Close() error {
	if !s.closed {
		s.closed = true
		close(s.closing)
	}

	return nil
}
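
// Compile-time check that schedTestWorker satisfies the Worker interface.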
var _ Worker = &schedTestWorker{}
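
// addTestWorker registers a stub worker with the scheduler: it attaches the
// worker's storage paths to the index, then hands a workerHandle to the
// scheduler's newWorkers channel.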
func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name string, taskTypes map[sealtasks.TaskType]struct{}) {
	w := &schedTestWorker{
		name:      name,
		taskTypes: taskTypes,
		paths:     []stores.StoragePath{{ID: "bb-8", Weight: 2, LocalPath: "<octopus>food</octopus>", CanSeal: true, CanStore: true}},

		closing: make(chan struct{}),
	}

	for _, path := range w.paths {
		err := index.StorageAttach(context.TODO(), stores.StorageInfo{
			ID:       path.ID,
			URLs:     nil,
			Weight:   path.Weight,
			CanSeal:  path.CanSeal,
			CanStore: path.CanStore,
		}, fsutil.FsStat{
			Capacity:  1 << 40,
			Available: 1 << 40,
			Reserved:  3,
		})
		require.NoError(t, err)
	}

	info, err := w.Info(context.TODO())
	require.NoError(t, err)

	sched.newWorkers <- &workerHandle{
		w:         w,
		info:      info,
		preparing: &activeResources{},
		active:    &activeResources{},
	}
}
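
// TestSchedStartStop checks that the scheduler can start, accept a worker,
// and shut down cleanly.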
func TestSchedStartStop(t *testing.T) {
	spt := abi.RegisteredSealProof_StackedDrg32GiBV1
	sched := newScheduler(spt)
	go sched.runSched()

	addTestWorker(t, sched, stores.NewIndex(), "fred", nil)

	sched.schedClose()
}
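
// TestSched drives the scheduler with scripted task sequences. Each subtest
// builds a fresh scheduler and index, registers workers, then replays a list
// of task steps that schedule work and assert when it does (or does not) run.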
func TestSched(t *testing.T) {
	ctx := context.Background()

	spt := abi.RegisteredSealProof_StackedDrg32GiBV1

	sectorAte := abi.SectorID{
		Miner:  8,
		Number: 8,
	}

	type workerSpec struct {
		name      string
		taskTypes map[sealtasks.TaskType]struct{}
	}

	noopPrepare := func(ctx context.Context, w Worker) error {
		return nil
	}

	type runMeta struct {
		done map[string]chan struct{}

		wg sync.WaitGroup
	}

	type task func(*testing.T, *scheduler, *stores.Index, *runMeta)
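
	// sched schedules a task that blocks inside its work function until its
	// entry in rm.done is closed, and asserts it ran on expectWorker.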
	sched := func(taskName, expectWorker string, taskType sealtasks.TaskType) task {
		return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) {
			done := make(chan struct{})
			rm.done[taskName] = done

			sel := newAllocSelector(ctx, index, stores.FTCache, stores.PathSealing)

			rm.wg.Add(1)
			go func() {
				defer rm.wg.Done()

				err := sched.Schedule(ctx, sectorAte, taskType, sel, noopPrepare, func(ctx context.Context, w Worker) error {
					wi, err := w.Info(ctx)
					require.NoError(t, err)
					require.Equal(t, expectWorker, wi.Hostname)

					log.Info("IN ", taskName)

					for {
						_, ok := <-done
						if !ok {
							break
						}
					}

					log.Info("OUT ", taskName)

					return nil
				})
				require.NoError(t, err)
			}()
		}
	}
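
	// taskStarted asserts the named task is running on a worker: the send on
	// its done channel can only complete if the work function is receiving.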
	taskStarted := func(name string) task {
		return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) {
			rm.done[name] <- struct{}{}
		}
	}
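
	// taskDone unblocks the named task and closes its channel so the work
	// function returns.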
	taskDone := func(name string) task {
		return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) {
			rm.done[name] <- struct{}{}
			close(rm.done[name])
		}
	}
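
	// taskNotScheduled asserts the named task is not running: a send on its
	// channel must block, since nothing is receiving.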
	taskNotScheduled := func(name string) task {
		return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) {
			select {
			case rm.done[name] <- struct{}{}:
				t.Fatal("not expected")
			case <-time.After(10 * time.Millisecond): // TODO: better synchronization thingy
			}
		}
	}
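
	// testFunc builds a subtest: spin up a scheduler, register the given
	// workers, replay the task steps, wait for all scheduled goroutines to
	// finish, then shut the scheduler down.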
	testFunc := func(workers []workerSpec, tasks []task) func(t *testing.T) {
		return func(t *testing.T) {
			index := stores.NewIndex()

			sched := newScheduler(spt)
			go sched.runSched()

			for _, worker := range workers {
				addTestWorker(t, sched, index, worker.name, worker.taskTypes)
			}

			rm := runMeta{
				done: map[string]chan struct{}{},
			}

			for _, task := range tasks {
				task(t, sched, index, &rm)
			}

			log.Info("wait for async stuff")
			rm.wg.Wait()

			sched.schedClose()
		}
	}
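
	// multTask runs several task steps as a single step.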
	multTask := func(tasks ...task) task {
		return func(t *testing.T, s *scheduler, index *stores.Index, meta *runMeta) {
			for _, tsk := range tasks {
				tsk(t, s, index, meta)
			}
		}
	}
t.Run("one-pc1", testFunc([]workerSpec{
2020-07-16 21:41:15 +00:00
{name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}},
2020-07-16 21:41:04 +00:00
}, []task{
sched("pc1-1", "fred", sealtasks.TTPreCommit1),
taskDone("pc1-1"),
}))
t.Run("pc1-2workers-1", testFunc([]workerSpec{
2020-07-16 21:41:15 +00:00
{name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}},
{name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}},
2020-07-16 21:41:04 +00:00
}, []task{
sched("pc1-1", "fred1", sealtasks.TTPreCommit1),
taskDone("pc1-1"),
}))
t.Run("pc1-2workers-2", testFunc([]workerSpec{
2020-07-16 21:41:15 +00:00
{name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}},
{name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}},
2020-07-16 21:41:04 +00:00
}, []task{
sched("pc1-1", "fred1", sealtasks.TTPreCommit1),
taskDone("pc1-1"),
}))
t.Run("pc1-block-pc2", testFunc([]workerSpec{
2020-07-16 21:41:15 +00:00
{name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}},
2020-07-16 21:41:04 +00:00
}, []task{
sched("pc1", "fred", sealtasks.TTPreCommit1),
taskStarted("pc1"),
sched("pc2", "fred", sealtasks.TTPreCommit2),
taskNotScheduled("pc2"),
taskDone("pc1"),
taskDone("pc2"),
}))
t.Run("pc2-block-pc1", testFunc([]workerSpec{
2020-07-16 21:41:15 +00:00
{name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}},
2020-07-16 21:41:04 +00:00
}, []task{
sched("pc2", "fred", sealtasks.TTPreCommit2),
taskStarted("pc2"),
sched("pc1", "fred", sealtasks.TTPreCommit1),
taskNotScheduled("pc1"),
taskDone("pc2"),
taskDone("pc1"),
}))
t.Run("pc1-batching", testFunc([]workerSpec{
2020-07-16 21:41:15 +00:00
{name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}},
2020-07-16 21:41:04 +00:00
}, []task{
sched("t1", "fred", sealtasks.TTPreCommit1),
taskStarted("t1"),
sched("t2", "fred", sealtasks.TTPreCommit1),
taskStarted("t2"),
// with worker settings, we can only run 2 parallel PC1s
// start 2 more to fill fetch buffer
sched("t3", "fred", sealtasks.TTPreCommit1),
taskNotScheduled("t3"),
sched("t4", "fred", sealtasks.TTPreCommit1),
taskNotScheduled("t4"),
taskDone("t1"),
taskDone("t2"),
taskStarted("t3"),
taskStarted("t4"),
taskDone("t3"),
taskDone("t4"),
}))
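
	// twoPC1 schedules a pair of PC1 tasks on fred and applies the given
	// assertion (taskStarted or taskNotScheduled) to each.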
	twoPC1 := func(prefix string, schedAssert func(name string) task) task {
		return multTask(
			sched(prefix+"-a", "fred", sealtasks.TTPreCommit1),
			schedAssert(prefix+"-a"),

			sched(prefix+"-b", "fred", sealtasks.TTPreCommit1),
			schedAssert(prefix+"-b"),
		)
	}

	twoPC1Done := func(prefix string) task {
		return multTask(
			taskDone(prefix+"-a"),
			taskDone(prefix+"-b"),
		)
	}
t.Run("pc1-pc2-prio", testFunc([]workerSpec{
2020-07-16 21:41:15 +00:00
{name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}},
2020-07-16 21:41:04 +00:00
}, []task{
// fill exec/fetch buffers
twoPC1("w0", taskStarted),
twoPC1("w1", taskNotScheduled),
// fill worker windows
twoPC1("w2", taskNotScheduled),
twoPC1("w3", taskNotScheduled),
// windowed
sched("t1", "fred", sealtasks.TTPreCommit1),
taskNotScheduled("t1"),
sched("t2", "fred", sealtasks.TTPreCommit1),
taskNotScheduled("t2"),
sched("t3", "fred", sealtasks.TTPreCommit2),
taskNotScheduled("t3"),
twoPC1Done("w0"),
twoPC1Done("w1"),
twoPC1Done("w2"),
twoPC1Done("w3"),
taskStarted("t1"),
taskNotScheduled("t2"),
taskNotScheduled("t3"),
taskDone("t1"),
taskStarted("t2"),
taskStarted("t3"),
taskDone("t2"),
taskDone("t3"),
}))
}