//stm: #unit
package sealer

import (
	"context"
	"fmt"
	"io"
	"runtime"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/google/uuid"
	"github.com/ipfs/go-cid"
	logging "github.com/ipfs/go-log/v2"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-state-types/abi"
	prooftypes "github.com/filecoin-project/go-state-types/proof"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/storage/paths"
	"github.com/filecoin-project/lotus/storage/sealer/fsutil"
	"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

func init() {
	InitWait = 10 * time.Millisecond
}

func TestWithPriority(t *testing.T) {
	ctx := context.Background()

	require.Equal(t, DefaultSchedPriority, getPriority(ctx))

	ctx = WithPriority(ctx, 2222)

	require.Equal(t, 2222, getPriority(ctx))
}

var decentWorkerResources = storiface.WorkerResources{
	MemPhysical: 128 << 30,
	MemSwap:     200 << 30,
	MemUsed:     1 << 30,
	MemSwapUsed: 1 << 30,
	CPUs:        32,
	GPUs:        []string{},
}

var constrainedWorkerResources = storiface.WorkerResources{
	MemPhysical: 1 << 30,
	MemUsed:     1 << 30,
	MemSwapUsed: 1 << 30,
	CPUs:        1,
}
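
// schedTestWorker is a minimal stub implementation of the Worker interface
// used to drive the scheduler in tests: only Info, TaskTypes, Paths, Session
// and Close do anything useful; all of the sealing methods simply panic.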
type schedTestWorker struct {
	name      string
	taskTypes map[sealtasks.TaskType]struct{}
	paths     []storiface.StoragePath

	closed  bool
	session uuid.UUID

	resources       storiface.WorkerResources
	ignoreResources bool
}

func (s *schedTestWorker) DownloadSectorData(ctx context.Context, sector storiface.SectorRef, finalized bool, src map[storiface.SectorFileType]storiface.SectorLocation) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector storiface.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) SealPreCommit2(ctx context.Context, sector storiface.SectorRef, pc1o storiface.PreCommit1Out) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) SealCommit1(ctx context.Context, sector storiface.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storiface.SectorCids) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) SealCommit2(ctx context.Context, sector storiface.SectorRef, c1o storiface.Commit1Out) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) FinalizeSector(ctx context.Context, sector storiface.SectorRef) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) ReleaseUnsealed(ctx context.Context, sector storiface.SectorRef, keepUnsealed []storiface.Range) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) Remove(ctx context.Context, sector storiface.SectorRef) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) NewSector(ctx context.Context, sector storiface.SectorRef) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) AddPiece(ctx context.Context, sector storiface.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) ReplicaUpdate(ctx context.Context, sector storiface.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) ProveReplicaUpdate1(ctx context.Context, sector storiface.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) ProveReplicaUpdate2(ctx context.Context, sector storiface.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storiface.ReplicaVanillaProofs) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) GenerateSectorKeyFromData(ctx context.Context, sector storiface.SectorRef, commD cid.Cid) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) FinalizeReplicaUpdate(ctx context.Context, sector storiface.SectorRef) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) MoveStorage(ctx context.Context, sector storiface.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) Fetch(ctx context.Context, id storiface.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) UnsealPiece(ctx context.Context, id storiface.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id storiface.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
	panic("implement me")
}

func (s *schedTestWorker) GenerateWinningPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []storiface.PostSectorChallenge, randomness abi.PoStRandomness) ([]prooftypes.PoStProof, error) {
	panic("implement me")
}

func (s *schedTestWorker) GenerateWindowPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []storiface.PostSectorChallenge, partitionIdx int, randomness abi.PoStRandomness) (storiface.WindowPoStResult, error) {
	panic("implement me")
}

func (s *schedTestWorker) TaskTypes(ctx context.Context) (map[sealtasks.TaskType]struct{}, error) {
	return s.taskTypes, nil
}

func (s *schedTestWorker) Paths(ctx context.Context) ([]storiface.StoragePath, error) {
	return s.paths, nil
}

func (s *schedTestWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) {
	return storiface.WorkerInfo{
		Hostname:        s.name,
		IgnoreResources: s.ignoreResources,
		Resources:       s.resources,
	}, nil
}

func (s *schedTestWorker) Session(context.Context) (uuid.UUID, error) {
	return s.session, nil
}

func (s *schedTestWorker) Close() error {
	if !s.closed {
		log.Info("close schedTestWorker")
		s.closed = true
		s.session = uuid.UUID{}
	}
	return nil
}

var _ Worker = &schedTestWorker{}
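
// addTestWorker registers a schedTestWorker with the scheduler and attaches
// its storage path to the given index, so tasks can be scheduled onto it.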
func addTestWorker(t *testing.T, sched *Scheduler, index *paths.MemIndex, name string, taskTypes map[sealtasks.TaskType]struct{}, resources storiface.WorkerResources, ignoreResources bool) {
	w := &schedTestWorker{
		name:      name,
		taskTypes: taskTypes,
		paths:     []storiface.StoragePath{{ID: "bb-8", Weight: 2, LocalPath: "<octopus>food</octopus>", CanSeal: true, CanStore: true}},

		session: uuid.New(),

		resources:       resources,
		ignoreResources: ignoreResources,
	}

	for _, path := range w.paths {
		err := index.StorageAttach(context.TODO(), storiface.StorageInfo{
			ID:       path.ID,
			URLs:     nil,
			Weight:   path.Weight,
			CanSeal:  path.CanSeal,
			CanStore: path.CanStore,
		}, fsutil.FsStat{
			Capacity:    1 << 40,
			Available:   1 << 40,
			FSAvailable: 1 << 40,
			Reserved:    3,
		})
		require.NoError(t, err)
	}

	sessID, err := w.Session(context.TODO())
	require.NoError(t, err)

	wid := storiface.WorkerID(sessID)

	wh, err := newWorkerHandle(context.TODO(), w)
	require.NoError(t, err)

	require.NoError(t, sched.runWorker(context.TODO(), wid, wh))
}

func TestSchedStartStop(t *testing.T) {
	sched, err := newScheduler(context.Background(), "")
	require.NoError(t, err)
	go sched.runSched()

	addTestWorker(t, sched, paths.NewMemIndex(nil), "fred", nil, decentWorkerResources, false)

	require.NoError(t, sched.Close(context.TODO()))
}

func TestSched(t *testing.T) {
	//stm: @WORKER_JOBS_001
	storiface.ParallelNum = 1
	storiface.ParallelDenom = 1

	ctx, done := context.WithTimeout(context.Background(), 30*time.Second)
	defer done()

	spt := abi.RegisteredSealProof_StackedDrg32GiBV1

	type workerSpec struct {
		name      string
		taskTypes map[sealtasks.TaskType]struct{}

		resources       storiface.WorkerResources
		ignoreResources bool
	}

	noopAction := func(ctx context.Context, w Worker) error {
		return nil
	}

	type runMeta struct {
		done map[string]chan struct{}

		wg sync.WaitGroup
	}

	type task func(*testing.T, *Scheduler, *paths.MemIndex, *runMeta)
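
	// The helpers below form a small DSL for driving the scheduler: sched
	// queues a task whose prepare action blocks until its named done channel
	// is closed, while taskStarted/taskDone/taskNotScheduled assert on
	// scheduling progress through that channel.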
	sched := func(taskName, expectWorker string, sid abi.SectorNumber, taskType sealtasks.TaskType) task {
		_, _, l, _ := runtime.Caller(1)
		_, _, l2, _ := runtime.Caller(2)

		return func(t *testing.T, sched *Scheduler, index *paths.MemIndex, rm *runMeta) {
			done := make(chan struct{})
			rm.done[taskName] = done

			sel := newAllocSelector(index, storiface.FTCache, storiface.PathSealing, abi.ActorID(1000))

			rm.wg.Add(1)
			go func() {
				defer rm.wg.Done()

				sectorRef := storiface.SectorRef{
					ID: abi.SectorID{
						Miner:  8,
						Number: sid,
					},
					ProofType: spt,
				}

				prep := PrepareAction{
					Action: func(ctx context.Context, w Worker) error {
						wi, err := w.Info(ctx)
						require.NoError(t, err)

						require.Equal(t, expectWorker, wi.Hostname)

						log.Info("IN ", taskName)

						for {
							_, ok := <-done
							if !ok {
								break
							}
						}

						log.Info("OUT ", taskName)

						return nil
					},
					PrepType: taskType,
				}

				err := sched.Schedule(ctx, sectorRef, taskType, sel, prep, noopAction)
				if err != context.Canceled {
					require.NoError(t, err, fmt.Sprint(l, l2))
				}
			}()

			<-sched.testSync
		}
	}

	taskStarted := func(name string) task {
		_, _, l, _ := runtime.Caller(1)
		_, _, l2, _ := runtime.Caller(2)
		return func(t *testing.T, sched *Scheduler, index *paths.MemIndex, rm *runMeta) {
			select {
			case rm.done[name] <- struct{}{}:
			case <-ctx.Done():
				t.Fatal("ctx error", ctx.Err(), l, l2)
			}
		}
	}

	taskDone := func(name string) task {
		_, _, l, _ := runtime.Caller(1)
		_, _, l2, _ := runtime.Caller(2)
		return func(t *testing.T, sched *Scheduler, index *paths.MemIndex, rm *runMeta) {
			select {
			case rm.done[name] <- struct{}{}:
			case <-ctx.Done():
				t.Fatal("ctx error", ctx.Err(), l, l2)
			}
			close(rm.done[name])
		}
	}

	taskNotScheduled := func(name string) task {
		_, _, l, _ := runtime.Caller(1)
		_, _, l2, _ := runtime.Caller(2)
		return func(t *testing.T, sched *Scheduler, index *paths.MemIndex, rm *runMeta) {
			select {
			case rm.done[name] <- struct{}{}:
				t.Fatal("not expected", l, l2)
			case <-time.After(10 * time.Millisecond): // TODO: better synchronization thingy
			}
		}
	}

	testFunc := func(workers []workerSpec, tasks []task) func(t *testing.T) {
		return func(t *testing.T) {
			index := paths.NewMemIndex(nil)

			sched, err := newScheduler(ctx, "")
			require.NoError(t, err)
			sched.testSync = make(chan struct{})

			go sched.runSched()

			for _, worker := range workers {
				addTestWorker(t, sched, index, worker.name, worker.taskTypes, worker.resources, worker.ignoreResources)
			}

			rm := runMeta{
				done: map[string]chan struct{}{},
			}

			for i, task := range tasks {
				log.Info("TASK", i)
				task(t, sched, index, &rm)
			}

			log.Info("wait for async stuff")
			rm.wg.Wait()

			require.NoError(t, sched.Close(context.TODO()))
		}
	}

	multTask := func(tasks ...task) task {
		return func(t *testing.T, s *Scheduler, index *paths.MemIndex, meta *runMeta) {
			for _, tsk := range tasks {
				tsk(t, s, index, meta)
			}
		}
	}

	// checks behaviour with workers with constrained resources:
	// the first worker respects resource constraints, so the task is assigned
	// to the second worker, which ignores them
	t.Run("constrained-resources", testFunc([]workerSpec{
		{name: "fred1", resources: constrainedWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}},
		{name: "fred2", resources: constrainedWorkerResources, ignoreResources: true, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}},
	}, []task{
		sched("pc1-1", "fred2", 8, sealtasks.TTPreCommit1),
		taskStarted("pc1-1"),
		taskDone("pc1-1"),
	}))

	t.Run("one-pc1", testFunc([]workerSpec{
		{name: "fred", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}},
	}, []task{
		sched("pc1-1", "fred", 8, sealtasks.TTPreCommit1),
		taskDone("pc1-1"),
	}))

	t.Run("pc1-2workers-1", testFunc([]workerSpec{
		{name: "fred2", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}},
		{name: "fred1", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}},
	}, []task{
		sched("pc1-1", "fred1", 8, sealtasks.TTPreCommit1),
		taskDone("pc1-1"),
	}))

	t.Run("pc1-2workers-2", testFunc([]workerSpec{
		{name: "fred1", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}},
		{name: "fred2", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}},
	}, []task{
		sched("pc1-1", "fred1", 8, sealtasks.TTPreCommit1),
		taskDone("pc1-1"),
	}))

	t.Run("pc1-block-pc2", testFunc([]workerSpec{
		{name: "fred", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}},
	}, []task{
		sched("pc1", "fred", 8, sealtasks.TTPreCommit1),
		taskStarted("pc1"),

		sched("pc2", "fred", 8, sealtasks.TTPreCommit2),
		taskNotScheduled("pc2"),

		taskDone("pc1"),
		taskDone("pc2"),
	}))

	t.Run("pc2-block-pc1", testFunc([]workerSpec{
		{name: "fred", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}},
	}, []task{
		sched("pc2", "fred", 8, sealtasks.TTPreCommit2),
		taskStarted("pc2"),

		sched("pc1", "fred", 8, sealtasks.TTPreCommit1),
		taskNotScheduled("pc1"),

		taskDone("pc2"),
		taskDone("pc1"),
	}))

	t.Run("pc1-batching", testFunc([]workerSpec{
		{name: "fred", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}},
	}, []task{
		sched("t1", "fred", 8, sealtasks.TTPreCommit1),
		taskStarted("t1"),

		sched("t2", "fred", 8, sealtasks.TTPreCommit1),
		taskStarted("t2"),

		// with worker settings, we can only run 2 parallel PC1s

		// start 2 more to fill fetch buffer
		sched("t3", "fred", 8, sealtasks.TTPreCommit1),
		taskNotScheduled("t3"),

		sched("t4", "fred", 8, sealtasks.TTPreCommit1),
		taskNotScheduled("t4"),

		taskDone("t1"),
		taskDone("t2"),

		taskStarted("t3"),
		taskStarted("t4"),

		taskDone("t3"),
		taskDone("t4"),
	}))

	twoPC1 := func(prefix string, sid abi.SectorNumber, schedAssert func(name string) task) task {
		return multTask(
			sched(prefix+"-a", "fred", sid, sealtasks.TTPreCommit1),
			schedAssert(prefix+"-a"),

			sched(prefix+"-b", "fred", sid+1, sealtasks.TTPreCommit1),
			schedAssert(prefix+"-b"),
		)
	}

	twoPC1Act := func(prefix string, schedAssert func(name string) task) task {
		return multTask(
			schedAssert(prefix+"-a"),
			schedAssert(prefix+"-b"),
		)
	}
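
	// diag dumps the scheduler request queue and per-worker jobs to the log,
	// which helps when debugging the flaky-prone priority test below.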
	diag := func() task {
		return func(t *testing.T, s *Scheduler, index *paths.MemIndex, meta *runMeta) {
			time.Sleep(20 * time.Millisecond)
			for _, request := range s.diag().Requests {
				log.Infof("!!! sDIAG: sid(%d) task(%s)", request.Sector.Number, request.TaskType)
			}

			wj := (&Manager{sched: s}).WorkerJobs()

			type line struct {
				storiface.WorkerJob
				wid uuid.UUID
			}

			lines := make([]line, 0)

			for wid, jobs := range wj {
				for _, job := range jobs {
					lines = append(lines, line{
						WorkerJob: job,
						wid:       wid,
					})
				}
			}

			// oldest first
			sort.Slice(lines, func(i, j int) bool {
				if lines[i].RunWait != lines[j].RunWait {
					return lines[i].RunWait < lines[j].RunWait
				}
				return lines[i].Start.Before(lines[j].Start)
			})

			for _, l := range lines {
				log.Infof("!!! wDIAG: rw(%d) sid(%d) t(%s)", l.RunWait, l.Sector.Number, l.Task)
			}
		}
	}

	// run this one a bunch of times, it had a very annoying tendency to fail randomly
	for i := 0; i < 40; i++ {
		t.Run("pc1-pc2-prio", testFunc([]workerSpec{
			{name: "fred", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}},
		}, []task{
			// fill queues
			twoPC1("w0", 0, taskStarted),
			twoPC1("w1", 2, taskNotScheduled),
			sched("w2", "fred", 4, sealtasks.TTPreCommit1),
			taskNotScheduled("w2"),

			// windowed

			sched("t1", "fred", 8, sealtasks.TTPreCommit1),
			taskNotScheduled("t1"),

			sched("t2", "fred", 9, sealtasks.TTPreCommit1),
			taskNotScheduled("t2"),

			sched("t3", "fred", 10, sealtasks.TTPreCommit2),
			taskNotScheduled("t3"),

			diag(),

			twoPC1Act("w0", taskDone),
			twoPC1Act("w1", taskStarted),
			taskNotScheduled("w2"),

			twoPC1Act("w1", taskDone),
			taskStarted("w2"),

			taskDone("w2"),

			diag(),

			taskStarted("t3"),
			taskNotScheduled("t1"),
			taskNotScheduled("t2"),

			taskDone("t3"),

			taskStarted("t1"),
			taskStarted("t2"),

			taskDone("t1"),
			taskDone("t2"),
		}))
	}
}
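
// slowishSelector stands in for a real WorkerSelector in the benchmark below:
// its Ok/Cmp results are constant, but each call still pays the (simulated)
// cost of the Paths/TaskTypes RPCs the scheduler makes while selecting.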
type slowishSelector bool

func (s slowishSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, a SchedWorker) (bool, bool, error) {
	// note: we don't care about output here, just the time those calls take
	// (selector Ok/Cmp is called in the scheduler)
	_, _ = a.Paths(ctx)
	_, _ = a.TaskTypes(ctx)
	return bool(s), false, nil
}

func (s slowishSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b SchedWorker) (bool, error) {
	// note: we don't care about output here, just the time those calls take
	// (selector Ok/Cmp is called in the scheduler)
	_, _ = a.Paths(ctx)
	return true, nil
}

var _ WorkerSelector = slowishSelector(true)

type tw struct {
	api.Worker
	io.Closer
}
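
// BenchmarkTrySched measures a single scheduling pass (trySched) with varying
// numbers of open scheduling windows and queued requests.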
func BenchmarkTrySched(b *testing.B) {
	logging.SetAllLoggers(logging.LevelInfo)
	defer logging.SetAllLoggers(logging.LevelDebug)
	ctx := context.Background()

	test := func(windows, queue int) func(b *testing.B) {
		return func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				b.StopTimer()

				var whnd api.WorkerStruct
				whnd.Internal.TaskTypes = func(p0 context.Context) (map[sealtasks.TaskType]struct{}, error) {
					time.Sleep(100 * time.Microsecond)
					return nil, nil
				}
				whnd.Internal.Paths = func(p0 context.Context) ([]storiface.StoragePath, error) {
					time.Sleep(100 * time.Microsecond)
					return nil, nil
				}

				sched, err := newScheduler(ctx, "")
				require.NoError(b, err)
				sched.Workers[storiface.WorkerID{}] = &WorkerHandle{
					workerRpc: &tw{Worker: &whnd},
					Info: storiface.WorkerInfo{
						Hostname:  "t",
						Resources: decentWorkerResources,
					},
					Enabled:   true,
					preparing: NewActiveResources(newTaskCounter()),
					active:    NewActiveResources(newTaskCounter()),
				}

				for i := 0; i < windows; i++ {
					sched.OpenWindows = append(sched.OpenWindows, &SchedWindowRequest{
						Worker: storiface.WorkerID{},
						Done:   make(chan *SchedWindow, 1000),
					})
				}

				for i := 0; i < queue; i++ {
					sched.SchedQueue.Push(&WorkerRequest{
						TaskType: sealtasks.TTCommit2,
						Sel:      slowishSelector(true),
						Ctx:      ctx,
					})
				}

				b.StartTimer()

				sched.trySched()
			}
		}
	}

	b.Run("1w-1q", test(1, 1))
	b.Run("500w-1q", test(500, 1))
	b.Run("1w-500q", test(1, 500))
	b.Run("200w-400q", test(200, 400))
}
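
// TestWindowCompact checks that workerCompactWindows merges under-utilized
// scheduling windows: tasks from later windows are pulled into earlier ones
// whenever the worker's resources allow it.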
func TestWindowCompact(t *testing.T) {
	sh := Scheduler{}
	spt := abi.RegisteredSealProof_StackedDrg32GiBV1

	test := func(start [][]sealtasks.TaskType, expect [][]sealtasks.TaskType) func(t *testing.T) {
		return func(t *testing.T) {
			wh := &WorkerHandle{
				Info: storiface.WorkerInfo{
					Resources: decentWorkerResources,
				},
			}

			for _, windowTasks := range start {
				window := &SchedWindow{
					Allocated: *NewActiveResources(newTaskCounter()),
				}

				for _, task := range windowTasks {
					window.Todo = append(window.Todo, &WorkerRequest{
						TaskType: task,
						Sector:   storiface.SectorRef{ProofType: spt},
					})
					window.Allocated.Add(uuid.UUID{}, task.SealTask(spt), wh.Info.Resources, storiface.ResourceTable[task][spt])
				}

				wh.activeWindows = append(wh.activeWindows, window)
			}

			sw := schedWorker{
				sched:  &sh,
				worker: wh,
			}

			sw.workerCompactWindows()
			require.Equal(t, len(start)-len(expect), -sw.windowsRequested)

			for wi, tasks := range expect {
				expectRes := NewActiveResources(newTaskCounter())

				for ti, task := range tasks {
					require.Equal(t, task, wh.activeWindows[wi].Todo[ti].TaskType, "%d, %d", wi, ti)
					expectRes.Add(uuid.UUID{}, task.SealTask(spt), wh.Info.Resources, storiface.ResourceTable[task][spt])
				}

				require.Equal(t, expectRes.cpuUse, wh.activeWindows[wi].Allocated.cpuUse, "%d", wi)
				require.Equal(t, expectRes.gpuUsed, wh.activeWindows[wi].Allocated.gpuUsed, "%d", wi)
				require.Equal(t, expectRes.memUsedMin, wh.activeWindows[wi].Allocated.memUsedMin, "%d", wi)
				require.Equal(t, expectRes.memUsedMax, wh.activeWindows[wi].Allocated.memUsedMax, "%d", wi)
			}
		}
	}

	t.Run("2-pc1-windows", test(
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit1}, {sealtasks.TTPreCommit1}},
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit1, sealtasks.TTPreCommit1}}),
	)

	t.Run("1-window", test(
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit1, sealtasks.TTPreCommit1}},
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit1, sealtasks.TTPreCommit1}}),
	)

	t.Run("2-pc2-windows", test(
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit2}, {sealtasks.TTPreCommit2}},
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit2}, {sealtasks.TTPreCommit2}}),
	)

	t.Run("2pc1-pc1ap", test(
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit1, sealtasks.TTPreCommit1}, {sealtasks.TTPreCommit1, sealtasks.TTAddPiece}},
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit1, sealtasks.TTPreCommit1, sealtasks.TTAddPiece}, {sealtasks.TTPreCommit1}}),
	)

	t.Run("2pc1-pc1appc2", test(
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit1, sealtasks.TTPreCommit1}, {sealtasks.TTPreCommit1, sealtasks.TTAddPiece, sealtasks.TTPreCommit2}},
		[][]sealtasks.TaskType{{sealtasks.TTPreCommit1, sealtasks.TTPreCommit1, sealtasks.TTAddPiece}, {sealtasks.TTPreCommit1, sealtasks.TTPreCommit2}}),
	)
}