60d576241c
* Fix 1.21 regression: GET_32G_MAX_CONCURRENT + mixed prepared/executing tasks leads to a stuck scheduler

  If you have 12 GET tasks and GET_32G_MAX_CONCURRENT=1, the sealing jobs list for the miner only shows the assigned GET tasks and the scheduler is stuck. I believe this to be a regression from 1.21 unifying the counters: for GETs, PrepType and TaskType are both seal/v0/fetch, leading to a state where tasks are blocked because they are already counted towards the limit (sketched below).

* itests: Repro issue from PR #10633
* make counters int (non-working)
* fix: worker sched: Send taskDone notifs after tasks are done
* itests: Make TestPledgeMaxConcurrentGet actually reproduce the issue
* make the linter happy

---------

Co-authored-by: Steffen Butzer <steffen.butzer@outlook.com>
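To make the failure mode from the first bullet concrete, here is a minimal, hypothetical sketch of a unified per-type counter. It is not the actual lotus scheduler code; the names (sched, tryAssign, taskDone) are invented for illustration. It only models the stated symptom: once a GET is counted towards the limit under seal/v0/fetch, further GETs stay queued until a done notification frees the slot.

// Hypothetical illustration only, NOT lotus scheduler code.
package main

import "fmt"

// Both the prepare and execute phases of a GET report the same type,
// mirroring PrepType == TaskType == seal/v0/fetch.
const ttFetch = "seal/v0/fetch"

type sched struct {
	limit  map[string]int // e.g. GET_*_MAX_CONCURRENT=1
	active map[string]int // unified counter: prepared + executing tasks
}

// tryAssign models the scheduler deciding whether another task of this
// type may start. A task that is still counted against the limit blocks
// every later task of the same type.
func (s *sched) tryAssign(tt string) bool {
	if s.active[tt] >= s.limit[tt] {
		return false
	}
	s.active[tt]++
	return true
}

// taskDone frees the slot; until it is delivered, nothing behind it runs.
func (s *sched) taskDone(tt string) { s.active[tt]-- }

func main() {
	s := &sched{
		limit:  map[string]int{ttFetch: 1},
		active: map[string]int{},
	}

	fmt.Println("GET #1 assigned:", s.tryAssign(ttFetch)) // true
	// GET #1 has finished preparing and is now executing, but it is still
	// counted towards the limit...
	fmt.Println("GET #2 assigned:", s.tryAssign(ttFetch)) // false
	fmt.Println("GET #3 assigned:", s.tryAssign(ttFetch)) // false
	// Only once the done notification arrives can the next GET proceed.
	s.taskDone(ttFetch)
	fmt.Println("GET #2 retried: ", s.tryAssign(ttFetch)) // true
}

The fix listed above ("fix: worker sched: Send taskDone notifs after tasks are done") changes when that notification is delivered so queued tasks can make progress. The test below sets GET_2K_MAX_CONCURRENT rather than GET_32G_MAX_CONCURRENT, presumably because the itest ensemble seals 2 KiB sectors.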
65 lines · 1.9 KiB · Go
package itests

import (
	"context"
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

// Regression check for a fix introduced in https://github.com/filecoin-project/lotus/pull/10633
func TestPledgeMaxConcurrentGet(t *testing.T) {
	require.NoError(t, os.Setenv("GET_2K_MAX_CONCURRENT", "1"))
	t.Cleanup(func() {
		require.NoError(t, os.Unsetenv("GET_2K_MAX_CONCURRENT"))
	})

	kit.QuietMiningLogs()

	blockTime := 50 * time.Millisecond

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_, miner, ens := kit.EnsembleMinimal(t, kit.NoStorage()) // no mock proofs
	ens.InterconnectAll().BeginMiningMustPost(blockTime)

	// separate sealed and storage paths so that finalize move needs to happen
	miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) {
		meta.CanSeal = true
	})
	miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) {
		meta.CanStore = true
	})

	// NOTE: This test only repros the issue when Fetch tasks take ~10s, there's
	// no great way to do that in a non-horribly-hacky way

	/* The horribly hacky way:

	diff --git a/storage/sealer/sched_worker.go b/storage/sealer/sched_worker.go
	index 35acd755d..76faec859 100644
	--- a/storage/sealer/sched_worker.go
	+++ b/storage/sealer/sched_worker.go
	@@ -513,6 +513,10 @@ func (sw *schedWorker) startProcessingTask(req *WorkerRequest) error {
	 	tw.start()
	 	err = <-werr

	+	if req.TaskType == sealtasks.TTFetch {
	+		time.Sleep(10 * time.Second)
	+	}
	+
	 	select {
	 	case req.ret <- workerResponse{err: err}:
	 	case <-req.Ctx.Done():

	*/

	miner.PledgeSectors(ctx, 3, 0, nil)
}
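To run the regression test, something like the following should work from a lotus checkout with its build dependencies in place (exact flags may vary by setup):

go test ./itests/ -run TestPledgeMaxConcurrentGet -v

As the NOTE in the test explains, actually reproducing the original hang also requires the hacky sched_worker.go patch from the block comment so that Fetch tasks take about 10 seconds.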