itests: Test PoSt worker RPC error handling
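This change gives itest workers a Stop hook for severing their RPC connection mid-test, and adds a regression test for how the miner reacts: a window PoSt computation that fails on a dead worker should be retried on a live one, and after a heartbeat interval the dead worker should be marked disabled.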

Łukasz Magiera 2023-03-06 14:30:22 +01:00
parent e170487faf
commit 4b99472b35
3 changed files with 50 additions and 12 deletions


@@ -336,6 +336,8 @@ func (n *Ensemble) Worker(minerNode *TestMiner, worker *TestWorker, opts ...Node
 		MinerNode:      minerNode,
 		RemoteListener: rl,
 		options:        options,
+
+		Stop: func(ctx context.Context) error { return nil },
 	}
 
 	n.inactive.workers = append(n.inactive.workers, worker)
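For reference, the shape this gives the harness type (a partial sketch reconstructed from the fields visible in this diff; field types other than Stop are assumptions, and the real definition has more fields):

// Partial sketch of TestWorker, inferred from this diff.
type TestWorker struct {
	MinerNode      *TestMiner
	RemoteListener net.Listener // assumed type; the diff only shows the rl variable
	options        nodeOpts     // assumed unexported options bag

	// Stop tears down the worker's RPC server, if any. Defaulting it to a
	// no-op keeps Stop safe to call even on workers never exposed over RPC.
	Stop func(ctx context.Context) error

	// ... remaining fields (ListenAddr, Worker, ...) elided
}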


@@ -96,6 +96,12 @@ func workerRpc(t *testing.T, m *TestWorker) *TestWorker {
 	require.NoError(t, err)
 	t.Cleanup(stop)
 
+	m.Stop = func(ctx context.Context) error {
+		srv.Close()
+		srv.CloseClientConnections()
+		return nil
+	}
+
 	m.ListenAddr, m.Worker = maddr, cl
 
 	return m
 }
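The two close calls shape how the disconnect looks from the client side. Assuming srv is a *httptest.Server (an assumption; its construction is outside this hunk), Close stops the listener and waits for ordinary in-flight requests, while CloseClientConnections force-drops any connection still open, notably a long-lived websocket carrying the worker's RPC session, so pending calls fail immediately instead of hanging. A minimal standalone illustration:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Stand-in handler; in the itest kit this would be the worker's RPC handler.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	}))

	srv.Close()                  // stop the listener, wait for normal in-flight requests
	srv.CloseClientConnections() // force-close anything still open (e.g. hijacked websockets)
}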


@@ -366,7 +366,7 @@ func TestWindowPostWorkerManualPoSt(t *testing.T) {
 
 	sectors := 2 * 48 * 2
 
-	client, miner, _, ens := kit.EnsembleWorker(t,
+	client, miner, _, _ := kit.EnsembleWorker(t,
 		kit.PresealSectors(sectors), // 2 sectors per partition, 2 partitions in all 48 deadlines
 		kit.LatestActorsAt(-1),
 		kit.ThroughRPC(),
@@ -378,17 +378,8 @@ func TestWindowPostWorkerManualPoSt(t *testing.T) {
 	di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
 	require.NoError(t, err)
 
-	bm := ens.InterconnectAll().BeginMiningMustPost(2 * time.Millisecond)[0]
-
 	di = di.NextNotElapsed()
 
-	t.Log("Running one proving period")
-	waitUntil := di.Open + di.WPoStChallengeWindow*2 - 2
-	client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
-
-	t.Log("Waiting for post message")
-	bm.Stop()
-
 	tryDl := func(dl uint64) {
 		p, err := miner.ComputeWindowPoSt(ctx, dl, types.EmptyTSK)
 		require.NoError(t, err)
@@ -398,10 +389,49 @@ func TestWindowPostWorkerManualPoSt(t *testing.T) {
 	tryDl(0)
 	tryDl(40)
 	tryDl(di.Index + 4)
+}
 
-	lastPending, err := client.MpoolPending(ctx, types.EmptyTSK)
-	require.NoError(t, err)
-	require.Len(t, lastPending, 0)
+func TestWindowPostWorkerDisconnected(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	_ = logging.SetLogLevel("storageminer", "INFO")
+
+	sectors := 2 * 48 * 2
+
+	client, miner, badWorker, ens := kit.EnsembleWorker(t,
+		kit.PresealSectors(sectors), // 2 sectors per partition, 2 partitions in all 48 deadlines
+		kit.LatestActorsAt(-1),
+		kit.ThroughRPC(),
+		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt}))
+
+	var goodWorker kit.TestWorker
+	ens.Worker(miner, &goodWorker, kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt}), kit.ThroughRPC()).Start()
+
+	maddr, err := miner.ActorAddress(ctx)
+	require.NoError(t, err)
+
+	di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
+	require.NoError(t, err)
+
+	di = di.NextNotElapsed()
+
+	tryDl := func(dl uint64) {
+		p, err := miner.ComputeWindowPoSt(ctx, dl, types.EmptyTSK)
+		require.NoError(t, err)
+		require.Len(t, p, 1)
+		require.Equal(t, dl, p[0].Deadline)
+	}
+	tryDl(0) // this will run on the not-yet-bad badWorker
+
+	err = badWorker.Stop(ctx)
+	require.NoError(t, err)
+
+	tryDl(10) // will fail on the badWorker, then should retry on the goodWorker
+
+	time.Sleep(15 * time.Second)
+
+	tryDl(40) // after HeartbeatInterval, the badWorker should be marked as disabled
 }
 
 func TestSchedulerRemoveRequest(t *testing.T) {
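The property under test is failover in the miner's PoSt scheduling: a call that lands on a dead worker is retried on a live one, and a worker that stops answering is eventually taken out of rotation (the 15-second sleep is there to outlast the worker heartbeat interval). A minimal sketch of that pattern follows; it is illustrative only, every name in it is hypothetical, and lotus' real scheduler works differently:

package main

import (
	"context"
	"errors"
	"fmt"
)

// proofWorker is a hypothetical stand-in for a remote PoSt worker connection.
type proofWorker struct {
	name string
	dead bool // simulates a worker whose RPC connection was torn down
}

func (w *proofWorker) ComputeProof(ctx context.Context, dl uint64) (string, error) {
	if w.dead {
		return "", errors.New(w.name + ": connection closed")
	}
	return fmt.Sprintf("%s: proof for deadline %d", w.name, dl), nil
}

// computeWithFailover tries each still-enabled worker in turn, disabling any
// worker whose call fails so later calls skip it (a crude analogue of the
// heartbeat-based disabling the test waits for).
func computeWithFailover(ctx context.Context, workers []*proofWorker, disabled map[*proofWorker]bool, dl uint64) (string, error) {
	for _, w := range workers {
		if disabled[w] {
			continue
		}
		proof, err := w.ComputeProof(ctx, dl)
		if err != nil {
			disabled[w] = true
			continue
		}
		return proof, nil
	}
	return "", errors.New("no usable workers")
}

func main() {
	bad := &proofWorker{name: "badWorker", dead: true}
	good := &proofWorker{name: "goodWorker"}
	disabled := map[*proofWorker]bool{}

	// The first call fails over from badWorker to goodWorker; the second
	// skips badWorker entirely because the earlier failure disabled it.
	for _, dl := range []uint64{10, 40} {
		proof, err := computeWithFailover(context.Background(), []*proofWorker{bad, good}, disabled, dl)
		fmt.Println(proof, err)
	}
}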