From 8fcf59facca1b79e435722866074a41e44a2444c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Magiera?=
Date: Sat, 22 Apr 2023 12:47:11 +0200
Subject: [PATCH] itests: Test PoSt V1_1 on workers

---
 itests/worker_test.go | 230 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 230 insertions(+)

diff --git a/itests/worker_test.go b/itests/worker_test.go
index 4e845afe8..87bb0047d 100644
--- a/itests/worker_test.go
+++ b/itests/worker_test.go
@@ -1,6 +1,7 @@
 package itests
 
 import (
+	"bytes"
 	"context"
 	"strings"
 	"sync/atomic"
@@ -13,13 +14,17 @@ import (
 
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
+	miner11 "github.com/filecoin-project/go-state-types/builtin/v11/miner"
+	"github.com/filecoin-project/go-state-types/network"
 
 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/itests/kit"
 	"github.com/filecoin-project/lotus/node"
+	"github.com/filecoin-project/lotus/node/config"
 	"github.com/filecoin-project/lotus/node/impl"
+	"github.com/filecoin-project/lotus/node/modules"
 	"github.com/filecoin-project/lotus/node/repo"
 	"github.com/filecoin-project/lotus/storage/paths"
 	sealing "github.com/filecoin-project/lotus/storage/pipeline"
@@ -500,3 +505,228 @@ func TestWorkerName(t *testing.T) {
 
 	require.True(t, found)
 }
+
+// TestWindowPostV1P1NV20WorkerFault tests that V1_1 window PoSt proofs are submitted from PoSt workers when a sector is faulty.
+func TestWindowPostV1P1NV20WorkerFault(t *testing.T) {
+	kit.QuietMiningLogs()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	blocktime := 2 * time.Millisecond
+
+	sectors := 2 * 48 * 2
+	var badsector uint64 = 100000
+
+	client, miner, _, ens := kit.EnsembleWorker(t,
+		kit.PresealSectors(sectors), // 2 sectors per partition, 2 partitions in all 48 deadlines
+		kit.GenesisNetworkVersion(network.Version20),
+		kit.ConstructorOpts(
+			node.Override(new(config.ProvingConfig), func() config.ProvingConfig {
+				c := config.DefaultStorageMiner()
+				c.Proving.DisableBuiltinWindowPoSt = true
+				return c.Proving
+			}),
+			node.Override(new(*wdpost.WindowPoStScheduler), modules.WindowPostScheduler(
+				config.DefaultStorageMiner().Fees,
+				config.ProvingConfig{
+					DisableBuiltinWindowPoSt:  true,
+					DisableBuiltinWinningPoSt: false,
+					DisableWDPoStPreChecks:    false,
+				},
+			)),
+			node.Override(new(paths.Store), func(store *paths.Remote) paths.Store {
+				return &badWorkerStorage{
+					Store:       store,
+					badsector:   &badsector,
+					notBadCount: 1,
+				}
+			})),
+		kit.ThroughRPC(),
+		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt}),
+		kit.WithWorkerStorage(func(store paths.Store) paths.Store {
+			return &badWorkerStorage{
+				Store:     store,
+				badsector: &badsector,
+			}
+		}))
+
+	bm := ens.InterconnectAll().BeginMining(blocktime)[0]
+
+	maddr, err := miner.ActorAddress(ctx)
+	require.NoError(t, err)
+
+	// wait for some crons(?)
+	require.Eventually(t, func() bool {
+		di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
+		require.NoError(t, err)
+
+		parts, err := client.StateMinerPartitions(ctx, maddr, di.Index, types.EmptyTSK)
+		require.NoError(t, err)
+
+		return len(parts) > 1
+	}, 30*time.Second, 100*time.Millisecond)
+
+	// Wait until just before a deadline opens
+	{
+		di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
+		require.NoError(t, err)
+
+		di = di.NextNotElapsed()
+
+		t.Log("Running one proving period")
+		waitUntil := di.Open + di.WPoStChallengeWindow - di.WPoStChallengeLookback - 1
+		client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
+
+		t.Log("Waiting for post message")
+		bm.Stop()
+	}
+
+	// Remove one sector in the next deadline (so it's skipped)
+	{
+		di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
+		require.NoError(t, err)
+
+		parts, err := client.StateMinerPartitions(ctx, maddr, di.Index+1, types.EmptyTSK)
+		require.NoError(t, err)
+		require.Greater(t, len(parts), 0)
+
+		secs := parts[0].AllSectors
+		n, err := secs.Count()
+		require.NoError(t, err)
+		require.Equal(t, uint64(2), n)
+
+		// Drop the sector in second partition
+		sid, err := secs.First()
+		require.NoError(t, err)
+
+		t.Logf("Drop sector %d; dl %d part %d", sid, di.Index+1, 0)
+
+		atomic.StoreUint64(&badsector, sid)
+		require.NoError(t, err)
+	}
+
+	bm.MineBlocksMustPost(ctx, 2*time.Millisecond)
+
+	mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+	require.NoError(t, err)
+
+	wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
+	require.NoError(t, err)
+	en := wact.Nonce
+
+	// wait for a new message to be sent from worker address, it will be a PoSt
+
+waitForProof:
+	for {
+		//stm: @CHAIN_STATE_GET_ACTOR_001
+		wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
+		require.NoError(t, err)
+		if wact.Nonce > en {
+			break waitForProof
+		}
+
+		build.Clock.Sleep(blocktime)
+	}
+
+	slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
+	require.NoError(t, err)
+
+	pmr, err := client.StateSearchMsg(ctx, types.EmptyTSK, slm[0], -1, false)
+	require.NoError(t, err)
+
+	nv, err := client.StateNetworkVersion(ctx, pmr.TipSet)
+	require.NoError(t, err)
+	require.Equal(t, network.Version20, nv)
+
+	require.True(t, pmr.Receipt.ExitCode.IsSuccess())
+
+	slmsg, err := client.ChainGetMessage(ctx, slm[0])
+	require.NoError(t, err)
+
+	var params miner11.SubmitWindowedPoStParams
+	require.NoError(t, params.UnmarshalCBOR(bytes.NewBuffer(slmsg.Params)))
+	require.Equal(t, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1, params.Proofs[0].PoStProof)
+
+	require.Len(t, params.Partitions, 2)
+	sc0, err := params.Partitions[0].Skipped.Count()
+	require.NoError(t, err)
+	require.Equal(t, uint64(1), sc0)
+	sc1, err := params.Partitions[1].Skipped.Count()
+	require.NoError(t, err)
+	require.Equal(t, uint64(0), sc1)
+}
+
+// TestWindowPostV1P1NV20Worker tests that V1_1 window PoSt proofs are submitted from a PoSt worker.
+func TestWindowPostV1P1NV20Worker(t *testing.T) {
+	kit.QuietMiningLogs()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	blocktime := 2 * time.Millisecond
+
+	client, miner, _, ens := kit.EnsembleWorker(t,
+		kit.GenesisNetworkVersion(network.Version20),
+		kit.ConstructorOpts(
+			node.Override(new(config.ProvingConfig), func() config.ProvingConfig {
+				c := config.DefaultStorageMiner()
+				c.Proving.DisableBuiltinWindowPoSt = true
+				return c.Proving
+			}),
+			node.Override(new(*wdpost.WindowPoStScheduler), modules.WindowPostScheduler(
+				config.DefaultStorageMiner().Fees,
+				config.ProvingConfig{
+					DisableBuiltinWindowPoSt:  true,
+					DisableBuiltinWinningPoSt: false,
+					DisableWDPoStPreChecks:    false,
+				},
+			))),
+		kit.ThroughRPC(),
+		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt}))
+
+	ens.InterconnectAll().BeginMining(blocktime)
+
+	maddr, err := miner.ActorAddress(ctx)
+	require.NoError(t, err)
+
+	mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+	require.NoError(t, err)
+
+	wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
+	require.NoError(t, err)
+	en := wact.Nonce
+
+	// wait for a new message to be sent from worker address, it will be a PoSt
+
+waitForProof:
+	for {
+		//stm: @CHAIN_STATE_GET_ACTOR_001
+		wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
+		require.NoError(t, err)
+		if wact.Nonce > en {
+			break waitForProof
+		}
+
+		build.Clock.Sleep(blocktime)
+	}
+
+	slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
+	require.NoError(t, err)
+
+	pmr, err := client.StateSearchMsg(ctx, types.EmptyTSK, slm[0], -1, false)
+	require.NoError(t, err)
+
+	nv, err := client.StateNetworkVersion(ctx, pmr.TipSet)
+	require.NoError(t, err)
+	require.Equal(t, network.Version20, nv)
+
+	require.True(t, pmr.Receipt.ExitCode.IsSuccess())
+
+	slmsg, err := client.ChainGetMessage(ctx, slm[0])
+	require.NoError(t, err)
+
+	var params miner11.SubmitWindowedPoStParams
+	require.NoError(t, params.UnmarshalCBOR(bytes.NewBuffer(slmsg.Params)))
+	require.Equal(t, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1, params.Proofs[0].PoStProof)
+}