From 59938414fc10a830dae39e375668e1840c525d67 Mon Sep 17 00:00:00 2001 From: Rod Vagg Date: Thu, 6 Jun 2024 14:47:25 +1000 Subject: [PATCH] test: actors: manual CC onboarding and proving integration test (#12017) * remove client CLI * remove markets CLI from miner * remove markets from all CLI * remove client API * update go mod * remove EnableMarkets flag * remove market subsystem * remove dagstore * remove index provider * remove graphsync and data-transfer * remove markets * go mod tidy * fix cbor gen deps * remove deal making from config * remove eol alert * go mod tidy * changes as per review * make jen * changes as per review * test: actors: manual CC onboarding and proving integration test * test: actors: manual CC onboarding itest with real proofs * test: actors: fix lint issue, require proofs in CI * test: actors: rename real proofs test, fix dispute window wait * feat: add TestUnmanagedMiner in the itest kit for non-storage managed miners * feat: test: improve UnmanagedMiner test harness * feat: test: MineBlocksMustPost can watch for >1 miners (#12063) * feat: test: MineBlocksMustPost can watch for >1 miners * feat: test: wait for both sectors at the end of test * feat: test: minor manual onboarding test fixups and speed up * feat: test: handle case where miners have close deadline ends * Implement snap deals test for manual sector onboarding (#12066) * changes as per review * thread safety * test for snap deals * remove extraneous change * Apply suggestions from code review Co-authored-by: Rod Vagg * cancel CC Post after snap deals --------- Co-authored-by: Rod Vagg * fix config --------- Co-authored-by: aarshkshah1992 --- .github/workflows/test.yml | 2 + itests/kit/blockminer.go | 133 +++- itests/kit/ensemble.go | 110 +++- itests/kit/node_full.go | 25 + itests/kit/node_unmanaged.go | 1042 ++++++++++++++++++++++++++++++ itests/manual_onboarding_test.go | 174 +++++ 6 files changed, 1447 insertions(+), 39 deletions(-) create mode 100644 itests/kit/node_unmanaged.go create mode 100644 itests/manual_onboarding_test.go diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index b57a74af0..2a5648a54 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -97,6 +97,7 @@ jobs: "itest-get_messages_in_ts": ["self-hosted", "linux", "x64", "xlarge"], "itest-lite_migration": ["self-hosted", "linux", "x64", "xlarge"], "itest-lookup_robust_address": ["self-hosted", "linux", "x64", "xlarge"], + "itest-manual_onboarding": ["self-hosted", "linux", "x64", "xlarge"], "itest-mempool": ["self-hosted", "linux", "x64", "xlarge"], "itest-mpool_msg_uuid": ["self-hosted", "linux", "x64", "xlarge"], "itest-mpool_push_with_uuid": ["self-hosted", "linux", "x64", "xlarge"], @@ -129,6 +130,7 @@ jobs: "itest-deals", "itest-direct_data_onboard_verified", "itest-direct_data_onboard", + "itest-manual_onboarding", "itest-net", "itest-path_detach_redeclare", "itest-path_type_filters", diff --git a/itests/kit/blockminer.go b/itests/kit/blockminer.go index 40d23a6cd..758425678 100644 --- a/itests/kit/blockminer.go +++ b/itests/kit/blockminer.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/go-state-types/abi" @@ -20,6 +21,7 @@ import ( "github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v1api" 
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/miner" ) @@ -29,11 +31,13 @@ type BlockMiner struct { t *testing.T miner *TestMiner - nextNulls int64 - pause chan struct{} - unpause chan struct{} - wg sync.WaitGroup - cancel context.CancelFunc + nextNulls int64 + postWatchMiners []address.Address + postWatchMinersLk sync.Mutex + pause chan struct{} + unpause chan struct{} + wg sync.WaitGroup + cancel context.CancelFunc } func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner { @@ -46,19 +50,58 @@ func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner { } } +type minerDeadline struct { + addr address.Address + deadline dline.Info +} + +type minerDeadlines []minerDeadline + +func (mds minerDeadlines) CloseList() []abi.ChainEpoch { + var ret []abi.ChainEpoch + for _, md := range mds { + ret = append(ret, md.deadline.Last()) + } + return ret +} + +func (mds minerDeadlines) MinerStringList() []string { + var ret []string + for _, md := range mds { + ret = append(ret, md.addr.String()) + } + return ret +} + +// FilterByLast returns a new minerDeadlines with only the deadlines that have a Last() epoch +// greater than or equal to last. +func (mds minerDeadlines) FilterByLast(last abi.ChainEpoch) minerDeadlines { + var ret minerDeadlines + for _, md := range mds { + if last >= md.deadline.Last() { + ret = append(ret, md) + } + } + return ret +} + type partitionTracker struct { + minerAddr address.Address partitions []api.Partition posted bitfield.BitField } -func newPartitionTracker(ctx context.Context, dlIdx uint64, bm *BlockMiner) *partitionTracker { - dlines, err := bm.miner.FullNode.StateMinerDeadlines(ctx, bm.miner.ActorAddr, types.EmptyTSK) - require.NoError(bm.t, err) +// newPartitionTracker creates a new partitionTracker that tracks the deadline index dlIdx for the +// given minerAddr. It uses the BlockMiner bm to interact with the chain. 
+func newPartitionTracker(ctx context.Context, t *testing.T, client v1api.FullNode, minerAddr address.Address, dlIdx uint64) *partitionTracker { + dlines, err := client.StateMinerDeadlines(ctx, minerAddr, types.EmptyTSK) + require.NoError(t, err) dl := dlines[dlIdx] - parts, err := bm.miner.FullNode.StateMinerPartitions(ctx, bm.miner.ActorAddr, dlIdx, types.EmptyTSK) - require.NoError(bm.t, err) + parts, err := client.StateMinerPartitions(ctx, minerAddr, dlIdx, types.EmptyTSK) + require.NoError(t, err) return &partitionTracker{ + minerAddr: minerAddr, partitions: parts, posted: dl.PostSubmissions, } @@ -74,11 +117,11 @@ func (p *partitionTracker) done(t *testing.T) bool { return uint64(len(p.partitions)) == p.count(t) } -func (p *partitionTracker) recordIfPost(t *testing.T, bm *BlockMiner, msg *types.Message) (ret bool) { +func (p *partitionTracker) recordIfPost(t *testing.T, msg *types.Message) (ret bool) { defer func() { ret = p.done(t) }() - if !(msg.To == bm.miner.ActorAddr) { + if !(msg.To == p.minerAddr) { return } if msg.Method != builtin.MethodsMiner.SubmitWindowedPoSt { @@ -92,19 +135,18 @@ func (p *partitionTracker) recordIfPost(t *testing.T, bm *BlockMiner, msg *types return } -func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *dline.Info) { - - tracker := newPartitionTracker(ctx, dlinfo.Index, bm) +func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, minerAddr address.Address, dlinfo dline.Info) { + tracker := newPartitionTracker(ctx, bm.t, bm.miner.FullNode, minerAddr, dlinfo.Index) if !tracker.done(bm.t) { // need to wait for post bm.t.Logf("expect %d partitions proved but only see %d", len(tracker.partitions), tracker.count(bm.t)) - poolEvts, err := bm.miner.FullNode.MpoolSub(ctx) //subscribe before checking pending so we don't miss any events + poolEvts, err := bm.miner.FullNode.MpoolSub(ctx) // subscribe before checking pending so we don't miss any events require.NoError(bm.t, err) // First check pending messages we'll mine this epoch msgs, err := bm.miner.FullNode.MpoolPending(ctx, types.EmptyTSK) require.NoError(bm.t, err) for _, msg := range msgs { - if tracker.recordIfPost(bm.t, bm, &msg.Message) { + if tracker.recordIfPost(bm.t, &msg.Message) { fmt.Printf("found post in mempool pending\n") } } @@ -114,13 +156,13 @@ func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *d msgs, err := bm.miner.FullNode.ChainGetBlockMessages(ctx, bc) require.NoError(bm.t, err) for _, msg := range msgs.BlsMessages { - if tracker.recordIfPost(bm.t, bm, msg) { + if tracker.recordIfPost(bm.t, msg) { fmt.Printf("found post in message of prev tipset\n") } } for _, msg := range msgs.SecpkMessages { - if tracker.recordIfPost(bm.t, bm, &msg.Message) { + if tracker.recordIfPost(bm.t, &msg.Message) { fmt.Printf("found post in message of prev tipset\n") } } @@ -139,7 +181,7 @@ func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *d bm.t.Logf("pool event: %d", evt.Type) if evt.Type == api.MpoolAdd { bm.t.Logf("incoming message %v", evt.Message) - if tracker.recordIfPost(bm.t, bm, &evt.Message.Message) { + if tracker.recordIfPost(bm.t, &evt.Message.Message) { fmt.Printf("found post in mempool evt\n") break POOL } @@ -151,11 +193,24 @@ func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *d } } +// WatchMinerForPost adds a miner to the list of miners that the BlockMiner will watch for window +// post submissions when using MineBlocksMustPost. 
This is useful when we have more than just the +// BlockMiner submitting posts, particularly in the case of UnmanagedMiners which don't participate +// in block mining. +func (bm *BlockMiner) WatchMinerForPost(minerAddr address.Address) { + bm.postWatchMinersLk.Lock() + bm.postWatchMiners = append(bm.postWatchMiners, minerAddr) + bm.postWatchMinersLk.Unlock() +} + // Like MineBlocks but refuses to mine until the window post scheduler has wdpost messages in the mempool // and everything shuts down if a post fails. It also enforces that every block mined succeeds func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Duration) { time.Sleep(time.Second) + // watch for our own window posts + bm.WatchMinerForPost(bm.miner.ActorAddr) + // wrap context in a cancellable context. ctx, bm.cancel = context.WithCancel(ctx) bm.wg.Add(1) @@ -182,11 +237,25 @@ func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Dur ts, err := bm.miner.FullNode.ChainHead(ctx) require.NoError(bm.t, err) - dlinfo, err := bm.miner.FullNode.StateMinerProvingDeadline(ctx, bm.miner.ActorAddr, ts.Key()) - require.NoError(bm.t, err) - if ts.Height()+5+abi.ChainEpoch(nulls) >= dlinfo.Last() { // Next block brings us past the last epoch in dline, we need to wait for miner to post - bm.t.Logf("forcing post to get in before deadline closes at %d", dlinfo.Last()) - bm.forcePoSt(ctx, ts, dlinfo) + // Get current deadline information for all miners, then filter by the ones that are about to + // close so we can force a post for them. + bm.postWatchMinersLk.Lock() + var impendingDeadlines minerDeadlines + for _, minerAddr := range bm.postWatchMiners { + dlinfo, err := bm.miner.FullNode.StateMinerProvingDeadline(ctx, minerAddr, ts.Key()) + require.NoError(bm.t, err) + require.NotNil(bm.t, dlinfo, "no deadline info for miner %s", minerAddr) + impendingDeadlines = append(impendingDeadlines, minerDeadline{addr: minerAddr, deadline: *dlinfo}) + } + bm.postWatchMinersLk.Unlock() + impendingDeadlines = impendingDeadlines.FilterByLast(ts.Height() + 5 + abi.ChainEpoch(nulls)) + + if len(impendingDeadlines) > 0 { + // Next block brings us too close for at least one deadline, we need to wait for miners to post + bm.t.Logf("forcing post to get in if due before deadline closes at %v for %v", impendingDeadlines.CloseList(), impendingDeadlines.MinerStringList()) + for _, md := range impendingDeadlines { + bm.forcePoSt(ctx, ts, md.addr, md.deadline) + } } var target abi.ChainEpoch @@ -216,10 +285,13 @@ func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Dur return } if !success { - // if we are mining a new null block and it brings us past deadline boundary we need to wait for miner to post - if ts.Height()+5+abi.ChainEpoch(nulls+i) >= dlinfo.Last() { - bm.t.Logf("forcing post to get in before deadline closes at %d", dlinfo.Last()) - bm.forcePoSt(ctx, ts, dlinfo) + // if we are mining a new null block and it brings us past deadline boundary we need to wait for miners to post + impendingDeadlines = impendingDeadlines.FilterByLast(ts.Height() + 5 + abi.ChainEpoch(nulls+i)) + if len(impendingDeadlines) > 0 { + bm.t.Logf("forcing post to get in if due before deadline closes at %v for %v", impendingDeadlines.CloseList(), impendingDeadlines.MinerStringList()) + for _, md := range impendingDeadlines { + bm.forcePoSt(ctx, ts, md.addr, md.deadline) + } } } } @@ -378,4 +450,7 @@ func (bm *BlockMiner) Stop() { close(bm.pause) bm.pause = nil } + bm.postWatchMinersLk.Lock() + bm.postWatchMiners 
= nil + bm.postWatchMinersLk.Unlock() } diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go index ccdd43632..d8f6e7f91 100644 --- a/itests/kit/ensemble.go +++ b/itests/kit/ensemble.go @@ -118,15 +118,17 @@ type Ensemble struct { options *ensembleOpts inactive struct { - fullnodes []*TestFullNode - miners []*TestMiner - workers []*TestWorker + fullnodes []*TestFullNode + miners []*TestMiner + unmanagedMiners []*TestUnmanagedMiner + workers []*TestWorker } active struct { - fullnodes []*TestFullNode - miners []*TestMiner - workers []*TestWorker - bms map[*TestMiner]*BlockMiner + fullnodes []*TestFullNode + miners []*TestMiner + unmanagedMiners []*TestUnmanagedMiner + workers []*TestWorker + bms map[*TestMiner]*BlockMiner } genesis struct { version network.Version @@ -239,9 +241,7 @@ func (n *Ensemble) MinerEnroll(minerNode *TestMiner, full *TestFullNode, opts .. tdir, err := os.MkdirTemp("", "preseal-memgen") require.NoError(n.t, err) - minerCnt := len(n.inactive.miners) + len(n.active.miners) - - actorAddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(minerCnt)) + actorAddr, err := address.NewIDAddress(genesis2.MinerStart + n.minerCount()) require.NoError(n.t, err) if options.mainMiner != nil { @@ -313,12 +313,25 @@ func (n *Ensemble) AddInactiveMiner(m *TestMiner) { n.inactive.miners = append(n.inactive.miners, m) } +func (n *Ensemble) AddInactiveUnmanagedMiner(m *TestUnmanagedMiner) { + n.inactive.unmanagedMiners = append(n.inactive.unmanagedMiners, m) +} + func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble { n.MinerEnroll(minerNode, full, opts...) n.AddInactiveMiner(minerNode) return n } +func (n *Ensemble) UnmanagedMiner(full *TestFullNode, opts ...NodeOpt) (*TestUnmanagedMiner, *Ensemble) { + actorAddr, err := address.NewIDAddress(genesis2.MinerStart + n.minerCount()) + require.NoError(n.t, err) + + minerNode := NewTestUnmanagedMiner(n.t, full, actorAddr, opts...) + n.AddInactiveUnmanagedMiner(minerNode) + return minerNode, n +} + // Worker enrolls a new worker, using the provided full node for chain // interactions. func (n *Ensemble) Worker(minerNode *TestMiner, worker *TestWorker, opts ...NodeOpt) *Ensemble { @@ -805,6 +818,79 @@ func (n *Ensemble) Start() *Ensemble { // to active, so clear the slice. n.inactive.miners = n.inactive.miners[:0] + // Create all inactive manual miners. 
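+	// For each one we send a CreateMiner message from the owner key, wait for it to land,
+	// record the returned ID address, and then publish the miner's peer ID, mirroring the
+	// managed-miner path above.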
+	for _, m := range n.inactive.unmanagedMiners {
+		proofType, err := miner.WindowPoStProofTypeFromSectorSize(m.options.sectorSize, n.genesis.version)
+		require.NoError(n.t, err)
+
+		params, aerr := actors.SerializeParams(&power3.CreateMinerParams{
+			Owner:               m.OwnerKey.Address,
+			Worker:              m.OwnerKey.Address,
+			WindowPoStProofType: proofType,
+			Peer:                abi.PeerID(m.Libp2p.PeerID),
+		})
+		require.NoError(n.t, aerr)
+
+		createStorageMinerMsg := &types.Message{
+			From:  m.OwnerKey.Address,
+			To:    power.Address,
+			Value: big.Zero(),
+
+			Method: power.Methods.CreateMiner,
+			Params: params,
+		}
+		signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, &api.MessageSendSpec{
+			MsgUuid: uuid.New(),
+		})
+		require.NoError(n.t, err)
+
+		mw, err := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
+		require.NoError(n.t, err)
+		require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode)
+
+		var retval power3.CreateMinerReturn
+		err = retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return))
+		require.NoError(n.t, err, "failed to unmarshal CreateMiner return")
+
+		m.ActorAddr = retval.IDAddress
+
+		has, err := m.FullNode.WalletHas(ctx, m.OwnerKey.Address)
+		require.NoError(n.t, err)
+
+		// Only import the owner's full key into our companion full node if we
+		// don't already have it.
+		if !has {
+			_, err = m.FullNode.WalletImport(ctx, &m.OwnerKey.KeyInfo)
+			require.NoError(n.t, err)
+		}
+
+		enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)})
+		require.NoError(n.t, err)
+
+		msg := &types.Message{
+			From:   m.OwnerKey.Address,
+			To:     m.ActorAddr,
+			Method: builtin.MethodsMiner.ChangePeerID,
+			Params: enc,
+			Value:  types.NewInt(0),
+		}
+
+		_, err2 := m.FullNode.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{
+			MsgUuid: uuid.New(),
+		})
+		require.NoError(n.t, err2)
+
+		minerCopy := *m.FullNode
+		minerCopy.FullNode = modules.MakeUuidWrapper(minerCopy.FullNode)
+		m.FullNode = &minerCopy
+
+		n.active.unmanagedMiners = append(n.active.unmanagedMiners, m)
+	}
+
+	// If we are here, we have processed all inactive manual miners and moved them
+	// to active, so clear the slice.
+	n.inactive.unmanagedMiners = n.inactive.unmanagedMiners[:0]
+
 	// ---------------------
 	//  WORKERS
 	// ---------------------
@@ -1003,6 +1089,10 @@ func (n *Ensemble) BeginMining(blocktime time.Duration, miners ...*TestMiner) []
 	return bms
 }
 
+func (n *Ensemble) minerCount() uint64 {
+	return uint64(len(n.inactive.miners) + len(n.active.miners) + len(n.inactive.unmanagedMiners) + len(n.active.unmanagedMiners))
+}
+
 func (n *Ensemble) generateGenesis() *genesis.Template {
 	var verifRoot = gen.DefaultVerifregRootkeyActor
 	if k := n.options.verifiedRoot.key; k != nil {
diff --git a/itests/kit/node_full.go b/itests/kit/node_full.go
index 1e4176d9b..c71667a99 100644
--- a/itests/kit/node_full.go
+++ b/itests/kit/node_full.go
@@ -12,6 +12,7 @@ import (
 	"github.com/multiformats/go-multiaddr"
 	"github.com/stretchr/testify/require"
 	cbg "github.com/whyrusleeping/cbor-gen"
+	"golang.org/x/xerrors"
 
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
@@ -97,6 +98,30 @@ func (f *TestFullNode) WaitTillChain(ctx context.Context, pred ChainPredicate) *
 	return nil
 }
 
+// WaitTillChainOrError waits until a specified chain condition is met. It returns
+// the first tipset where the condition is met.
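+// Unlike WaitTillChain, which fails the test on error, this variant returns the
+// error to the caller, making it safe to use from goroutines such as wdPostLoop.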
+func (f *TestFullNode) WaitTillChainOrError(ctx context.Context, pred ChainPredicate) (*types.TipSet, error) {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	heads, err := f.ChainNotify(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	for chg := range heads {
+		for _, c := range chg {
+			if c.Type != "apply" {
+				continue
+			}
+			if ts := c.Val; pred(ts) {
+				return ts, nil
+			}
+		}
+	}
+	return nil, xerrors.New("chain condition not met")
+}
+
 func (f *TestFullNode) WaitForSectorActive(ctx context.Context, t *testing.T, sn abi.SectorNumber, maddr address.Address) {
 	for {
 		active, err := f.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK)
diff --git a/itests/kit/node_unmanaged.go b/itests/kit/node_unmanaged.go
new file mode 100644
index 000000000..d3b7b865f
--- /dev/null
+++ b/itests/kit/node_unmanaged.go
@@ -0,0 +1,1042 @@
+package kit
+
+import (
+	"bytes"
+	"context"
+	"crypto/rand"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/ipfs/go-cid"
+	libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/stretchr/testify/require"
+	cbg "github.com/whyrusleeping/cbor-gen"
+
+	ffi "github.com/filecoin-project/filecoin-ffi"
+	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/builtin"
+	miner14 "github.com/filecoin-project/go-state-types/builtin/v14/miner"
+	"github.com/filecoin-project/go-state-types/crypto"
+	"github.com/filecoin-project/go-state-types/proof"
+
+	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/chain/actors"
+	"github.com/filecoin-project/lotus/chain/actors/policy"
+	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/chain/wallet/key"
+)
+
+// TestUnmanagedMiner is a miner that is not managed by the lotus-miner storage infrastructure;
+// all tasks must be manually executed, managed and scheduled by the test or test kit.
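+// A typical flow, as exercised by itests/manual_onboarding_test.go (a sketch only;
+// options and error handling elided):
+//
+//	miner, ens := ens.UnmanagedMiner(&client, opts...)
+//	ens.Start()
+//	sector, respCh, cancel := miner.OnboardCCSectorWithMockProofs(ctx, TestSpt)
+//	defer cancel() // stops the sector's window post loop
+//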
+// Note: `TestUnmanagedMiner` is not thread safe and assumes linear access of its methods
+type TestUnmanagedMiner struct {
+	t       *testing.T
+	options nodeOpts
+
+	cacheDir          string
+	unsealedSectorDir string
+	sealedSectorDir   string
+	currentSectorNum  abi.SectorNumber
+
+	cacheDirPaths       map[abi.SectorNumber]string
+	unsealedSectorPaths map[abi.SectorNumber]string
+	sealedSectorPaths   map[abi.SectorNumber]string
+	sealedCids          map[abi.SectorNumber]cid.Cid
+	unsealedCids        map[abi.SectorNumber]cid.Cid
+	sealTickets         map[abi.SectorNumber]abi.SealRandomness
+
+	proofType map[abi.SectorNumber]abi.RegisteredSealProof
+
+	ActorAddr address.Address
+	OwnerKey  *key.Key
+	FullNode  *TestFullNode
+	Libp2p    struct {
+		PeerID  peer.ID
+		PrivKey libp2pcrypto.PrivKey
+	}
+}
+
+type WindowPostResp struct {
+	Posted bool
+	Error  error
+}
+
+func NewTestUnmanagedMiner(t *testing.T, full *TestFullNode, actorAddr address.Address, opts ...NodeOpt) *TestUnmanagedMiner {
+	require.NotNil(t, full, "full node required when instantiating miner")
+
+	options := DefaultNodeOpts
+	for _, o := range opts {
+		err := o(&options)
+		require.NoError(t, err)
+	}
+
+	privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader)
+	require.NoError(t, err)
+
+	require.NotNil(t, options.ownerKey, "owner key is required for initializing a miner")
+
+	peerId, err := peer.IDFromPrivateKey(privkey)
+	require.NoError(t, err)
+	tmpDir := t.TempDir()
+
+	cacheDir := filepath.Join(tmpDir, fmt.Sprintf("cache-%s", actorAddr))
+	unsealedSectorDir := filepath.Join(tmpDir, fmt.Sprintf("unsealed-%s", actorAddr))
+	sealedSectorDir := filepath.Join(tmpDir, fmt.Sprintf("sealed-%s", actorAddr))
+
+	_ = os.Mkdir(cacheDir, 0755)
+	_ = os.Mkdir(unsealedSectorDir, 0755)
+	_ = os.Mkdir(sealedSectorDir, 0755)
+
+	tm := TestUnmanagedMiner{
+		t:                 t,
+		options:           options,
+		cacheDir:          cacheDir,
+		unsealedSectorDir: unsealedSectorDir,
+		sealedSectorDir:   sealedSectorDir,
+
+		unsealedSectorPaths: make(map[abi.SectorNumber]string),
+		cacheDirPaths:       make(map[abi.SectorNumber]string),
+		sealedSectorPaths:   make(map[abi.SectorNumber]string),
+		sealedCids:          make(map[abi.SectorNumber]cid.Cid),
+		unsealedCids:        make(map[abi.SectorNumber]cid.Cid),
+		sealTickets:         make(map[abi.SectorNumber]abi.SealRandomness),
+
+		ActorAddr:        actorAddr,
+		OwnerKey:         options.ownerKey,
+		FullNode:         full,
+		currentSectorNum: 101,
+		proofType:        make(map[abi.SectorNumber]abi.RegisteredSealProof),
+	}
+	tm.Libp2p.PeerID = peerId
+	tm.Libp2p.PrivKey = privkey
+
+	return &tm
+}
+
+func (tm *TestUnmanagedMiner) AssertNoPower(ctx context.Context) {
+	p := tm.CurrentPower(ctx)
+	tm.t.Logf("Miner %s RBP: %v, QaP: %v", tm.ActorAddr, p.MinerPower.RawBytePower.String(), p.MinerPower.QualityAdjPower.String())
+	require.True(tm.t, p.MinerPower.RawBytePower.IsZero())
+}
+
+func (tm *TestUnmanagedMiner) CurrentPower(ctx context.Context) *api.MinerPower {
+	head, err := tm.FullNode.ChainHead(ctx)
+	require.NoError(tm.t, err)
+
+	p, err := tm.FullNode.StateMinerPower(ctx, tm.ActorAddr, head.Key())
+	require.NoError(tm.t, err)
+
+	return p
+}
+
+func (tm *TestUnmanagedMiner) AssertPower(ctx context.Context, raw uint64, qa uint64) {
+	req := require.New(tm.t)
+	p := tm.CurrentPower(ctx)
+	tm.t.Logf("Miner %s RBP: %v, QaP: %v", tm.ActorAddr, p.MinerPower.RawBytePower.String(), p.MinerPower.QualityAdjPower.String())
+	req.Equal(raw, p.MinerPower.RawBytePower.Uint64())
+	req.Equal(qa, p.MinerPower.QualityAdjPower.Uint64())
+}
+
+func (tm *TestUnmanagedMiner) mkAndSavePiecesToOnboard(_ context.Context, sectorNumber
abi.SectorNumber, pt abi.RegisteredSealProof) []abi.PieceInfo { + paddedPieceSize := abi.PaddedPieceSize(tm.options.sectorSize) + unpaddedPieceSize := paddedPieceSize.Unpadded() + + // Generate random bytes for the piece + randomBytes := make([]byte, unpaddedPieceSize) + _, err := io.ReadFull(rand.Reader, randomBytes) + require.NoError(tm.t, err) + + // Create a temporary file for the first piece + pieceFileA := requireTempFile(tm.t, bytes.NewReader(randomBytes), uint64(unpaddedPieceSize)) + + // Generate the piece CID from the file + pieceCIDA, err := ffi.GeneratePieceCIDFromFile(pt, pieceFileA, unpaddedPieceSize) + require.NoError(tm.t, err) + + // Reset file offset to the beginning after CID generation + _, err = pieceFileA.Seek(0, io.SeekStart) + require.NoError(tm.t, err) + + unsealedSectorFile := requireTempFile(tm.t, bytes.NewReader([]byte{}), 0) + defer func() { + _ = unsealedSectorFile.Close() + }() + + // Write the piece to the staged sector file without alignment + writtenBytes, pieceCID, err := ffi.WriteWithoutAlignment(pt, pieceFileA, unpaddedPieceSize, unsealedSectorFile) + require.NoError(tm.t, err) + require.EqualValues(tm.t, unpaddedPieceSize, writtenBytes) + require.True(tm.t, pieceCID.Equals(pieceCIDA)) + + // Create a struct for the piece info + publicPieces := []abi.PieceInfo{{ + Size: paddedPieceSize, + PieceCID: pieceCIDA, + }} + + // Create a temporary file for the sealed sector + sealedSectorFile := requireTempFile(tm.t, bytes.NewReader([]byte{}), 0) + defer func() { + _ = sealedSectorFile.Close() + }() + + // Update paths for the sector + tm.sealedSectorPaths[sectorNumber] = sealedSectorFile.Name() + tm.unsealedSectorPaths[sectorNumber] = unsealedSectorFile.Name() + tm.cacheDirPaths[sectorNumber] = filepath.Join(tm.cacheDir, fmt.Sprintf("%d", sectorNumber)) + + // Ensure the cache directory exists + _ = os.Mkdir(tm.cacheDirPaths[sectorNumber], 0755) + + return publicPieces +} + +func (tm *TestUnmanagedMiner) makeAndSaveCCSector(_ context.Context, sectorNumber abi.SectorNumber) { + requirements := require.New(tm.t) + + // Create cache directory + cacheDirPath := filepath.Join(tm.cacheDir, fmt.Sprintf("%d", sectorNumber)) + requirements.NoError(os.Mkdir(cacheDirPath, 0755)) + tm.t.Logf("Miner %s: Sector %d: created cache directory at %s", tm.ActorAddr, sectorNumber, cacheDirPath) + + // Define paths for unsealed and sealed sectors + unsealedSectorPath := filepath.Join(tm.unsealedSectorDir, fmt.Sprintf("%d", sectorNumber)) + sealedSectorPath := filepath.Join(tm.sealedSectorDir, fmt.Sprintf("%d", sectorNumber)) + unsealedSize := abi.PaddedPieceSize(tm.options.sectorSize).Unpadded() + + // Write unsealed sector file + requirements.NoError(os.WriteFile(unsealedSectorPath, make([]byte, unsealedSize), 0644)) + tm.t.Logf("Miner %s: Sector %d: wrote unsealed CC sector to %s", tm.ActorAddr, sectorNumber, unsealedSectorPath) + + // Write sealed sector file + requirements.NoError(os.WriteFile(sealedSectorPath, make([]byte, tm.options.sectorSize), 0644)) + tm.t.Logf("Miner %s: Sector %d: wrote sealed CC sector to %s", tm.ActorAddr, sectorNumber, sealedSectorPath) + + // Update paths in the struct + tm.unsealedSectorPaths[sectorNumber] = unsealedSectorPath + tm.sealedSectorPaths[sectorNumber] = sealedSectorPath + tm.cacheDirPaths[sectorNumber] = cacheDirPath +} + +func (tm *TestUnmanagedMiner) OnboardSectorWithPiecesAndRealProofs(ctx context.Context, proofType abi.RegisteredSealProof) (abi.SectorNumber, chan WindowPostResp, + context.CancelFunc) { + req := require.New(tm.t) + 
sectorNumber := tm.currentSectorNum
+	tm.currentSectorNum++ // sector numbers are assigned sequentially, starting at 101 (see NewTestUnmanagedMiner)
+
+	// Step 1: Wait for the pre-commit seal randomness to be available (we can only draw seal randomness from tipsets that have already achieved finality)
+	preCommitSealRand := tm.waitPreCommitSealRandomness(ctx, sectorNumber)
+
+	// Step 2: Build a sector with non-zero pieces that we want to onboard
+	pieces := tm.mkAndSavePiecesToOnboard(ctx, sectorNumber, proofType)
+
+	// Step 3: Generate a Pre-Commit for the sector -> this persists the seal tickets and sealed/unsealed CIDs on the `TestUnmanagedMiner` state
+	tm.generatePreCommit(ctx, sectorNumber, preCommitSealRand, proofType, pieces)
+
+	// Step 4: Submit the Pre-Commit to the network
+	unsealedCid := tm.unsealedCids[sectorNumber]
+	r, err := tm.submitMessage(ctx, &miner14.PreCommitSectorBatchParams2{
+		Sectors: []miner14.SectorPreCommitInfo{{
+			Expiration:    2880 * 300,
+			SectorNumber:  sectorNumber,
+			SealProof:     TestSpt,
+			SealedCID:     tm.sealedCids[sectorNumber],
+			SealRandEpoch: preCommitSealRand,
+			UnsealedCid:   &unsealedCid,
+		}},
+	}, 1, builtin.MethodsMiner.PreCommitSectorBatch2)
+	req.NoError(err)
+	req.True(r.Receipt.ExitCode.IsSuccess())
+
+	// Step 5: Generate a ProveCommit for the sector
+	waitSeedRandomness := tm.proveCommitWaitSeed(ctx, sectorNumber)
+
+	proveCommit := tm.generateProveCommit(ctx, sectorNumber, proofType, waitSeedRandomness, pieces)
+
+	// Step 6: Submit the ProveCommit to the network
+	tm.t.Log("Submitting ProveCommitSector ...")
+
+	var manifest []miner14.PieceActivationManifest
+	for _, piece := range pieces {
+		manifest = append(manifest, miner14.PieceActivationManifest{
+			CID:  piece.PieceCID,
+			Size: piece.Size,
+		})
+	}
+
+	r, err = tm.submitMessage(ctx, &miner14.ProveCommitSectors3Params{
+		SectorActivations:        []miner14.SectorActivationManifest{{SectorNumber: sectorNumber, Pieces: manifest}},
+		SectorProofs:             [][]byte{proveCommit},
+		RequireActivationSuccess: true,
+	}, 1, builtin.MethodsMiner.ProveCommitSectors3)
+	req.NoError(err)
+	req.True(r.Receipt.ExitCode.IsSuccess())
+
+	tm.proofType[sectorNumber] = proofType
+
+	respCh := make(chan WindowPostResp, 1)
+
+	wdCtx, cancelF := context.WithCancel(ctx)
+	go tm.wdPostLoop(wdCtx, sectorNumber, respCh, false, tm.sealedCids[sectorNumber], tm.sealedSectorPaths[sectorNumber], tm.cacheDirPaths[sectorNumber])
+
+	return sectorNumber, respCh, cancelF
+}
+
+func (tm *TestUnmanagedMiner) OnboardSectorWithPiecesAndMockProofs(ctx context.Context, proofType abi.RegisteredSealProof) (abi.SectorNumber, chan WindowPostResp,
+	context.CancelFunc) {
+	req := require.New(tm.t)
+	sectorNumber := tm.currentSectorNum
+	tm.currentSectorNum++
+
+	// Step 1: Wait for the pre-commit seal randomness to be available (we can only draw seal randomness from tipsets that have already achieved finality)
+	preCommitSealRand := tm.waitPreCommitSealRandomness(ctx, sectorNumber)
+
+	// Step 2: Build a sector with non-zero pieces that we want to onboard
+	pieces := []abi.PieceInfo{{
+		Size:     abi.PaddedPieceSize(tm.options.sectorSize),
+		PieceCID: cid.MustParse("baga6ea4seaqjtovkwk4myyzj56eztkh5pzsk5upksan6f5outesy62bsvl4dsha"),
+	}}
+
+	// Step 3: Set mock sealed/unsealed CIDs for the sector in place of generating a real Pre-Commit
+	tm.sealedCids[sectorNumber] = cid.MustParse("bagboea4b5abcatlxechwbp7kjpjguna6r6q7ejrhe6mdp3lf34pmswn27pkkiekz")
+	tm.unsealedCids[sectorNumber] = cid.MustParse("baga6ea4seaqjtovkwk4myyzj56eztkh5pzsk5upksan6f5outesy62bsvl4dsha")
+
+	// Step 4: Submit the Pre-Commit to the network
+	unsealedCid :=
tm.unsealedCids[sectorNumber]
+	r, err := tm.submitMessage(ctx, &miner14.PreCommitSectorBatchParams2{
+		Sectors: []miner14.SectorPreCommitInfo{{
+			Expiration:    2880 * 300,
+			SectorNumber:  sectorNumber,
+			SealProof:     TestSpt,
+			SealedCID:     tm.sealedCids[sectorNumber],
+			SealRandEpoch: preCommitSealRand,
+			UnsealedCid:   &unsealedCid,
+		}},
+	}, 1, builtin.MethodsMiner.PreCommitSectorBatch2)
+	req.NoError(err)
+	req.True(r.Receipt.ExitCode.IsSuccess())
+
+	// Step 5: Generate a ProveCommit for the sector
+	_ = tm.proveCommitWaitSeed(ctx, sectorNumber)
+	sectorProof := []byte{0xde, 0xad, 0xbe, 0xef}
+
+	// Step 6: Submit the ProveCommit to the network
+	tm.t.Log("Submitting ProveCommitSector ...")
+
+	var manifest []miner14.PieceActivationManifest
+	for _, piece := range pieces {
+		manifest = append(manifest, miner14.PieceActivationManifest{
+			CID:  piece.PieceCID,
+			Size: piece.Size,
+		})
+	}
+
+	r, err = tm.submitMessage(ctx, &miner14.ProveCommitSectors3Params{
+		SectorActivations:        []miner14.SectorActivationManifest{{SectorNumber: sectorNumber, Pieces: manifest}},
+		SectorProofs:             [][]byte{sectorProof},
+		RequireActivationSuccess: true,
+	}, 1, builtin.MethodsMiner.ProveCommitSectors3)
+	req.NoError(err)
+	req.True(r.Receipt.ExitCode.IsSuccess())
+
+	tm.proofType[sectorNumber] = proofType
+
+	respCh := make(chan WindowPostResp, 1)
+
+	wdCtx, cancelF := context.WithCancel(ctx)
+	go tm.wdPostLoop(wdCtx, sectorNumber, respCh, true, tm.sealedCids[sectorNumber], tm.sealedSectorPaths[sectorNumber], tm.cacheDirPaths[sectorNumber])
+
+	return sectorNumber, respCh, cancelF
+}
+
+func (tm *TestUnmanagedMiner) mkStagedFileWithPieces(pt abi.RegisteredSealProof) ([]abi.PieceInfo, string) {
+	paddedPieceSize := abi.PaddedPieceSize(tm.options.sectorSize)
+	unpaddedPieceSize := paddedPieceSize.Unpadded()
+
+	// Generate random bytes for the piece
+	randomBytes := make([]byte, unpaddedPieceSize)
+	_, err := io.ReadFull(rand.Reader, randomBytes)
+	require.NoError(tm.t, err)
+
+	// Create a temporary file for the first piece
+	pieceFileA := requireTempFile(tm.t, bytes.NewReader(randomBytes), uint64(unpaddedPieceSize))
+
+	// Generate the piece CID from the file
+	pieceCIDA, err := ffi.GeneratePieceCIDFromFile(pt, pieceFileA, unpaddedPieceSize)
+	require.NoError(tm.t, err)
+
+	// Reset file offset to the beginning after CID generation
+	_, err = pieceFileA.Seek(0, io.SeekStart)
+	require.NoError(tm.t, err)
+
+	unsealedSectorFile := requireTempFile(tm.t, bytes.NewReader([]byte{}), 0)
+	defer func() {
+		_ = unsealedSectorFile.Close()
+	}()
+
+	// Write the piece to the staged sector file without alignment
+	writtenBytes, pieceCID, err := ffi.WriteWithoutAlignment(pt, pieceFileA, unpaddedPieceSize, unsealedSectorFile)
+	require.NoError(tm.t, err)
+	require.EqualValues(tm.t, unpaddedPieceSize, writtenBytes)
+	require.True(tm.t, pieceCID.Equals(pieceCIDA))
+
+	// Create a struct for the piece info
+	publicPieces := []abi.PieceInfo{{
+		Size:     paddedPieceSize,
+		PieceCID: pieceCIDA,
+	}}
+
+	return publicPieces, unsealedSectorFile.Name()
+}
+
+func (tm *TestUnmanagedMiner) SnapDealWithRealProofs(ctx context.Context, proofType abi.RegisteredSealProof, sectorNumber abi.SectorNumber) {
+	// Generate staged piece data to snap into the existing sector
+	pieces, unsealedPath := tm.mkStagedFileWithPieces(proofType)
+	updateProofType := abi.SealProofInfos[proofType].UpdateProof
+
+	s, err := os.Stat(tm.sealedSectorPaths[sectorNumber])
+	require.NoError(tm.t, err)
+
+	randomBytes := make([]byte, s.Size())
+	_, err = io.ReadFull(rand.Reader, randomBytes)
+	
require.NoError(tm.t, err)
+
+	updatePath := requireTempFile(tm.t, bytes.NewReader(randomBytes), uint64(s.Size()))
+	require.NoError(tm.t, updatePath.Close())
+	updateDir := filepath.Join(tm.t.TempDir(), fmt.Sprintf("update-%d", sectorNumber))
+	require.NoError(tm.t, os.MkdirAll(updateDir, 0700))
+
+	newSealed, newUnsealed, err := ffi.SectorUpdate.EncodeInto(updateProofType, updatePath.Name(), updateDir,
+		tm.sealedSectorPaths[sectorNumber], tm.cacheDirPaths[sectorNumber], unsealedPath, pieces)
+	require.NoError(tm.t, err)
+
+	vp, err := ffi.SectorUpdate.GenerateUpdateVanillaProofs(updateProofType, tm.sealedCids[sectorNumber],
+		newSealed, newUnsealed, updatePath.Name(), updateDir, tm.sealedSectorPaths[sectorNumber],
+		tm.cacheDirPaths[sectorNumber])
+	require.NoError(tm.t, err)
+
+	snapProof, err := ffi.SectorUpdate.GenerateUpdateProofWithVanilla(updateProofType, tm.sealedCids[sectorNumber],
+		newSealed, newUnsealed, vp)
+	require.NoError(tm.t, err)
+
+	// Submit the snap deal proof to the network
+	var manifest []miner14.PieceActivationManifest
+	for _, piece := range pieces {
+		manifest = append(manifest, miner14.PieceActivationManifest{
+			CID:  piece.PieceCID,
+			Size: piece.Size,
+		})
+	}
+
+	head, err := tm.FullNode.ChainHead(ctx)
+	require.NoError(tm.t, err)
+
+	sl, err := tm.FullNode.StateSectorPartition(ctx, tm.ActorAddr, sectorNumber, head.Key())
+	require.NoError(tm.t, err)
+
+	params := &miner14.ProveReplicaUpdates3Params{
+		SectorUpdates: []miner14.SectorUpdateManifest{
+			{
+				Sector:       sectorNumber,
+				Deadline:     sl.Deadline,
+				Partition:    sl.Partition,
+				NewSealedCID: newSealed,
+				Pieces:       manifest,
+			},
+		},
+		SectorProofs:               [][]byte{snapProof},
+		UpdateProofsType:           updateProofType,
+		RequireActivationSuccess:   true,
+		RequireNotificationSuccess: false,
+	}
+
+	r, err := tm.submitMessage(ctx, params, 1, builtin.MethodsMiner.ProveReplicaUpdates3)
+	require.NoError(tm.t, err)
+	require.True(tm.t, r.Receipt.ExitCode.IsSuccess())
+}
+
+func (tm *TestUnmanagedMiner) OnboardCCSectorWithMockProofs(ctx context.Context, proofType abi.RegisteredSealProof) (abi.SectorNumber, chan WindowPostResp,
+	context.CancelFunc) {
+	req := require.New(tm.t)
+	sectorNumber := tm.currentSectorNum
+	tm.currentSectorNum++
+
+	// Step 1: Wait for the pre-commit seal randomness to be available (we can only draw seal randomness from tipsets that have already achieved finality)
+	preCommitSealRand := tm.waitPreCommitSealRandomness(ctx, sectorNumber)
+
+	// Steps 2-3 (building sector data and generating a real Pre-Commit) are skipped; use a mock sealed CID instead
+	tm.sealedCids[sectorNumber] = cid.MustParse("bagboea4b5abcatlxechwbp7kjpjguna6r6q7ejrhe6mdp3lf34pmswn27pkkiekz")
+
+	// Step 4: Submit the Pre-Commit to the network
+	r, err := tm.submitMessage(ctx, &miner14.PreCommitSectorBatchParams2{
+		Sectors: []miner14.SectorPreCommitInfo{{
+			Expiration:    2880 * 300,
+			SectorNumber:  sectorNumber,
+			SealProof:     TestSpt,
+			SealedCID:     tm.sealedCids[sectorNumber],
+			SealRandEpoch: preCommitSealRand,
+		}},
+	}, 1, builtin.MethodsMiner.PreCommitSectorBatch2)
+	req.NoError(err)
+	req.True(r.Receipt.ExitCode.IsSuccess())
+
+	// Step 5: Generate a ProveCommit for the CC sector
+	_ = tm.proveCommitWaitSeed(ctx, sectorNumber)
+	sectorProof := []byte{0xde, 0xad, 0xbe, 0xef}
+
+	// Step 6: Submit the ProveCommit to the network
+	tm.t.Log("Submitting ProveCommitSector ...")
+
+	r, err = tm.submitMessage(ctx, &miner14.ProveCommitSectors3Params{
+		SectorActivations:        []miner14.SectorActivationManifest{{SectorNumber: sectorNumber}},
+		SectorProofs:             [][]byte{sectorProof},
+		RequireActivationSuccess: true,
+	}, 0, builtin.MethodsMiner.ProveCommitSectors3)
+	req.NoError(err)
+	
req.True(r.Receipt.ExitCode.IsSuccess())
+
+	tm.proofType[sectorNumber] = proofType
+
+	respCh := make(chan WindowPostResp, 1)
+
+	wdCtx, cancelF := context.WithCancel(ctx)
+	go tm.wdPostLoop(wdCtx, sectorNumber, respCh, true, tm.sealedCids[sectorNumber], tm.sealedSectorPaths[sectorNumber], tm.cacheDirPaths[sectorNumber])
+
+	return sectorNumber, respCh, cancelF
+}
+
+func (tm *TestUnmanagedMiner) OnboardCCSectorWithRealProofs(ctx context.Context, proofType abi.RegisteredSealProof) (abi.SectorNumber, chan WindowPostResp,
+	context.CancelFunc) {
+	req := require.New(tm.t)
+	sectorNumber := tm.currentSectorNum
+	tm.currentSectorNum++
+
+	// -------------------- Create pre-commit for the CC sector -> we'll just pre-commit `sector size` worth of 0s for this CC sector
+
+	// Step 1: Wait for the pre-commit seal randomness to be available (we can only draw seal randomness from tipsets that have already achieved finality)
+	preCommitSealRand := tm.waitPreCommitSealRandomness(ctx, sectorNumber)
+
+	// Step 2: Write empty bytes that we want to seal i.e. create our CC sector
+	tm.makeAndSaveCCSector(ctx, sectorNumber)
+
+	// Step 3: Generate a Pre-Commit for the CC sector -> this persists the seal tickets and sealed/unsealed CIDs on the `TestUnmanagedMiner` state
+	tm.generatePreCommit(ctx, sectorNumber, preCommitSealRand, proofType, []abi.PieceInfo{})
+
+	// Step 4: Submit the Pre-Commit to the network
+	r, err := tm.submitMessage(ctx, &miner14.PreCommitSectorBatchParams2{
+		Sectors: []miner14.SectorPreCommitInfo{{
+			Expiration:    2880 * 300,
+			SectorNumber:  sectorNumber,
+			SealProof:     TestSpt,
+			SealedCID:     tm.sealedCids[sectorNumber],
+			SealRandEpoch: preCommitSealRand,
+		}},
+	}, 1, builtin.MethodsMiner.PreCommitSectorBatch2)
+	req.NoError(err)
+	req.True(r.Receipt.ExitCode.IsSuccess())
+
+	// Step 5: Generate a ProveCommit for the CC sector
+	waitSeedRandomness := tm.proveCommitWaitSeed(ctx, sectorNumber)
+
+	proveCommit := tm.generateProveCommit(ctx, sectorNumber, proofType, waitSeedRandomness, []abi.PieceInfo{})
+
+	// Step 6: Submit the ProveCommit to the network
+	tm.t.Log("Submitting ProveCommitSector ...")
+
+	r, err = tm.submitMessage(ctx, &miner14.ProveCommitSectors3Params{
+		SectorActivations:        []miner14.SectorActivationManifest{{SectorNumber: sectorNumber}},
+		SectorProofs:             [][]byte{proveCommit},
+		RequireActivationSuccess: true,
+	}, 0, builtin.MethodsMiner.ProveCommitSectors3)
+	req.NoError(err)
+	req.True(r.Receipt.ExitCode.IsSuccess())
+
+	tm.proofType[sectorNumber] = proofType
+
+	respCh := make(chan WindowPostResp, 1)
+
+	wdCtx, cancelF := context.WithCancel(ctx)
+	go tm.wdPostLoop(wdCtx, sectorNumber, respCh, false, tm.sealedCids[sectorNumber], tm.sealedSectorPaths[sectorNumber], tm.cacheDirPaths[sectorNumber])
+
+	return sectorNumber, respCh, cancelF
+}
+
+func (tm *TestUnmanagedMiner) wdPostLoop(ctx context.Context, sectorNumber abi.SectorNumber, respCh chan WindowPostResp, withMockProofs bool, sealedCid cid.Cid, sealedPath, cacheDir string) {
+	go func() {
+		var firstPost bool
+
+		writeRespF := func(respErr error) {
+			var send WindowPostResp
+			if respErr == nil {
+				if firstPost {
+					return // already reported on our first post, no error to report, don't send anything
+				}
+				send.Posted = true
+				firstPost = true
+			} else {
+				if ctx.Err() == nil {
+					tm.t.Logf("Sector %d: WindowPoSt submission failed: %s", sectorNumber, respErr)
+				}
+				send.Error = respErr
+			}
+			select {
+			case respCh <- send:
+			case <-ctx.Done():
+			default:
+			}
+		}
+
+		var postCount int
+		for ctx.Err() == nil {
+			currentEpoch, nextPost, err :=
tm.calculateNextPostEpoch(ctx, sectorNumber)
+			if err != nil {
+				writeRespF(err)
+				return
+			}
+			tm.t.Logf("Sector %d: next post at epoch %d, current epoch %d", sectorNumber, nextPost, currentEpoch)
+
+			if nextPost > currentEpoch {
+				if _, err := tm.FullNode.WaitTillChainOrError(ctx, HeightAtLeast(nextPost)); err != nil {
+					writeRespF(err)
+					return
+				}
+			}
+
+			err = tm.submitWindowPost(ctx, sectorNumber, withMockProofs, sealedCid, sealedPath, cacheDir)
+			writeRespF(err) // send an error, or first post, or nothing if no error and this isn't the first post
+			postCount++
+			tm.t.Logf("Sector %d: WindowPoSt #%d submitted", sectorNumber, postCount)
+		}
+	}()
+}
+
+func (tm *TestUnmanagedMiner) SubmitPostDispute(ctx context.Context, sectorNumber abi.SectorNumber) error {
+	tm.t.Logf("Miner %s: Starting dispute submission for sector %d", tm.ActorAddr, sectorNumber)
+
+	head, err := tm.FullNode.ChainHead(ctx)
+	if err != nil {
+		return fmt.Errorf("Miner(%s): failed to get chain head: %w", tm.ActorAddr, err)
+	}
+
+	sp, err := tm.FullNode.StateSectorPartition(ctx, tm.ActorAddr, sectorNumber, head.Key())
+	if err != nil {
+		return fmt.Errorf("Miner(%s): failed to get sector partition for sector %d: %w", tm.ActorAddr, sectorNumber, err)
+	}
+
+	di, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, head.Key())
+	if err != nil {
+		return fmt.Errorf("Miner(%s): failed to get proving deadline for sector %d: %w", tm.ActorAddr, sectorNumber, err)
+	}
+
+	disputeEpoch := di.Close + 5
+	tm.t.Logf("Miner %s: Sector %d - Waiting %d epochs until epoch %d to submit dispute", tm.ActorAddr, sectorNumber, disputeEpoch-head.Height(), disputeEpoch)
+
+	tm.FullNode.WaitTillChain(ctx, HeightAtLeast(disputeEpoch))
+
+	tm.t.Logf("Miner %s: Sector %d - Disputing WindowedPoSt to confirm validity at epoch %d", tm.ActorAddr, sectorNumber, disputeEpoch)
+
+	_, err = tm.submitMessage(ctx, &miner14.DisputeWindowedPoStParams{
+		Deadline:  sp.Deadline,
+		PoStIndex: 0,
+	}, 1, builtin.MethodsMiner.DisputeWindowedPoSt)
+	return err
+}
+
+func (tm *TestUnmanagedMiner) submitWindowPost(ctx context.Context, sectorNumber abi.SectorNumber, withMockProofs bool, sealedCid cid.Cid, sealedPath, cacheDir string) error {
+	tm.t.Logf("Miner(%s): WindowPoST(%d): Running WindowPoSt ...\n", tm.ActorAddr, sectorNumber)
+
+	head, err := tm.FullNode.ChainHead(ctx)
+	if err != nil {
+		return fmt.Errorf("Miner(%s): failed to get chain head: %w", tm.ActorAddr, err)
+	}
+
+	sp, err := tm.FullNode.StateSectorPartition(ctx, tm.ActorAddr, sectorNumber, head.Key())
+	if err != nil {
+		return fmt.Errorf("Miner(%s): failed to get sector partition for sector %d: %w", tm.ActorAddr, sectorNumber, err)
+	}
+
+	di, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, head.Key())
+	if err != nil {
+		return fmt.Errorf("Miner(%s): failed to get proving deadline for sector %d: %w", tm.ActorAddr, sectorNumber, err)
+	}
+	tm.t.Logf("Miner(%s): WindowPoST(%d): SectorPartition: %+v, ProvingDeadline: %+v\n", tm.ActorAddr, sectorNumber, sp, di)
+	if di.Index != sp.Deadline {
+		return fmt.Errorf("Miner(%s): sector %d is in deadline %d but the current proving deadline is %d", tm.ActorAddr, sectorNumber, sp.Deadline, di.Index)
+	}
+
+	var proofBytes []byte
+	if withMockProofs {
+		proofBytes = []byte{0xde, 0xad, 0xbe, 0xef}
+	} else {
+		proofBytes, err = tm.generateWindowPost(ctx, sectorNumber, sealedCid, sealedPath, cacheDir)
+		if err != nil {
+			return fmt.Errorf("Miner(%s): failed to generate window post for sector %d: %w", tm.ActorAddr, sectorNumber, err)
+		}
+	}
+
+	// The PoSt message commits to chain randomness drawn at the deadline's Challenge
+	// epoch (the ChainCommitEpoch/ChainCommitRand fields below); the miner actor
+	// validates this commitment when the message is applied.
+	
tm.t.Logf("Miner(%s): WindowedPoSt(%d) Submitting ...\n", tm.ActorAddr, sectorNumber) + + chainRandomnessEpoch := di.Challenge + chainRandomness, err := tm.FullNode.StateGetRandomnessFromTickets(ctx, crypto.DomainSeparationTag_PoStChainCommit, chainRandomnessEpoch, + nil, head.Key()) + if err != nil { + return fmt.Errorf("Miner(%s): failed to get chain randomness for sector %d: %w", tm.ActorAddr, sectorNumber, err) + } + + minerInfo, err := tm.FullNode.StateMinerInfo(ctx, tm.ActorAddr, head.Key()) + if err != nil { + return fmt.Errorf("Miner(%s): failed to get miner info for sector %d: %w", tm.ActorAddr, sectorNumber, err) + } + + r, err := tm.submitMessage(ctx, &miner14.SubmitWindowedPoStParams{ + ChainCommitEpoch: chainRandomnessEpoch, + ChainCommitRand: chainRandomness, + Deadline: sp.Deadline, + Partitions: []miner14.PoStPartition{{Index: sp.Partition}}, + Proofs: []proof.PoStProof{{PoStProof: minerInfo.WindowPoStProofType, ProofBytes: proofBytes}}, + }, 0, builtin.MethodsMiner.SubmitWindowedPoSt) + if err != nil { + return fmt.Errorf("Miner(%s): failed to submit window post for sector %d: %w", tm.ActorAddr, sectorNumber, err) + } + + if !r.Receipt.ExitCode.IsSuccess() { + return fmt.Errorf("Miner(%s): submitting PoSt for sector %d failed: %s", tm.ActorAddr, sectorNumber, r.Receipt.ExitCode) + } + + tm.t.Logf("Miner(%s): WindowedPoSt(%d) Submitted ...\n", tm.ActorAddr, sectorNumber) + + return nil +} + +func (tm *TestUnmanagedMiner) generateWindowPost( + ctx context.Context, + sectorNumber abi.SectorNumber, + sealedCid cid.Cid, + sealedPath string, + cacheDir string, +) ([]byte, error) { + head, err := tm.FullNode.ChainHead(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get chain head: %w", err) + } + + minerInfo, err := tm.FullNode.StateMinerInfo(ctx, tm.ActorAddr, head.Key()) + if err != nil { + return nil, fmt.Errorf("failed to get miner info: %w", err) + } + + di, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, types.EmptyTSK) + if err != nil { + return nil, fmt.Errorf("failed to get proving deadline: %w", err) + } + + minerAddrBytes := new(bytes.Buffer) + if err := tm.ActorAddr.MarshalCBOR(minerAddrBytes); err != nil { + return nil, fmt.Errorf("failed to marshal miner address: %w", err) + } + + rand, err := tm.FullNode.StateGetRandomnessFromBeacon(ctx, crypto.DomainSeparationTag_WindowedPoStChallengeSeed, di.Challenge, minerAddrBytes.Bytes(), head.Key()) + if err != nil { + return nil, fmt.Errorf("failed to get randomness: %w", err) + } + postRand := abi.PoStRandomness(rand) + postRand[31] &= 0x3f // make fr32 compatible + + privateSectorInfo := ffi.PrivateSectorInfo{ + SectorInfo: proof.SectorInfo{ + SealProof: tm.proofType[sectorNumber], + SectorNumber: sectorNumber, + SealedCID: sealedCid, + }, + CacheDirPath: cacheDir, + PoStProofType: minerInfo.WindowPoStProofType, + SealedSectorPath: sealedPath, + } + + actorIdNum, err := address.IDFromAddress(tm.ActorAddr) + if err != nil { + return nil, fmt.Errorf("failed to get actor ID: %w", err) + } + actorId := abi.ActorID(actorIdNum) + + windowProofs, faultySectors, err := ffi.GenerateWindowPoSt(actorId, ffi.NewSortedPrivateSectorInfo(privateSectorInfo), postRand) + if err != nil { + return nil, fmt.Errorf("failed to generate window post: %w", err) + } + if len(faultySectors) > 0 { + return nil, fmt.Errorf("post failed for sectors: %v", faultySectors) + } + if len(windowProofs) != 1 { + return nil, fmt.Errorf("expected 1 proof, got %d", len(windowProofs)) + } + if windowProofs[0].PoStProof != 
minerInfo.WindowPoStProofType {
+		return nil, fmt.Errorf("expected proof type %d, got %d", minerInfo.WindowPoStProofType, windowProofs[0].PoStProof)
+	}
+	proofBytes := windowProofs[0].ProofBytes
+
+	info := proof.WindowPoStVerifyInfo{
+		Randomness:        postRand,
+		Proofs:            []proof.PoStProof{{PoStProof: minerInfo.WindowPoStProofType, ProofBytes: proofBytes}},
+		ChallengedSectors: []proof.SectorInfo{{SealProof: tm.proofType[sectorNumber], SectorNumber: sectorNumber, SealedCID: sealedCid}},
+		Prover:            actorId,
+	}
+
+	verified, err := ffi.VerifyWindowPoSt(info)
+	if err != nil {
+		return nil, fmt.Errorf("failed to verify window post: %w", err)
+	}
+	if !verified {
+		return nil, fmt.Errorf("window post verification failed")
+	}
+
+	return proofBytes, nil
+}
+
+func (tm *TestUnmanagedMiner) waitPreCommitSealRandomness(ctx context.Context, sectorNumber abi.SectorNumber) abi.ChainEpoch {
+	// We want to draw seal randomness from a tipset that has already achieved finality as PreCommits are expensive to re-generate.
+	// Check if we already have a final epoch to draw from, and wait for one if we don't.
+	head, err := tm.FullNode.ChainHead(ctx)
+	require.NoError(tm.t, err)
+
+	var sealRandEpoch abi.ChainEpoch
+	if head.Height() > policy.SealRandomnessLookback {
+		sealRandEpoch = head.Height() - policy.SealRandomnessLookback
+	} else {
+		sealRandEpoch = policy.SealRandomnessLookback
+		tm.t.Logf("Miner %s waiting for at least epoch %d for seal randomness for sector %d (current epoch %d)...", tm.ActorAddr, sealRandEpoch+5,
+			sectorNumber, head.Height())
+		tm.FullNode.WaitTillChain(ctx, HeightAtLeast(sealRandEpoch+5))
+	}
+
+	tm.t.Logf("Miner %s using seal randomness from epoch %d for head %d for sector %d", tm.ActorAddr, sealRandEpoch, head.Height(), sectorNumber)
+
+	return sealRandEpoch
+}
+
+// calculateNextPostEpoch calculates the first epoch of the proving window for the
+// deadline that the given sector is assigned to.
+// It returns the current epoch and the calculated proving epoch.
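+//
+// Worked example with illustrative numbers: if PeriodStart=1000,
+// WPoStProvingPeriod=2880 and WPoStPeriodDeadlines=48, a sector assigned to
+// deadline index 3 has its proving window open at 1000 + (2880/48)*3 = 1180.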
+func (tm *TestUnmanagedMiner) calculateNextPostEpoch( + ctx context.Context, + sectorNumber abi.SectorNumber, +) (abi.ChainEpoch, abi.ChainEpoch, error) { + // Retrieve the current blockchain head + head, err := tm.FullNode.ChainHead(ctx) + if err != nil { + return 0, 0, fmt.Errorf("failed to get chain head: %w", err) + } + + // Fetch the sector partition for the given sector number + sp, err := tm.FullNode.StateSectorPartition(ctx, tm.ActorAddr, sectorNumber, head.Key()) + if err != nil { + return 0, 0, fmt.Errorf("failed to get sector partition: %w", err) + } + + tm.t.Logf("Miner %s: WindowPoST(%d): SectorPartition: %+v", tm.ActorAddr, sectorNumber, sp) + + // Obtain the proving deadline information for the miner + di, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, head.Key()) + if err != nil { + return 0, 0, fmt.Errorf("failed to get proving deadline: %w", err) + } + + tm.t.Logf("Miner %s: WindowPoST(%d): ProvingDeadline: %+v", tm.ActorAddr, sectorNumber, di) + + // Calculate the start of the period, adjusting if the current deadline has passed + periodStart := di.PeriodStart + if di.PeriodStart < di.CurrentEpoch && sp.Deadline <= di.Index { + // If the deadline has passed in the current proving period, calculate for the next period + periodStart += di.WPoStProvingPeriod + } + + // Calculate the exact epoch when proving should occur + provingEpoch := periodStart + (di.WPoStProvingPeriod/abi.ChainEpoch(di.WPoStPeriodDeadlines))*abi.ChainEpoch(sp.Deadline) + + tm.t.Logf("Miner %s: WindowPoST(%d): next ProvingEpoch: %d", tm.ActorAddr, sectorNumber, provingEpoch) + + return di.CurrentEpoch, provingEpoch, nil +} + +func (tm *TestUnmanagedMiner) generatePreCommit( + ctx context.Context, + sectorNumber abi.SectorNumber, + sealRandEpoch abi.ChainEpoch, + proofType abi.RegisteredSealProof, + pieceInfo []abi.PieceInfo, +) { + req := require.New(tm.t) + tm.t.Logf("Miner %s: Generating proof type %d PreCommit for sector %d...", tm.ActorAddr, proofType, sectorNumber) + + head, err := tm.FullNode.ChainHead(ctx) + req.NoError(err, "Miner %s: Failed to get chain head for sector %d", tm.ActorAddr, sectorNumber) + + minerAddrBytes := new(bytes.Buffer) + req.NoError(tm.ActorAddr.MarshalCBOR(minerAddrBytes), "Miner %s: Failed to marshal address for sector %d", tm.ActorAddr, sectorNumber) + + rand, err := tm.FullNode.StateGetRandomnessFromTickets(ctx, crypto.DomainSeparationTag_SealRandomness, sealRandEpoch, minerAddrBytes.Bytes(), head.Key()) + req.NoError(err, "Miner %s: Failed to get randomness for sector %d", tm.ActorAddr, sectorNumber) + sealTickets := abi.SealRandomness(rand) + + tm.t.Logf("Miner %s: Running proof type %d SealPreCommitPhase1 for sector %d...", tm.ActorAddr, proofType, sectorNumber) + + actorIdNum, err := address.IDFromAddress(tm.ActorAddr) + req.NoError(err, "Miner %s: Failed to get actor ID for sector %d", tm.ActorAddr, sectorNumber) + actorId := abi.ActorID(actorIdNum) + + pc1, err := ffi.SealPreCommitPhase1( + proofType, + tm.cacheDirPaths[sectorNumber], + tm.unsealedSectorPaths[sectorNumber], + tm.sealedSectorPaths[sectorNumber], + sectorNumber, + actorId, + sealTickets, + pieceInfo, + ) + req.NoError(err, "Miner %s: SealPreCommitPhase1 failed for sector %d", tm.ActorAddr, sectorNumber) + req.NotNil(pc1, "Miner %s: SealPreCommitPhase1 returned nil for sector %d", tm.ActorAddr, sectorNumber) + + tm.t.Logf("Miner %s: Running proof type %d SealPreCommitPhase2 for sector %d...", tm.ActorAddr, proofType, sectorNumber) + + sealedCid, unsealedCid, err := 
ffi.SealPreCommitPhase2( + pc1, + tm.cacheDirPaths[sectorNumber], + tm.sealedSectorPaths[sectorNumber], + ) + req.NoError(err, "Miner %s: SealPreCommitPhase2 failed for sector %d", tm.ActorAddr, sectorNumber) + + tm.t.Logf("Miner %s: Unsealed CID for sector %d: %s", tm.ActorAddr, sectorNumber, unsealedCid) + tm.t.Logf("Miner %s: Sealed CID for sector %d: %s", tm.ActorAddr, sectorNumber, sealedCid) + + tm.sealTickets[sectorNumber] = sealTickets + tm.sealedCids[sectorNumber] = sealedCid + tm.unsealedCids[sectorNumber] = unsealedCid +} + +func (tm *TestUnmanagedMiner) proveCommitWaitSeed(ctx context.Context, sectorNumber abi.SectorNumber) abi.InteractiveSealRandomness { + req := require.New(tm.t) + head, err := tm.FullNode.ChainHead(ctx) + req.NoError(err) + + tm.t.Logf("Miner %s: Fetching pre-commit info for sector %d...", tm.ActorAddr, sectorNumber) + preCommitInfo, err := tm.FullNode.StateSectorPreCommitInfo(ctx, tm.ActorAddr, sectorNumber, head.Key()) + req.NoError(err) + seedRandomnessHeight := preCommitInfo.PreCommitEpoch + policy.GetPreCommitChallengeDelay() + + tm.t.Logf("Miner %s: Waiting %d epochs for seed randomness at epoch %d (current epoch %d) for sector %d...", tm.ActorAddr, seedRandomnessHeight-head.Height(), seedRandomnessHeight, head.Height(), sectorNumber) + tm.FullNode.WaitTillChain(ctx, HeightAtLeast(seedRandomnessHeight+5)) + + minerAddrBytes := new(bytes.Buffer) + req.NoError(tm.ActorAddr.MarshalCBOR(minerAddrBytes)) + + head, err = tm.FullNode.ChainHead(ctx) + req.NoError(err) + + tm.t.Logf("Miner %s: Fetching seed randomness for sector %d...", tm.ActorAddr, sectorNumber) + rand, err := tm.FullNode.StateGetRandomnessFromBeacon(ctx, crypto.DomainSeparationTag_InteractiveSealChallengeSeed, seedRandomnessHeight, minerAddrBytes.Bytes(), head.Key()) + req.NoError(err) + seedRandomness := abi.InteractiveSealRandomness(rand) + + tm.t.Logf("Miner %s: Obtained seed randomness for sector %d: %x", tm.ActorAddr, sectorNumber, seedRandomness) + return seedRandomness +} + +func (tm *TestUnmanagedMiner) generateProveCommit( + ctx context.Context, + sectorNumber abi.SectorNumber, + proofType abi.RegisteredSealProof, + seedRandomness abi.InteractiveSealRandomness, + pieces []abi.PieceInfo, +) []byte { + tm.t.Logf("Miner %s: Generating proof type %d Sector Proof for sector %d...", tm.ActorAddr, proofType, sectorNumber) + req := require.New(tm.t) + + actorIdNum, err := address.IDFromAddress(tm.ActorAddr) + req.NoError(err) + actorId := abi.ActorID(actorIdNum) + + tm.t.Logf("Miner %s: Running proof type %d SealCommitPhase1 for sector %d...", tm.ActorAddr, proofType, sectorNumber) + + scp1, err := ffi.SealCommitPhase1( + proofType, + tm.sealedCids[sectorNumber], + tm.unsealedCids[sectorNumber], + tm.cacheDirPaths[sectorNumber], + tm.sealedSectorPaths[sectorNumber], + sectorNumber, + actorId, + tm.sealTickets[sectorNumber], + seedRandomness, + pieces, + ) + req.NoError(err) + + tm.t.Logf("Miner %s: Running proof type %d SealCommitPhase2 for sector %d...", tm.ActorAddr, proofType, sectorNumber) + + sectorProof, err := ffi.SealCommitPhase2(scp1, sectorNumber, actorId) + req.NoError(err) + + tm.t.Logf("Miner %s: Got proof type %d sector proof of length %d for sector %d", tm.ActorAddr, proofType, len(sectorProof), sectorNumber) + + return sectorProof +} + +func (tm *TestUnmanagedMiner) submitMessage( + ctx context.Context, + params cbg.CBORMarshaler, + value uint64, + method abi.MethodNum, +) (*api.MsgLookup, error) { + enc, aerr := actors.SerializeParams(params) + if aerr != nil { + return 
+func (tm *TestUnmanagedMiner) submitMessage(
+	ctx context.Context,
+	params cbg.CBORMarshaler,
+	value uint64,
+	method abi.MethodNum,
+) (*api.MsgLookup, error) {
+	enc, aerr := actors.SerializeParams(params)
+	if aerr != nil {
+		return nil, aerr
+	}
+
+	tm.t.Logf("Submitting message for miner %s with method number %d", tm.ActorAddr, method)
+
+	m, err := tm.FullNode.MpoolPushMessage(ctx, &types.Message{
+		To:     tm.ActorAddr,
+		From:   tm.OwnerKey.Address,
+		Value:  types.FromFil(value),
+		Method: method,
+		Params: enc,
+	}, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	tm.t.Logf("Pushed message with CID: %s for miner %s", m.Cid(), tm.ActorAddr)
+
+	msg, err := tm.FullNode.StateWaitMsg(ctx, m.Cid(), 2, api.LookbackNoLimit, true)
+	if err != nil {
+		return nil, err
+	}
+
+	tm.t.Logf("Message with CID: %s has been confirmed on-chain for miner %s", m.Cid(), tm.ActorAddr)
+
+	return msg, nil
+}
+
+func requireTempFile(t *testing.T, fileContentsReader io.Reader, size uint64) *os.File {
+	// Create a temporary file
+	tempFile, err := os.CreateTemp(t.TempDir(), "")
+	require.NoError(t, err)
+
+	// Copy contents from the reader to the temporary file
+	bytesCopied, err := io.Copy(tempFile, fileContentsReader)
+	require.NoError(t, err)
+
+	// Ensure the expected size matches the copied size
+	require.EqualValues(t, size, bytesCopied)
+
+	// Synchronize the file's content to disk
+	require.NoError(t, tempFile.Sync())
+
+	// Reset the file pointer to the beginning of the file
+	_, err = tempFile.Seek(0, io.SeekStart)
+	require.NoError(t, err)
+
+	return tempFile
+}
diff --git a/itests/manual_onboarding_test.go b/itests/manual_onboarding_test.go
new file mode 100644
index 000000000..f10c8b7c1
--- /dev/null
+++ b/itests/manual_onboarding_test.go
@@ -0,0 +1,174 @@
+package itests
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/filecoin-project/go-state-types/abi"
+
+	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/lotus/itests/kit"
+)
+
+const defaultSectorSize = abi.SectorSize(2 << 10) // 2KiB
+
+// TestManualSectorOnboarding manually onboards CC sectors, bypassing the lotus-miner onboarding
+// pathways
+func TestManualSectorOnboarding(t *testing.T) {
+	req := require.New(t)
+
+	for _, withMockProofs := range []bool{true, false} {
+		testName := "WithRealProofs"
+		if withMockProofs {
+			testName = "WithMockProofs"
+		}
+		t.Run(testName, func(t *testing.T) {
+			if testName == "WithRealProofs" {
+				kit.Expensive(t)
+			}
+			kit.QuietMiningLogs()
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			var (
+				// need to pick a blocktime value so that the test is not racy on CI, i.e. so that it
+				// doesn't run through its WindowPoSt deadlines too fast
+				blocktime = 2 * time.Millisecond
+				client    kit.TestFullNode
+				minerA    kit.TestMiner // A is a standard genesis miner
+			)
+
+			// Set up and begin mining with a single miner (A).
+			// Miner A will only be a genesis miner, with power allocated in the genesis block; it will
+			// not onboard any sectors from here on
+			kitOpts := []kit.EnsembleOpt{}
+			if withMockProofs {
+				kitOpts = append(kitOpts, kit.MockProofs())
+			}
+			ens := kit.NewEnsemble(t, kitOpts...).
+				FullNode(&client, kit.SectorSize(defaultSectorSize)).
+				// preseal more than the default number of sectors to ensure that the genesis miner has power,
+				// because our unmanaged miners won't produce blocks so we may get null rounds
+				Miner(&minerA, &client, kit.PresealSectors(5), kit.SectorSize(defaultSectorSize), kit.WithAllSubsystems()).
+				Start().
+				InterconnectAll()
+			blockMiners := ens.BeginMiningMustPost(blocktime)
+			req.Len(blockMiners, 1)
+			blockMiner := blockMiners[0]
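+
+			// The MustPost block miner checks that the miners it is watching submit their WindowPoSts
+			// on time; the unmanaged miners below are registered with it via WatchMinerForPost once
+			// they have a sector that needs proving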
+
+			// Instantiate MinerB to manually handle sector onboarding and power acquisition through
+			// sector activation. Unlike miners managed by the lotus-miner storage infrastructure,
+			// MinerB performs all of these tasks manually. Driven by the test kit, MinerB can use
+			// real proofs for sector onboarding and activation.
+			nodeOpts := []kit.NodeOpt{kit.SectorSize(defaultSectorSize), kit.OwnerAddr(client.DefaultKey)}
+			minerB, ens := ens.UnmanagedMiner(&client, nodeOpts...)
+			// MinerC is similar to MinerB, but onboards a sector with pieces instead of a pure CC sector
+			minerC, ens := ens.UnmanagedMiner(&client, nodeOpts...)
+
+			ens.Start()
+
+			build.Clock.Sleep(time.Second)
+
+			t.Log("Checking initial power ...")
+
+			// Miner A should have power, as it has already onboarded sectors in the genesis block
+			head, err := client.ChainHead(ctx)
+			req.NoError(err)
+			p, err := client.StateMinerPower(ctx, minerA.ActorAddr, head.Key())
+			req.NoError(err)
+			t.Logf("MinerA RBP: %v, QaP: %v", p.MinerPower.RawBytePower.String(), p.MinerPower.QualityAdjPower.String())
+
+			// Miner B should have no power, as it has yet to onboard and activate any sectors
+			minerB.AssertNoPower(ctx)
+
+			// Miner C should have no power, as it has yet to onboard and activate any sectors
+			minerC.AssertNoPower(ctx)
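+
+			// Each onboarding helper below returns the new sector's number, a channel that delivers
+			// the result of the sector's first WindowPoSt, and a CancelFunc that stops the kit from
+			// continuing to submit WindowPoSts for that sector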
+
+			// ---- Miner B onboards a CC sector
+			var bSectorNum abi.SectorNumber
+			var bRespCh chan kit.WindowPostResp
+			var bWdPostCancelF context.CancelFunc
+
+			if withMockProofs {
+				bSectorNum, bRespCh, bWdPostCancelF = minerB.OnboardCCSectorWithMockProofs(ctx, kit.TestSpt)
+			} else {
+				bSectorNum, bRespCh, bWdPostCancelF = minerB.OnboardCCSectorWithRealProofs(ctx, kit.TestSpt)
+			}
+			// Miner B should still have no power, as power is only gained once the sector is
+			// activated, i.e. once its first WindowPoSt is submitted
+			minerB.AssertNoPower(ctx)
+			// Ensure that the block miner checks for and waits for posts during the appropriate
+			// proving window from our new miner with a sector
+			blockMiner.WatchMinerForPost(minerB.ActorAddr)
+
+			// ---- Miner C onboards a sector with data/pieces
+			var cSectorNum abi.SectorNumber
+			var cRespCh chan kit.WindowPostResp
+
+			if withMockProofs {
+				cSectorNum, cRespCh, _ = minerC.OnboardSectorWithPiecesAndMockProofs(ctx, kit.TestSpt)
+			} else {
+				cSectorNum, cRespCh, _ = minerC.OnboardSectorWithPiecesAndRealProofs(ctx, kit.TestSpt)
+			}
+			// Miner C should still have no power, as power is only gained once the sector is
+			// activated, i.e. once its first WindowPoSt is submitted
+			minerC.AssertNoPower(ctx)
+			// Ensure that the block miner checks for and waits for posts during the appropriate
+			// proving window from our new miner with a sector
+			blockMiner.WatchMinerForPost(minerC.ActorAddr)
+
+			// Wait till both miners' sectors have had their first WindowPoSt and are activated, then
+			// check that this is reflected in miner power
+			waitTillActivatedAndAssertPower(ctx, t, minerB, bRespCh, bSectorNum, uint64(defaultSectorSize), withMockProofs)
+			waitTillActivatedAndAssertPower(ctx, t, minerC, cRespCh, cSectorNum, uint64(defaultSectorSize), withMockProofs)
+
+			// Miner B has activated its CC sector -> upgrade it with snap deals.
+			// Note: we can't snap a sector onboarded with mock proofs, because its WindowPoSt is
+			// successfully disputed and so it doesn't stay activated, and snapping is only for
+			// activated sectors
+			if !withMockProofs {
+				minerB.SnapDealWithRealProofs(ctx, kit.TestSpt, bSectorNum)
+				// cancel the WdPost for the CC sector as the corresponding CommR is no longer valid
+				bWdPostCancelF()
+			}
+		})
+	}
+}
+
+func waitTillActivatedAndAssertPower(ctx context.Context, t *testing.T, miner *kit.TestUnmanagedMiner, respCh chan kit.WindowPostResp, sector abi.SectorNumber,
+	sectorSize uint64, withMockProofs bool) {
+	req := require.New(t)
+	// wait till the sector is activated
+	select {
+	case resp := <-respCh:
+		req.NoError(resp.Error)
+		req.True(resp.Posted)
+	case <-ctx.Done():
+		t.Fatal("timed out waiting for sector activation")
+	}
+
+	// Fetch on-chain sector properties
+	head, err := miner.FullNode.ChainHead(ctx)
+	req.NoError(err)
+
+	soi, err := miner.FullNode.StateSectorGetInfo(ctx, miner.ActorAddr, sector, head.Key())
+	req.NoError(err)
+	t.Logf("Miner %s SectorOnChainInfo %d: %+v", miner.ActorAddr.String(), sector, soi)
+
+	_ = miner.FullNode.WaitTillChain(ctx, kit.HeightAtLeast(head.Height()+5))
+
+	t.Log("Checking power after PoSt ...")
+
+	// The miner should now have power
+	miner.AssertPower(ctx, sectorSize, sectorSize)
+
+	if withMockProofs {
+		// WindowPost dispute should succeed as we are using mock proofs
+		err := miner.SubmitPostDispute(ctx, sector)
+		require.NoError(t, err)
+	} else {
+		// WindowPost dispute should fail for a real proof
+		assertDisputeFails(ctx, t, miner, sector)
+	}
+}
+
+func assertDisputeFails(ctx context.Context, t *testing.T, miner *kit.TestUnmanagedMiner, sector abi.SectorNumber) {
+	err := miner.SubmitPostDispute(ctx, sector)
+	require.Error(t, err)
+	require.Contains(t, err.Error(), "failed to dispute valid post")
+	require.Contains(t, err.Error(), "(RetCode=16)")
+}