package itests

import (
	"bytes"
	"context"
	"crypto/rand"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/ipfs/go-cid"
	"github.com/ipld/go-ipld-prime"
	"github.com/ipld/go-ipld-prime/codec/dagcbor"
	"github.com/ipld/go-ipld-prime/codec/dagjson"
	"github.com/ipld/go-ipld-prime/node/basicnode"
	"github.com/ipld/go-ipld-prime/node/bindnode"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-address"
	cborutil "github.com/filecoin-project/go-cbor-util"
	"github.com/filecoin-project/go-commp-utils/nonffi"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/go-state-types/builtin"
	minertypes13 "github.com/filecoin-project/go-state-types/builtin/v13/miner"
	verifregtypes13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg"
	datacap2 "github.com/filecoin-project/go-state-types/builtin/v9/datacap"
	market2 "github.com/filecoin-project/go-state-types/builtin/v9/market"
	verifregtypes9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
	"github.com/filecoin-project/go-state-types/exitcode"
	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/api"
	lapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v1api"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/builtin/datacap"
	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
	minertypes "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
	"github.com/filecoin-project/lotus/chain/consensus/filcns"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/wallet/key"
	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/filecoin-project/lotus/lib/must"
	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/storage/pipeline/piece"
)
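
// TestActors13Migration upgrades a test network to network version 22 (actors v13) at epoch 10
// and mines past the upgrade height to check that the migration completes and the chain keeps
// progressing.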
func TestActors13Migration(t *testing.T) {
	var (
		blocktime = 2 * time.Millisecond
		ctx       = context.Background()
	)

	client, _, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.UpgradeSchedule(stmgr.Upgrade{
		Network: network.Version21,
		Height:  -1,
	}, stmgr.Upgrade{
		Network:   network.Version22,
		Height:    10,
		Migration: filcns.UpgradeActorsV13,
	}))
	ens.InterconnectAll().BeginMiningMustPost(blocktime)

	// mine past epoch 15, well beyond the v13 migration at epoch 10
	client.WaitTillChain(ctx, kit.HeightAtLeast(15))
}
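
// TestOnboardRawPiece onboards a raw piece (direct data onboarding, no market deal) into a
// sector and checks that the sector's CommD matches the piece CID.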
func TestOnboardRawPiece(t *testing.T) {
	kit.QuietMiningLogs()

	var (
		blocktime = 2 * time.Millisecond
		ctx       = context.Background()
	)

	client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC())
	ens.InterconnectAll().BeginMiningMustPost(blocktime)

	pieceSize := abi.PaddedPieceSize(2048).Unpadded()
	pieceData := make([]byte, pieceSize)
	_, _ = rand.Read(pieceData)

	dc, err := miner.ComputeDataCid(ctx, pieceSize, bytes.NewReader(pieceData))
	require.NoError(t, err)

	head, err := client.ChainHead(ctx)
	require.NoError(t, err)

	so, err := miner.SectorAddPieceToAny(ctx, pieceSize, bytes.NewReader(pieceData), piece.PieceDealInfo{
		PublishCid:   nil,
		DealID:       0,
		DealProposal: nil,
		DealSchedule: piece.DealSchedule{
			StartEpoch: head.Height() + 2880*2,
			EndEpoch:   head.Height() + 2880*400,
		},
		KeepUnsealed: true,
		PieceActivationManifest: &minertypes.PieceActivationManifest{
			CID:                   dc.PieceCID,
			Size:                  dc.Size,
			VerifiedAllocationKey: nil,
			Notify:                nil,
		},
	})
	require.NoError(t, err)

	// wait for the sector to commit and enter the proving state
	toCheck := map[abi.SectorNumber]struct{}{
		so.Sector: {},
	}

	miner.WaitSectorsProving(ctx, toCheck)

	si, err := miner.SectorsStatus(ctx, so.Sector, false)
	require.NoError(t, err)
	require.Equal(t, dc.PieceCID, *si.CommD)
}
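
// TestOnboardRawPieceVerified_WithActorEvents onboards a verified piece using direct data
// onboarding: it sets up a verifier, grants datacap to a verified client, creates an allocation
// for the piece, and onboards it so the miner claims the allocation. Along the way it checks
// that the expected actor events (verifier-balance, allocation, sector-activated, claim, ...)
// are observable via message receipts, the GetActorEvents API, and SubscribeActorEvents channels.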
func TestOnboardRawPieceVerified_WithActorEvents(t *testing.T) {
	kit.QuietMiningLogs()

	var (
		blocktime = 2 * time.Millisecond
		ctx       = context.Background()
	)

	rootKey, err := key.GenerateKey(types.KTSecp256k1)
	require.NoError(t, err)

	verifier1Key, err := key.GenerateKey(types.KTSecp256k1)
	require.NoError(t, err)

	verifiedClientKey, err := key.GenerateKey(types.KTBLS)
	require.NoError(t, err)

	bal, err := types.ParseFIL("100fil")
	require.NoError(t, err)

	client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(),
		kit.RootVerifier(rootKey, abi.NewTokenAmount(bal.Int64())),
		kit.Account(verifier1Key, abi.NewTokenAmount(bal.Int64())),
		kit.Account(verifiedClientKey, abi.NewTokenAmount(bal.Int64())),
	)

	/* --- Setup subscription channels for ActorEvents --- */

	// subscribe only to the miner actor's events
	minerEvtsChan, err := miner.FullNode.SubscribeActorEvents(ctx, &types.SubActorEventFilter{
		Filter: types.ActorEventFilter{
			Addresses: []address.Address{miner.ActorAddr},
		},
		Prefill: true,
	})
	require.NoError(t, err)
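
	// Event fields are raw CBOR blobs, so to filter on the "$type" field we match against the
	// dag-cbor encoding of the expected string (see stringToEventKey below); 0x51 is the
	// multicodec code for CBOR.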
	// subscribe only to sector-activated events
	sectorActivatedCbor := stringToEventKey(t, "sector-activated")
	sectorActivatedEvtsCh, err := miner.FullNode.SubscribeActorEvents(ctx, &types.SubActorEventFilter{
		Filter: types.ActorEventFilter{
			Fields: map[string][]types.ActorEventBlock{
				"$type": {
					{Codec: 0x51, Value: sectorActivatedCbor},
				},
			},
		},
		Prefill: true,
	})
	require.NoError(t, err)

	/* --- Start mining --- */

	ens.InterconnectAll().BeginMiningMustPost(blocktime)

	minerId, err := address.IDFromAddress(miner.ActorAddr)
	require.NoError(t, err)

	miner.PledgeSectors(ctx, 1, 0, nil)
	sl, err := miner.SectorsListNonGenesis(ctx)
	require.NoError(t, err)
	require.Len(t, sl, 1, "expected 1 sector")

	snum := sl[0]

	maddr, err := miner.ActorAddress(ctx)
	require.NoError(t, err)

	client.WaitForSectorActive(ctx, t, snum, maddr)

	/* --- Prepare piece for onboarding --- */

	pieceSize := abi.PaddedPieceSize(2048).Unpadded()
	pieceData := make([]byte, pieceSize)
	_, _ = rand.Read(pieceData)

	dc, err := miner.ComputeDataCid(ctx, pieceSize, bytes.NewReader(pieceData))
	require.NoError(t, err)

	/* --- Setup verified registry and client allocator --- */

	// fetch the verified registry root key (VRH)
	vrh, err := client.StateVerifiedRegistryRootKey(ctx, types.EmptyTSK)
	require.NoError(t, err)
	fmt.Println(vrh.String())

	// import the root key.
	rootAddr, err := client.WalletImport(ctx, &rootKey.KeyInfo)
	require.NoError(t, err)

	// import the verifier's key.
	verifier1Addr, err := client.WalletImport(ctx, &verifier1Key.KeyInfo)
	require.NoError(t, err)

	// import the verified client's key.
	verifiedClientAddr, err := client.WalletImport(ctx, &verifiedClientKey.KeyInfo)
	require.NoError(t, err)

	// make the verifier
	mkVerifier(ctx, t, client.FullNode.(*api.FullNodeStruct), rootAddr, verifier1Addr)

	// assign datacap to the verified client
	initialDatacap := big.NewInt(10000)

	params, err := actors.SerializeParams(&verifregtypes13.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: initialDatacap})
	require.NoError(t, err)

	msg := &types.Message{
		From:   verifier1Addr,
		To:     verifreg.Address,
		Method: verifreg.Methods.AddVerifiedClient,
		Params: params,
		Value:  big.Zero(),
	}

	sm, err := client.MpoolPushMessage(ctx, msg, nil)
	require.NoError(t, err)

	res, err := client.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
	require.NoError(t, err)
	require.EqualValues(t, 0, res.Receipt.ExitCode)

	/* --- Allocate datacap for the piece by the verified client --- */

	allocationRequest := verifregtypes13.AllocationRequest{
		Provider:   abi.ActorID(minerId),
		Data:       dc.PieceCID,
		Size:       dc.Size,
		TermMin:    verifregtypes13.MinimumVerifiedAllocationTerm,
		TermMax:    verifregtypes13.MaximumVerifiedAllocationTerm,
		Expiration: verifregtypes13.MaximumVerifiedAllocationExpiration,
	}

	allocationRequests := verifregtypes13.AllocationRequests{
		Allocations: []verifregtypes13.AllocationRequest{allocationRequest},
	}

	receiverParams, err := actors.SerializeParams(&allocationRequests)
	require.NoError(t, err)

	transferParams, err := actors.SerializeParams(&datacap2.TransferParams{
		To:           builtin.VerifiedRegistryActorAddr,
		Amount:       big.Mul(big.NewInt(int64(dc.Size)), builtin.TokenPrecision),
		OperatorData: receiverParams,
	})
	require.NoError(t, err)

	msg = &types.Message{
		To:     builtin.DatacapActorAddr,
		From:   verifiedClientAddr,
		Method: datacap.Methods.TransferExported,
		Params: transferParams,
		Value:  big.Zero(),
	}

	sm, err = client.MpoolPushMessage(ctx, msg, nil)
	require.NoError(t, err)

	res, err = client.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
	require.NoError(t, err)
	require.EqualValues(t, 0, res.Receipt.ExitCode)

	// check that we have an allocation
	allocations, err := client.StateGetAllocations(ctx, verifiedClientAddr, types.EmptyTSK)
	require.NoError(t, err)
	require.Len(t, allocations, 1) // allocation waiting to be claimed

	var allocationId verifregtypes13.AllocationId
	var clientId abi.ActorID
	for key, value := range allocations {
		allocationId = verifregtypes13.AllocationId(key)
		clientId = value.Client
		break
	}

	/* --- Onboard the piece --- */

	head, err := client.ChainHead(ctx)
	require.NoError(t, err)

	so, err := miner.SectorAddPieceToAny(ctx, pieceSize, bytes.NewReader(pieceData), piece.PieceDealInfo{
		PublishCid:   nil,
		DealID:       0,
		DealProposal: nil,
		DealSchedule: piece.DealSchedule{
			StartEpoch: head.Height() + 2880*2,
			EndEpoch:   head.Height() + 2880*400,
		},
		KeepUnsealed: true,
		PieceActivationManifest: &minertypes.PieceActivationManifest{
			CID:                   dc.PieceCID,
			Size:                  dc.Size,
			VerifiedAllocationKey: &minertypes13.VerifiedAllocationKey{Client: clientId, ID: allocationId},
			Notify:                nil,
		},
	})
	require.NoError(t, err)

	// wait for the sector to commit and enter the proving state
	miner.WaitSectorsProving(ctx, map[abi.SectorNumber]struct{}{
		so.Sector: {},
	})

	/* --- Verify that the piece has been onboarded --- */

	si, err := miner.SectorsStatus(ctx, so.Sector, true)
	require.NoError(t, err)
	require.Equal(t, dc.PieceCID, *si.CommD)

	require.Equal(t, big.Zero(), si.DealWeight)
	require.Equal(t, big.Mul(big.NewInt(int64(dc.Size)), big.NewInt(int64(si.Expiration-si.Activation))), si.VerifiedDealWeight)

	// check that the allocation is gone now that the miner has claimed it for the piece
	allocations, err = client.StateGetAllocations(ctx, verifiedClientAddr, types.EmptyTSK)
	require.NoError(t, err)
	require.Len(t, allocations, 0) // allocation has been claimed

	/* --- Tests for ActorEvents --- */

	// construct ActorEvents from messages and receipts
	eventsFromMessages := buildActorEventsFromMessages(ctx, t, miner.FullNode)
	fmt.Println("Events from message receipts:")
	printEvents(ctx, t, miner.FullNode, eventsFromMessages)

	require.GreaterOrEqual(t, len(eventsFromMessages), 8) // allow for additional events in the future

	// check for precisely these event counts
	for key, count := range map[string]int{
		"sector-precommitted": 2, // first to begin mining, second to onboard the piece
		"sector-activated":    2, // first to begin mining, second to onboard the piece
		"verifier-balance":    2, // first to set up the verifier, second to allocate datacap to the verified client
		"allocation":          1, // verified client allocates datacap to the miner
		"claim":               1, // miner claims the allocation for the piece
	} {
		keyBytes := stringToEventKey(t, key)
		found := 0
		for _, event := range eventsFromMessages {
			for _, e := range event.Entries {
				if e.Key == "$type" && bytes.Equal(e.Value, keyBytes) {
					found++
					break
				}
			}
		}
		require.Equal(t, count, found, "unexpected number of events for %s", key)
	}

	// verify that we can trace a datacap allocation through to a claim with the events, since this
	// information is not completely available from the state tree
	claims := buildClaimsFromEvents(ctx, t, eventsFromMessages, miner.FullNode)
	for _, claim := range claims {
		p, err := address.NewIDAddress(uint64(claim.Provider))
		require.NoError(t, err)
		c, err := address.NewIDAddress(uint64(claim.Client))
		require.NoError(t, err)
		fmt.Printf("Claim<provider=%s, client=%s, data=%s, size=%d, termMin=%d, termMax=%d, termStart=%d, sector=%d>\n",
			p, c, claim.Data, claim.Size, claim.TermMin, claim.TermMax, claim.TermStart, claim.Sector)
	}
	require.Equal(t, []*verifregtypes9.Claim{
		{
			Provider:  abi.ActorID(minerId),
			Client:    clientId,
			Data:      dc.PieceCID,
			Size:      dc.Size,
			TermMin:   verifregtypes13.MinimumVerifiedAllocationTerm,
			TermMax:   verifregtypes13.MaximumVerifiedAllocationTerm,
			TermStart: si.Activation,
			Sector:    so.Sector,
		},
	}, claims)

	// construct ActorEvents from the GetActorEvents API
	allEvtsFromGetAPI, err := miner.FullNode.GetActorEvents(ctx, &types.ActorEventFilter{
		FromEpoch: "earliest",
		ToEpoch:   "latest",
	})
	require.NoError(t, err)
	fmt.Println("Events from GetActorEvents:")
	printEvents(ctx, t, miner.FullNode, allEvtsFromGetAPI)
	// compare events from messages and receipts with events from the GetActorEvents API
	require.Equal(t, eventsFromMessages, allEvtsFromGetAPI)

	// construct ActorEvents from the subscription channel for just the miner actor
	var subMinerEvts []*types.ActorEvent
	for evt := range minerEvtsChan {
		subMinerEvts = append(subMinerEvts, evt)
		if len(subMinerEvts) == 4 {
			break
		}
	}
	var allMinerEvts []*types.ActorEvent
	for _, evt := range eventsFromMessages {
		if evt.EmitterAddr == miner.ActorAddr {
			allMinerEvts = append(allMinerEvts, evt)
		}
	}
	// compare events from messages and receipts with events from the subscription channel
	require.Equal(t, allMinerEvts, subMinerEvts)

	// construct ActorEvents from the subscription channel for just the sector-activated events
	var prefillSectorActivatedEvts []*types.ActorEvent
	for evt := range sectorActivatedEvtsCh {
		prefillSectorActivatedEvts = append(prefillSectorActivatedEvts, evt)
		if len(prefillSectorActivatedEvts) == 2 {
			break
		}
	}
	require.Len(t, prefillSectorActivatedEvts, 2)
	var sectorActivatedEvts []*types.ActorEvent
	for _, evt := range eventsFromMessages {
		for _, entry := range evt.Entries {
			if entry.Key == "$type" && bytes.Equal(entry.Value, sectorActivatedCbor) {
				sectorActivatedEvts = append(sectorActivatedEvts, evt)
				break
			}
		}
	}
	// compare events from messages and receipts with events from the subscription channel
	require.Equal(t, sectorActivatedEvts, prefillSectorActivatedEvts)

	// construct ActorEvents from the subscription channel for all actor events
	allEvtsCh, err := miner.FullNode.SubscribeActorEvents(ctx, &types.SubActorEventFilter{
		Filter: types.ActorEventFilter{
			FromEpoch: "earliest",
			ToEpoch:   "latest",
		},
		Prefill: true,
	})
	require.NoError(t, err)
	var prefillEvts []*types.ActorEvent
	for evt := range allEvtsCh {
		prefillEvts = append(prefillEvts, evt)
		if len(prefillEvts) == len(eventsFromMessages) {
			break
		}
	}
	// compare events from messages and receipts with events from the subscription channel
	require.Equal(t, eventsFromMessages, prefillEvts)
}
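
// buildClaimsFromEvents scans "claim" actor events for the claim ID and provider, then loads
// each corresponding claim from state with StateGetClaim.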
func buildClaimsFromEvents(ctx context.Context, t *testing.T, eventsFromMessages []*types.ActorEvent, node v1api.FullNode) []*verifregtypes9.Claim {
	claimKeyCbor := stringToEventKey(t, "claim")
	claims := make([]*verifregtypes9.Claim, 0)
	for _, event := range eventsFromMessages {
		var isClaim bool
		var claimId int64 = -1
		var providerId int64 = -1
		for _, e := range event.Entries {
			if e.Key == "$type" && bytes.Equal(e.Value, claimKeyCbor) {
				isClaim = true
			} else if isClaim && e.Key == "id" {
				nd, err := ipld.DecodeUsingPrototype(e.Value, dagcbor.Decode, bindnode.Prototype((*int64)(nil), nil))
				require.NoError(t, err)
				claimId = *bindnode.Unwrap(nd).(*int64)
			} else if isClaim && e.Key == "provider" {
				nd, err := ipld.DecodeUsingPrototype(e.Value, dagcbor.Decode, bindnode.Prototype((*int64)(nil), nil))
				require.NoError(t, err)
				providerId = *bindnode.Unwrap(nd).(*int64)
			}
		}
		// only look up the claim once both its ID and provider were found in the event entries
		if isClaim && claimId != -1 && providerId != -1 {
			provider, err := address.NewIDAddress(uint64(providerId))
			require.NoError(t, err)
			claim, err := node.StateGetClaim(ctx, provider, verifregtypes9.ClaimId(claimId), types.EmptyTSK)
			require.NoError(t, err)
			claims = append(claims, claim)
		}
	}
	return claims
}
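
// buildActorEventsFromMessages walks the chain from genesis to head and reconstructs ActorEvents
// from message receipts: for every receipt with an EventsRoot it loads the events and wraps them
// with their emitter, height, tipset CID, and message CID.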
func buildActorEventsFromMessages(ctx context.Context, t *testing.T, node v1api.FullNode) []*types.ActorEvent {
	actorEvents := make([]*types.ActorEvent, 0)

	head, err := node.ChainHead(ctx)
	require.NoError(t, err)
	for height := 0; height < int(head.Height()); height++ {
		// for each tipset
		ts, err := node.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(height), types.EmptyTSK)
		require.NoError(t, err)
		messages, err := node.ChainGetMessagesInTipset(ctx, ts.Key())
		require.NoError(t, err)
		if len(messages) == 0 {
			continue
		}
		for _, m := range messages {
			receipt, err := node.StateSearchMsg(ctx, types.EmptyTSK, m.Cid, -1, false)
			require.NoError(t, err)
			require.NotNil(t, receipt)
			// if the receipt has an events root, load the events it points to
			if receipt.Receipt.EventsRoot != nil {
				events, err := node.ChainGetEvents(ctx, *receipt.Receipt.EventsRoot)
				require.NoError(t, err)
				for _, evt := range events {
					// for each event
					addr, err := address.NewIDAddress(uint64(evt.Emitter))
					require.NoError(t, err)
					tsCid, err := ts.Key().Cid()
					require.NoError(t, err)

					actorEvents = append(actorEvents, &types.ActorEvent{
						Entries:     evt.Entries,
						EmitterAddr: addr,
						Reverted:    false,
						Height:      ts.Height(),
						TipSetKey:   tsCid,
						MsgCid:      m.Cid,
					})
				}
			}
		}
	}
	return actorEvents
}
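
// printEvents dumps a one-line, human-readable summary of each ActorEvent, decoding CBOR entry
// values to dag-json where possible.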
func printEvents(ctx context.Context, t *testing.T, node v1api.FullNode, events []*types.ActorEvent) {
	for _, event := range events {
		entryStrings := []string{
			fmt.Sprintf("height=%d", event.Height),
			fmt.Sprintf("msg=%s", event.MsgCid),
			fmt.Sprintf("emitter=%s", event.EmitterAddr),
			fmt.Sprintf("reverted=%t", event.Reverted),
		}
		for _, e := range event.Entries {
			// for each event entry
			entryStrings = append(entryStrings, fmt.Sprintf("%s=%s", e.Key, eventValueToDagJson(t, e.Codec, e.Value)))
		}
		fmt.Printf("Event<%s>\n", strings.Join(entryStrings, ", "))
	}
}

// stringToEventKey converts a string to a CBOR-encoded blob which matches what we expect from the
// actor events.
func stringToEventKey(t *testing.T, str string) []byte {
	dcb, err := ipld.Encode(basicnode.NewString(str), dagcbor.Encode)
	require.NoError(t, err)
	return dcb
}

// eventValueToDagJson converts an ActorEvent value to a JSON string for printing.
func eventValueToDagJson(t *testing.T, codec uint64, data []byte) string {
	switch codec {
	case 0x51: // CBOR multicodec code
		nd, err := ipld.Decode(data, dagcbor.Decode)
		require.NoError(t, err)
		byts, err := ipld.Encode(nd, dagjson.Encode)
		require.NoError(t, err)
		return string(byts)
	default:
		return fmt.Sprintf("0x%x", data)
	}
}
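
// mkVerifier makes addr a verifier with a fixed datacap allowance via an AddVerifier message
// from the root key, then checks the allowance in state.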
func mkVerifier(ctx context.Context, t *testing.T, api *api.FullNodeStruct, rootAddr address.Address, addr address.Address) {
	allowance := big.NewInt(100000000000)
	params, aerr := actors.SerializeParams(&verifregtypes13.AddVerifierParams{Address: addr, Allowance: allowance})
	require.NoError(t, aerr)

	msg := &types.Message{
		From:   rootAddr,
		To:     verifreg.Address,
		Method: verifreg.Methods.AddVerifier,
		Params: params,
		Value:  big.Zero(),
	}

	sm, err := api.MpoolPushMessage(ctx, msg, nil)
	require.NoError(t, err, "AddVerifier failed")

	res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
	require.NoError(t, err)
	require.EqualValues(t, 0, res.Receipt.ExitCode)

	verifierAllowance, err := api.StateVerifierStatus(ctx, addr, types.EmptyTSK)
	require.NoError(t, err)
	require.Equal(t, allowance, *verifierAllowance)
}
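
// makeMarketDealProposal builds and signs a minimal market deal proposal for the given piece,
// with the provider collateral set from the chain's minimum collateral bound.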
func makeMarketDealProposal(t *testing.T, client *kit.TestFullNode, miner *kit.TestMiner, data cid.Cid, ps abi.PaddedPieceSize, start, end abi.ChainEpoch) market2.ClientDealProposal {
	ca, err := client.WalletDefaultAddress(context.Background())
	require.NoError(t, err)

	ma, err := miner.ActorAddress(context.Background())
	require.NoError(t, err)

	dp := market2.DealProposal{
		PieceCID:             data,
		PieceSize:            ps,
		VerifiedDeal:         false,
		Client:               ca,
		Provider:             ma,
		Label:                must.One(market2.NewLabelFromString("wat")),
		StartEpoch:           start,
		EndEpoch:             end,
		StoragePricePerEpoch: big.Zero(),
		ProviderCollateral:   abi.TokenAmount{}, // set below
		ClientCollateral:     big.Zero(),
	}

	cb, err := client.StateDealProviderCollateralBounds(context.Background(), dp.PieceSize, dp.VerifiedDeal, types.EmptyTSK)
	require.NoError(t, err)
	dp.ProviderCollateral = big.Div(big.Mul(cb.Min, big.NewInt(2)), big.NewInt(2))

	buf, err := cborutil.Dump(&dp)
	require.NoError(t, err)
	sig, err := client.WalletSign(context.Background(), ca, buf)
	require.NoError(t, err)

	return market2.ClientDealProposal{
		Proposal:        dp,
		ClientSignature: *sig,
	}
}
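
// TestOnboardMixedMarketDDO onboards a market (PublishStorageDeals) piece and a raw DDO piece
// into the same sector, then checks the sector's CommD against both pieces and that the market
// deal was activated.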
func TestOnboardMixedMarketDDO(t *testing.T) {
	kit.QuietMiningLogs()

	var (
		blocktime = 2 * time.Millisecond
		ctx       = context.Background()
	)

	client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.MutateSealingConfig(func(sc *config.SealingConfig) {
		sc.RequireActivationSuccess = true
		sc.RequireNotificationSuccess = true
	}))
	ens.InterconnectAll().BeginMiningMustPost(blocktime)

	maddr, err := miner.ActorAddress(ctx)
	require.NoError(t, err)

	mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	var pieces []abi.PieceInfo
	var dealID abi.DealID

	{
		// market piece
		pieceSize := abi.PaddedPieceSize(2048 / 2).Unpadded()
		pieceData := make([]byte, pieceSize)
		_, _ = rand.Read(pieceData)

		dc, err := miner.ComputeDataCid(ctx, pieceSize, bytes.NewReader(pieceData))
		require.NoError(t, err)
		pieces = append(pieces, dc)

		head, err := client.ChainHead(ctx)
		require.NoError(t, err)

		// publish the deal (PublishStorageDeals, PSD)

		psdParams := market2.PublishStorageDealsParams{
			Deals: []market2.ClientDealProposal{
				makeMarketDealProposal(t, client, miner, dc.PieceCID, pieceSize.Padded(), head.Height()+2880*2, head.Height()+2880*400),
			},
		}

		psdMsg := &types.Message{
			To:   market.Address,
			From: mi.Worker,

			Method: market.Methods.PublishStorageDeals,
			Params: must.One(cborutil.Dump(&psdParams)),
		}

		smsg, err := client.MpoolPushMessage(ctx, psdMsg, nil)
		require.NoError(t, err)

		r, err := client.StateWaitMsg(ctx, smsg.Cid(), 1, stmgr.LookbackNoLimit, true)
		require.NoError(t, err)

		require.Equal(t, exitcode.Ok, r.Receipt.ExitCode)

		nv, err := client.StateNetworkVersion(ctx, types.EmptyTSK)
		require.NoError(t, err)

		res, err := market.DecodePublishStorageDealsReturn(r.Receipt.Return, nv)
		require.NoError(t, err)
		dealID = must.One(res.DealIDs())[0]

		mcid := smsg.Cid()

		so, err := miner.SectorAddPieceToAny(ctx, pieceSize, bytes.NewReader(pieceData), piece.PieceDealInfo{
			PublishCid:   &mcid,
			DealID:       dealID,
			DealProposal: &psdParams.Deals[0].Proposal,
			DealSchedule: piece.DealSchedule{
				StartEpoch: head.Height() + 2880*2,
				EndEpoch:   head.Height() + 2880*400,
			},
			PieceActivationManifest: nil,
			KeepUnsealed:            true,
		})
		require.NoError(t, err)

		require.Equal(t, abi.PaddedPieceSize(0), so.Offset)
		require.Equal(t, abi.SectorNumber(2), so.Sector)
	}

	{
		// raw DDO piece

		pieceSize := abi.PaddedPieceSize(2048 / 2).Unpadded()
		pieceData := make([]byte, pieceSize)
		_, _ = rand.Read(pieceData)

		dc, err := miner.ComputeDataCid(ctx, pieceSize, bytes.NewReader(pieceData))
		require.NoError(t, err)
		pieces = append(pieces, dc)

		head, err := client.ChainHead(ctx)
		require.NoError(t, err)

		so, err := miner.SectorAddPieceToAny(ctx, pieceSize, bytes.NewReader(pieceData), piece.PieceDealInfo{
			PublishCid:   nil,
			DealID:       0,
			DealProposal: nil,
			DealSchedule: piece.DealSchedule{
				StartEpoch: head.Height() + 2880*2,
				EndEpoch:   head.Height() + 2880*400,
			},
			KeepUnsealed: false,
			PieceActivationManifest: &minertypes.PieceActivationManifest{
				CID:                   dc.PieceCID,
				Size:                  dc.Size,
				VerifiedAllocationKey: nil,
				Notify:                nil,
			},
		})
		require.NoError(t, err)

		require.Equal(t, abi.PaddedPieceSize(1024), so.Offset)
		require.Equal(t, abi.SectorNumber(2), so.Sector)
	}

	toCheck := map[abi.SectorNumber]struct{}{
		2: {},
	}

	miner.WaitSectorsProving(ctx, toCheck)

	expectCommD, err := nonffi.GenerateUnsealedCID(abi.RegisteredSealProof_StackedDrg2KiBV1_1, pieces)
	require.NoError(t, err)

	si, err := miner.SectorsStatus(ctx, 2, false)
	require.NoError(t, err)
	require.Equal(t, expectCommD, *si.CommD)

	ds, err := client.StateMarketStorageDeal(ctx, dealID, types.EmptyTSK)
	require.NoError(t, err)

	require.NotEqual(t, -1, ds.State.SectorStartEpoch)
}
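
// TestOnboardRawPieceSnap pledges a CC sector, makes it available for deals, and onboards a raw
// piece into it via a snap (sector update) flow, then waits for the sector to prove.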
func TestOnboardRawPieceSnap(t *testing.T) {
	kit.QuietMiningLogs()

	var (
		blocktime = 2 * time.Millisecond
		ctx       = context.Background()
	)

	client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.MutateSealingConfig(func(sc *config.SealingConfig) {
		sc.PreferNewSectorsForDeals = false
		sc.MakeNewSectorForDeals = false
		sc.MakeCCSectorsAvailable = true
		sc.AggregateCommits = false
	}))
	ens.InterconnectAll().BeginMiningMustPost(blocktime)

	miner.PledgeSectors(ctx, 1, 0, nil)
	sl, err := miner.SectorsListNonGenesis(ctx)
	require.NoError(t, err)
	require.Len(t, sl, 1, "expected 1 sector")

	snum := sl[0]

	maddr, err := miner.ActorAddress(ctx)
	require.NoError(t, err)

	client.WaitForSectorActive(ctx, t, snum, maddr)

	pieceSize := abi.PaddedPieceSize(2048).Unpadded()
	pieceData := make([]byte, pieceSize)
	_, _ = rand.Read(pieceData)

	dc, err := miner.ComputeDataCid(ctx, pieceSize, bytes.NewReader(pieceData))
	require.NoError(t, err)

	head, err := client.ChainHead(ctx)
	require.NoError(t, err)

	so, err := miner.SectorAddPieceToAny(ctx, pieceSize, bytes.NewReader(pieceData), piece.PieceDealInfo{
		PublishCid:   nil,
		DealID:       0,
		DealProposal: nil,
		DealSchedule: piece.DealSchedule{
			StartEpoch: head.Height() + 2880*2,
			EndEpoch:   head.Height() + 2880*400, // todo set so that it works with the sector
		},
		KeepUnsealed: false,
		PieceActivationManifest: &minertypes.PieceActivationManifest{
			CID:                   dc.PieceCID,
			Size:                  dc.Size,
			VerifiedAllocationKey: nil,
			Notify:                nil,
		},
	})
	require.NoError(t, err)

	// wait for the sector to commit and enter the proving state
	toCheck := map[abi.SectorNumber]struct{}{
		so.Sector: {},
	}

	miner.WaitSectorsProving(ctx, toCheck)
}