2020-01-15 20:49:11 +00:00
|
|
|
package sealing
|
|
|
|
|
|
|
|
import (
|
2020-04-06 18:07:26 +00:00
|
|
|
"bytes"
|
2020-01-15 20:49:11 +00:00
|
|
|
"context"
|
2022-09-09 08:32:27 +00:00
|
|
|
"encoding/json"
|
2023-07-19 16:51:59 +00:00
|
|
|
"errors"
|
2022-09-09 08:32:27 +00:00
|
|
|
"io"
|
|
|
|
"net/http"
|
2023-07-19 16:51:59 +00:00
|
|
|
"time"
|
2020-09-17 02:34:13 +00:00
|
|
|
|
2020-09-30 17:32:19 +00:00
|
|
|
"github.com/ipfs/go-cid"
|
2020-04-06 18:07:26 +00:00
|
|
|
"golang.org/x/xerrors"
|
2020-01-15 20:49:11 +00:00
|
|
|
|
2024-01-25 14:15:55 +00:00
|
|
|
"github.com/filecoin-project/go-address"
|
|
|
|
cborutil "github.com/filecoin-project/go-cbor-util"
|
2021-08-26 14:22:43 +00:00
|
|
|
"github.com/filecoin-project/go-commp-utils/zerocomm"
|
2020-09-07 03:49:10 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/abi"
|
2022-09-06 15:49:29 +00:00
|
|
|
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
2020-09-07 03:49:10 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/big"
|
2022-06-14 15:00:51 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/builtin"
|
2024-01-25 14:15:55 +00:00
|
|
|
miner2 "github.com/filecoin-project/go-state-types/builtin/v13/miner"
|
|
|
|
verifreg13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg"
|
|
|
|
"github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
|
2020-09-07 03:49:10 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/crypto"
|
|
|
|
"github.com/filecoin-project/go-state-types/exitcode"
|
2021-05-17 20:51:29 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/network"
|
2022-06-14 15:00:51 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/proof"
|
2020-04-06 20:23:37 +00:00
|
|
|
"github.com/filecoin-project/go-statemachine"
|
2020-09-30 17:32:19 +00:00
|
|
|
|
2020-12-02 20:47:45 +00:00
|
|
|
"github.com/filecoin-project/lotus/api"
|
2022-06-16 11:15:49 +00:00
|
|
|
"github.com/filecoin-project/lotus/build"
|
2024-01-25 14:15:55 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
|
2023-08-29 13:16:05 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
2020-09-30 17:32:19 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/actors/policy"
|
2022-06-16 09:12:33 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/types"
|
2022-06-14 17:41:59 +00:00
|
|
|
"github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader"
|
2022-06-17 11:31:05 +00:00
|
|
|
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
2020-01-15 20:49:11 +00:00
|
|
|
)
|
|
|
|
|
2024-01-25 14:15:55 +00:00
|
|
|
// MinDDONetworkVersion is the earliest network version on which DDO
// (direct data onboarding) pieces are accepted; preCommitInfo rejects
// DDO pieces below this version.
const MinDDONetworkVersion = network.Version22

// DealSectorPriority is the priority value used for sectors containing
// deals. NOTE(review): consumers are outside this chunk — presumably the
// sealing scheduler; confirm before documenting further.
var DealSectorPriority = 1024

// MaxTicketAge is the maximum allowed age of a seal ticket before a new
// one must be fetched (see checkTicketExpired).
var MaxTicketAge = policy.MaxPreCommitRandomnessLookback
|
2020-06-24 21:55:41 +00:00
|
|
|
|
2022-11-07 16:31:53 +00:00
|
|
|
func (m *Sealing) cleanupAssignedDeals(sector SectorInfo) {
|
2021-02-09 17:44:41 +00:00
|
|
|
m.inputLk.Lock()
|
2022-04-20 21:34:28 +00:00
|
|
|
// make sure we are not accepting deals into this sector
|
2021-02-09 17:44:41 +00:00
|
|
|
for _, c := range m.assignedPieces[m.minerSectorID(sector.SectorNumber)] {
|
|
|
|
pp := m.pendingPieces[c]
|
|
|
|
delete(m.pendingPieces, c)
|
|
|
|
if pp == nil {
|
|
|
|
log.Errorf("nil assigned pending piece %s", c)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// todo: return to the sealing queue (this is extremely unlikely to happen)
|
2021-05-30 13:13:38 +00:00
|
|
|
pp.accepted(sector.SectorNumber, 0, xerrors.Errorf("sector %d entered packing state early", sector.SectorNumber))
|
2021-02-09 17:44:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
delete(m.openSectors, m.minerSectorID(sector.SectorNumber))
|
|
|
|
delete(m.assignedPieces, m.minerSectorID(sector.SectorNumber))
|
|
|
|
m.inputLk.Unlock()
|
2022-11-07 16:31:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) error {
|
|
|
|
m.cleanupAssignedDeals(sector)
|
2021-02-09 17:44:41 +00:00
|
|
|
|
2022-09-14 09:35:07 +00:00
|
|
|
// if this is a snapdeals sector, but it ended up not having any deals, abort the upgrade
|
2024-01-25 14:15:55 +00:00
|
|
|
if sector.State == SnapDealsPacking && !sector.hasData() {
|
2022-09-14 09:35:07 +00:00
|
|
|
return ctx.Send(SectorAbortUpgrade{xerrors.New("sector had no deals")})
|
|
|
|
}
|
|
|
|
|
2020-04-06 22:31:33 +00:00
|
|
|
log.Infow("performing filling up rest of the sector...", "sector", sector.SectorNumber)
|
2020-01-15 20:49:11 +00:00
|
|
|
|
2020-02-08 02:18:32 +00:00
|
|
|
var allocated abi.UnpaddedPieceSize
|
2020-04-08 14:52:20 +00:00
|
|
|
for _, piece := range sector.Pieces {
|
2024-01-25 14:15:55 +00:00
|
|
|
allocated += piece.Piece().Size.Unpadded()
|
2020-01-15 20:49:11 +00:00
|
|
|
}
|
|
|
|
|
2020-11-04 20:29:08 +00:00
|
|
|
ssize, err := sector.SectorType.SectorSize()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
ubytes := abi.PaddedPieceSize(ssize).Unpadded()
|
2020-01-15 20:49:11 +00:00
|
|
|
|
|
|
|
if allocated > ubytes {
|
|
|
|
return xerrors.Errorf("too much data in sector: %d > %d", allocated, ubytes)
|
|
|
|
}
|
|
|
|
|
|
|
|
fillerSizes, err := fillersFromRem(ubytes - allocated)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(fillerSizes) > 0 {
|
2020-04-06 22:31:33 +00:00
|
|
|
log.Warnf("Creating %d filler pieces for sector %d", len(fillerSizes), sector.SectorNumber)
|
2020-01-15 20:49:11 +00:00
|
|
|
}
|
|
|
|
|
2021-02-16 16:14:59 +00:00
|
|
|
fillerPieces, err := m.padSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.existingPieceSizes(), fillerSizes...)
|
2020-01-15 20:49:11 +00:00
|
|
|
if err != nil {
|
|
|
|
return xerrors.Errorf("filling up the sector (%v): %w", fillerSizes, err)
|
|
|
|
}
|
|
|
|
|
2020-04-07 21:44:33 +00:00
|
|
|
return ctx.Send(SectorPacked{FillerPieces: fillerPieces})
|
2020-01-15 20:49:11 +00:00
|
|
|
}
|
|
|
|
|
2022-06-17 11:31:05 +00:00
|
|
|
func (m *Sealing) padSector(ctx context.Context, sectorID storiface.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, sizes ...abi.UnpaddedPieceSize) ([]abi.PieceInfo, error) {
|
2021-02-16 16:14:59 +00:00
|
|
|
if len(sizes) == 0 {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
log.Infof("Pledge %d, contains %+v", sectorID, existingPieceSizes)
|
|
|
|
|
|
|
|
out := make([]abi.PieceInfo, len(sizes))
|
|
|
|
for i, size := range sizes {
|
2021-08-26 14:22:43 +00:00
|
|
|
expectCid := zerocomm.ZeroPieceCommitment(size)
|
|
|
|
|
2022-04-12 15:45:34 +00:00
|
|
|
ppi, err := m.sealer.AddPiece(ctx, sectorID, existingPieceSizes, size, nullreader.NewNullReader(size))
|
2021-02-16 16:14:59 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, xerrors.Errorf("add piece: %w", err)
|
|
|
|
}
|
2021-09-10 10:13:07 +00:00
|
|
|
if !expectCid.Equals(ppi.PieceCID) {
|
2021-08-26 14:22:43 +00:00
|
|
|
return nil, xerrors.Errorf("got unexpected padding piece CID: expected:%s, got:%s", expectCid, ppi.PieceCID)
|
|
|
|
}
|
2021-02-16 16:14:59 +00:00
|
|
|
|
|
|
|
existingPieceSizes = append(existingPieceSizes, size)
|
|
|
|
|
|
|
|
out[i] = ppi
|
|
|
|
}
|
|
|
|
|
|
|
|
return out, nil
|
|
|
|
}
|
|
|
|
|
2021-05-20 10:36:00 +00:00
|
|
|
func checkTicketExpired(ticket, head abi.ChainEpoch) bool {
|
|
|
|
return head-ticket > MaxTicketAge // TODO: allow configuring expected seal durations
|
2020-11-16 20:58:42 +00:00
|
|
|
}
|
|
|
|
|
2021-06-30 08:32:44 +00:00
|
|
|
func checkProveCommitExpired(preCommitEpoch, msd abi.ChainEpoch, currEpoch abi.ChainEpoch) bool {
|
|
|
|
return currEpoch > preCommitEpoch+msd
|
|
|
|
}
|
|
|
|
|
2021-07-01 02:53:42 +00:00
|
|
|
// getTicket fetches seal randomness (the "ticket") for the sector.
//
// Returns (ticket, ticket epoch, allocated, err), where allocated reports
// whether the sector number is already allocated on chain. On transient
// API failures it returns zero values with a nil error so the caller's
// state machine simply retries later; hard failures return a non-nil
// error together with the allocated flag, which handleGetTicket uses to
// decide between retrying, removing, or failing the sector.
func (m *Sealing) getTicket(ctx statemachine.Context, sector SectorInfo) (abi.SealRandomness, abi.ChainEpoch, bool, error) {
	ts, err := m.Api.ChainHead(ctx.Context())
	if err != nil {
		log.Errorf("getTicket: api error, not proceeding: %+v", err)
		return nil, 0, false, nil
	}

	// Query allocation up front: every error return below must carry a
	// valid allocated flag, since callers branch on it even when err != nil.
	allocated, aerr := m.Api.StateMinerSectorAllocated(ctx.Context(), m.maddr, sector.SectorNumber, types.EmptyTSK)
	if aerr != nil {
		log.Errorf("getTicket: api error, checking if sector is allocated: %+v", aerr)
		return nil, 0, false, nil
	}

	// Default ticket epoch: lookback from the current head. Overridden below
	// if a precommit already exists on chain.
	ticketEpoch := ts.Height() - policy.SealRandomnessLookback
	buf := new(bytes.Buffer)
	if err := m.maddr.MarshalCBOR(buf); err != nil {
		return nil, 0, allocated, err
	}

	pci, err := m.Api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, ts.Key())
	if err != nil {
		return nil, 0, allocated, xerrors.Errorf("getting precommit info: %w", err)
	}

	if pci != nil {
		// The sector is already precommitted: reuse its on-chain randomness
		// epoch, but fail if the prove-commit window has already closed.
		ticketEpoch = pci.Info.SealRandEpoch

		nv, err := m.Api.StateNetworkVersion(ctx.Context(), ts.Key())
		if err != nil {
			return nil, 0, allocated, xerrors.Errorf("getTicket: StateNetworkVersion: api error, not proceeding: %+v", err)
		}

		av, err := actorstypes.VersionForNetwork(nv)
		if err != nil {
			return nil, 0, allocated, xerrors.Errorf("getTicket: actor version for network error, not proceeding: %w", err)
		}
		msd, err := policy.GetMaxProveCommitDuration(av, sector.SectorType)
		if err != nil {
			return nil, 0, allocated, xerrors.Errorf("getTicket: max prove commit duration policy error, not proceeding: %w", err)
		}

		if checkProveCommitExpired(pci.PreCommitEpoch, msd, ts.Height()) {
			return nil, 0, allocated, xerrors.Errorf("ticket expired for precommitted sector")
		}
	}

	// allocated with no live precommit means the precommit expired on chain;
	// the caller will send SectorCommitFailed or SectorRemove.
	if pci == nil && allocated {
		return nil, 0, allocated, xerrors.Errorf("sector %s precommitted but expired", sector.SectorNumber)
	}

	rand, err := m.Api.StateGetRandomnessFromTickets(ctx.Context(), crypto.DomainSeparationTag_SealRandomness, ticketEpoch, buf.Bytes(), ts.Key())
	if err != nil {
		return nil, 0, allocated, err
	}

	return abi.SealRandomness(rand), ticketEpoch, allocated, nil
}
|
|
|
|
|
2020-09-29 07:57:36 +00:00
|
|
|
func (m *Sealing) handleGetTicket(ctx statemachine.Context, sector SectorInfo) error {
|
2021-07-01 02:53:42 +00:00
|
|
|
ticketValue, ticketEpoch, allocated, err := m.getTicket(ctx, sector)
|
2020-09-29 07:57:36 +00:00
|
|
|
if err != nil {
|
2020-10-13 19:35:29 +00:00
|
|
|
if allocated {
|
|
|
|
if sector.CommitMessage != nil {
|
|
|
|
// Some recovery paths with unfortunate timing lead here
|
|
|
|
return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector %s is committed but got into the GetTicket state", sector.SectorNumber)})
|
|
|
|
}
|
|
|
|
|
|
|
|
log.Errorf("Sector %s precommitted but expired", sector.SectorNumber)
|
|
|
|
return ctx.Send(SectorRemove{})
|
|
|
|
}
|
|
|
|
|
2020-09-29 07:57:36 +00:00
|
|
|
return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("getting ticket failed: %w", err)})
|
|
|
|
}
|
|
|
|
|
|
|
|
return ctx.Send(SectorTicket{
|
|
|
|
TicketValue: ticketValue,
|
|
|
|
TicketEpoch: ticketEpoch,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2023-07-19 16:51:59 +00:00
|
|
|
var SoftErrRetryWait = 5 * time.Second
|
|
|
|
|
|
|
|
func retrySoftErr(ctx context.Context, cb func() error) error {
|
|
|
|
for {
|
|
|
|
err := cb()
|
|
|
|
if err == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
var cerr storiface.WorkError
|
|
|
|
|
|
|
|
if errors.As(err, &cerr) {
|
|
|
|
switch cerr.ErrCode() {
|
|
|
|
case storiface.ErrTempWorkerRestart:
|
|
|
|
fallthrough
|
|
|
|
case storiface.ErrTempAllocateSpace:
|
|
|
|
// retry
|
2023-10-24 19:12:06 +00:00
|
|
|
log.Errorw("retrying soft error", "err", err, "code", cerr.ErrCode())
|
2023-07-19 16:51:59 +00:00
|
|
|
default:
|
|
|
|
// non-temp error
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2023-08-23 15:29:18 +00:00
|
|
|
// check if the context got cancelled early
|
2023-07-19 16:51:59 +00:00
|
|
|
if ctx.Err() != nil {
|
|
|
|
return ctx.Err()
|
|
|
|
}
|
|
|
|
|
2023-08-23 15:29:18 +00:00
|
|
|
// retry
|
2023-07-19 16:51:59 +00:00
|
|
|
time.Sleep(SoftErrRetryWait)
|
|
|
|
} else {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-06-02 21:45:28 +00:00
|
|
|
func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) error {
|
2022-08-24 19:27:27 +00:00
|
|
|
if err := checkPieces(ctx.Context(), m.maddr, sector.SectorNumber, sector.Pieces, m.Api, false); err != nil { // Sanity check state
|
2020-01-23 15:38:01 +00:00
|
|
|
switch err.(type) {
|
2020-01-23 16:02:55 +00:00
|
|
|
case *ErrApi:
|
2020-04-03 16:54:01 +00:00
|
|
|
log.Errorf("handlePreCommit1: api error, not proceeding: %+v", err)
|
2020-01-23 15:38:01 +00:00
|
|
|
return nil
|
2020-01-23 16:02:55 +00:00
|
|
|
case *ErrInvalidDeals:
|
2020-08-27 19:04:43 +00:00
|
|
|
log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err)
|
2020-08-27 21:14:46 +00:00
|
|
|
return ctx.Send(SectorInvalidDealIDs{Return: RetPreCommit1})
|
2020-01-23 16:02:55 +00:00
|
|
|
case *ErrExpiredDeals: // Probably not much we can do here, maybe re-pack the sector?
|
2020-08-27 20:41:35 +00:00
|
|
|
return ctx.Send(SectorDealsExpired{xerrors.Errorf("expired dealIDs in sector: %w", err)})
|
2020-01-23 15:38:01 +00:00
|
|
|
default:
|
|
|
|
return xerrors.Errorf("checkPieces sanity check error: %w", err)
|
|
|
|
}
|
2020-01-22 19:47:29 +00:00
|
|
|
}
|
|
|
|
|
2022-06-16 11:15:49 +00:00
|
|
|
ts, err := m.Api.ChainHead(ctx.Context())
|
2020-01-15 20:49:11 +00:00
|
|
|
if err != nil {
|
2020-09-29 07:57:36 +00:00
|
|
|
log.Errorf("handlePreCommit1: api error, not proceeding: %+v", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-06-16 11:15:49 +00:00
|
|
|
if checkTicketExpired(sector.TicketEpoch, ts.Height()) {
|
|
|
|
pci, err := m.Api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, ts.Key())
|
2021-06-30 08:32:44 +00:00
|
|
|
if err != nil {
|
|
|
|
log.Errorf("handlePreCommit1: StateSectorPreCommitInfo: api error, not proceeding: %+v", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if pci == nil {
|
|
|
|
return ctx.Send(SectorOldTicket{}) // go get new ticket
|
|
|
|
}
|
|
|
|
|
2022-06-16 11:15:49 +00:00
|
|
|
nv, err := m.Api.StateNetworkVersion(ctx.Context(), ts.Key())
|
2021-06-30 08:32:44 +00:00
|
|
|
if err != nil {
|
|
|
|
log.Errorf("handlePreCommit1: StateNetworkVersion: api error, not proceeding: %+v", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-09-06 15:49:29 +00:00
|
|
|
av, err := actorstypes.VersionForNetwork(nv)
|
2021-08-10 17:07:30 +00:00
|
|
|
if err != nil {
|
|
|
|
log.Errorf("handlePreCommit1: VersionForNetwork error, not proceeding: %w", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
msd, err := policy.GetMaxProveCommitDuration(av, sector.SectorType)
|
|
|
|
if err != nil {
|
|
|
|
log.Errorf("handlePreCommit1: GetMaxProveCommitDuration error, not proceeding: %w", err)
|
|
|
|
return nil
|
|
|
|
}
|
2021-06-30 08:32:44 +00:00
|
|
|
|
|
|
|
// if height > PreCommitEpoch + msd, there is no need to recalculate
|
2022-06-16 11:15:49 +00:00
|
|
|
if checkProveCommitExpired(pci.PreCommitEpoch, msd, ts.Height()) {
|
2021-06-30 08:32:44 +00:00
|
|
|
return ctx.Send(SectorOldTicket{}) // will be removed
|
|
|
|
}
|
2020-01-15 20:49:11 +00:00
|
|
|
}
|
|
|
|
|
2023-07-19 16:51:59 +00:00
|
|
|
var pc1o storiface.PreCommit1Out
|
|
|
|
err = retrySoftErr(ctx.Context(), func() (err error) {
|
|
|
|
pc1o, err = m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.TicketValue, sector.pieceInfos())
|
|
|
|
return err
|
|
|
|
})
|
2020-03-03 22:19:22 +00:00
|
|
|
if err != nil {
|
2020-06-04 15:29:31 +00:00
|
|
|
return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("seal pre commit(1) failed: %w", err)})
|
2020-03-03 22:19:22 +00:00
|
|
|
}
|
|
|
|
|
2020-04-03 16:54:01 +00:00
|
|
|
return ctx.Send(SectorPreCommit1{
|
|
|
|
PreCommit1Out: pc1o,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *Sealing) handlePreCommit2(ctx statemachine.Context, sector SectorInfo) error {
|
2023-07-19 16:51:59 +00:00
|
|
|
var cids storiface.SectorCids
|
|
|
|
|
|
|
|
err := retrySoftErr(ctx.Context(), func() (err error) {
|
|
|
|
cids, err = m.sealer.SealPreCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.PreCommit1Out)
|
|
|
|
return err
|
|
|
|
})
|
2020-01-15 20:49:11 +00:00
|
|
|
if err != nil {
|
2020-06-04 15:29:31 +00:00
|
|
|
return ctx.Send(SectorSealPreCommit2Failed{xerrors.Errorf("seal pre commit(2) failed: %w", err)})
|
2020-01-15 20:49:11 +00:00
|
|
|
}
|
|
|
|
|
2020-09-30 15:53:15 +00:00
|
|
|
if cids.Unsealed == cid.Undef {
|
|
|
|
return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("seal pre commit(2) returned undefined CommD")})
|
|
|
|
}
|
|
|
|
|
2020-04-03 16:54:01 +00:00
|
|
|
return ctx.Send(SectorPreCommit2{
|
2020-03-22 20:44:27 +00:00
|
|
|
Unsealed: cids.Unsealed,
|
|
|
|
Sealed: cids.Sealed,
|
2020-01-15 20:49:11 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2022-09-10 14:34:47 +00:00
|
|
|
// preCommitInfo assembles the on-chain SectorPreCommitInfo message params
// for the sector, along with the required deposit and the tipset the values
// were computed against.
//
// Returns (nil, 0, EmptyTSK, nil) when it has already dispatched a state
// machine event (or hit a transient API error) and the caller should do
// nothing; returns a non-nil error only for hard failures.
func (m *Sealing) preCommitInfo(ctx statemachine.Context, sector SectorInfo) (*miner.SectorPreCommitInfo, big.Int, types.TipSetKey, error) {
	ts, err := m.Api.ChainHead(ctx.Context())
	if err != nil {
		log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err)
		return nil, big.Zero(), types.EmptyTSK, nil
	}

	// Route each known precommit-check failure to its dedicated recovery
	// state; only unknown errors propagate to the caller.
	if err := checkPrecommit(ctx.Context(), m.Address(), sector, ts.Key(), ts.Height(), m.Api); err != nil {
		switch err := err.(type) {
		case *ErrApi:
			log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err)
			return nil, big.Zero(), types.EmptyTSK, nil
		case *ErrBadCommD: // TODO: Should this just back to packing? (not really needed since handlePreCommit1 will do that too)
			return nil, big.Zero(), types.EmptyTSK, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad CommD error: %w", err)})
		case *ErrExpiredTicket:
			return nil, big.Zero(), types.EmptyTSK, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("ticket expired: %w", err)})
		case *ErrBadTicket:
			return nil, big.Zero(), types.EmptyTSK, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad ticket: %w", err)})
		case *ErrInvalidDeals:
			log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err)
			return nil, big.Zero(), types.EmptyTSK, ctx.Send(SectorInvalidDealIDs{Return: RetPreCommitting})
		case *ErrExpiredDeals:
			return nil, big.Zero(), types.EmptyTSK, ctx.Send(SectorDealsExpired{xerrors.Errorf("sector deals expired: %w", err)})
		case *ErrPrecommitOnChain:
			return nil, big.Zero(), types.EmptyTSK, ctx.Send(SectorPreCommitLanded{TipSet: ts.Key()}) // we re-did precommit
		case *ErrSectorNumberAllocated:
			log.Errorf("handlePreCommitFailed: sector number already allocated, not proceeding: %+v", err)
			// TODO: check if the sector is committed (not sure how we'd end up here)
			return nil, big.Zero(), types.EmptyTSK, nil
		default:
			return nil, big.Zero(), types.EmptyTSK, xerrors.Errorf("checkPrecommit sanity check error: %w", err)
		}
	}

	expiration, err := m.pcp.Expiration(ctx.Context(), sector.Pieces...)
	if err != nil {
		return nil, big.Zero(), types.EmptyTSK, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("handlePreCommitting: failed to compute pre-commit expiry: %w", err)})
	}

	nv, err := m.Api.StateNetworkVersion(ctx.Context(), ts.Key())
	if err != nil {
		return nil, big.Zero(), types.EmptyTSK, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("failed to get network version: %w", err)})
	}

	av, err := actorstypes.VersionForNetwork(nv)
	if err != nil {
		return nil, big.Zero(), types.EmptyTSK, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("failed to get actors version: %w", err)})
	}
	msd, err := policy.GetMaxProveCommitDuration(av, sector.SectorType)
	if err != nil {
		return nil, big.Zero(), types.EmptyTSK, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("failed to get max prove commit duration: %w", err)})
	}

	// Clamp expiration from below so the sector lives at least the minimum
	// sector lifetime past the latest possible prove-commit.
	if minExpiration := sector.TicketEpoch + policy.MaxPreCommitRandomnessLookback + msd + miner.MinSectorExpiration; expiration < minExpiration {
		expiration = minExpiration
	}

	// Assume: both precommit msg & commit msg land on chain as early as possible
	maxExtension, err := policy.GetMaxSectorExpirationExtension(nv)
	if err != nil {
		return nil, big.Zero(), types.EmptyTSK, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("failed to get max extension: %w", err)})
	}

	// ...and clamp from above to the maximum allowed sector lifetime.
	maxExpiration := ts.Height() + policy.GetPreCommitChallengeDelay() + maxExtension
	if expiration > maxExpiration {
		expiration = maxExpiration
	}

	params := &miner.SectorPreCommitInfo{
		Expiration:   expiration,
		SectorNumber: sector.SectorNumber,
		SealProof:    sector.SectorType,

		SealedCID:     *sector.CommR,
		SealRandEpoch: sector.TicketEpoch,
	}

	if sector.hasData() {
		// only CC sectors don't have UnsealedCID
		params.UnsealedCid = sector.CommD

		// true when the sector has non-builtin-marked data
		sectorIsDDO := false

		// Collect builtin-market deal IDs unless a DDO piece is present;
		// once any DDO piece is seen, DealIDs must be empty and all deals
		// are conveyed in the later Commit message instead.
		for _, piece := range sector.Pieces {
			err := piece.handleDealInfo(handleDealInfoParams{
				FillerHandler: func(info UniversalPieceInfo) error {
					return nil // ignore
				},
				BuiltinMarketHandler: func(info UniversalPieceInfo) error {
					if sectorIsDDO {
						return nil // will be passed later in the Commit message
					}
					params.DealIDs = append(params.DealIDs, info.Impl().DealID)
					return nil
				},
				DDOHandler: func(info UniversalPieceInfo) error {
					if nv < MinDDONetworkVersion {
						return xerrors.Errorf("DDO sectors are not supported on network version %d", nv)
					}

					log.Infow("DDO piece in sector", "sector", sector.SectorNumber, "piece", info.String())

					sectorIsDDO = true

					// DDO sectors don't carry DealIDs, we will pass those
					// deals in the Commit message later
					params.DealIDs = nil
					return nil
				},
			})

			if err != nil {
				return nil, big.Zero(), types.EmptyTSK, xerrors.Errorf("handleDealInfo: %w", err)
			}
		}
	}

	collateral, err := m.Api.StateMinerPreCommitDepositForPower(ctx.Context(), m.maddr, *params, ts.Key())
	if err != nil {
		return nil, big.Zero(), types.EmptyTSK, xerrors.Errorf("getting initial pledge collateral: %w", err)
	}

	return params, collateral, ts.Key(), nil
}
|
|
|
|
|
|
|
|
func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInfo) error {
|
2023-08-08 12:43:00 +00:00
|
|
|
// note: this is a legacy state handler, normally new sectors won't enter this state
|
|
|
|
// but we keep this handler in order to not break existing sector state machines.
|
|
|
|
// todo: drop after nv21
|
|
|
|
return ctx.Send(SectorPreCommitBatch{})
|
2020-01-15 20:49:11 +00:00
|
|
|
}
|
|
|
|
|
2021-05-18 15:21:10 +00:00
|
|
|
func (m *Sealing) handleSubmitPreCommitBatch(ctx statemachine.Context, sector SectorInfo) error {
|
|
|
|
if sector.CommD == nil || sector.CommR == nil {
|
2021-06-01 12:35:30 +00:00
|
|
|
return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("sector had nil commR or commD")})
|
2021-05-18 15:21:10 +00:00
|
|
|
}
|
|
|
|
|
2022-09-10 14:34:47 +00:00
|
|
|
params, deposit, _, err := m.preCommitInfo(ctx, sector)
|
2021-07-15 11:42:07 +00:00
|
|
|
if err != nil {
|
2022-09-10 14:34:47 +00:00
|
|
|
return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("preCommitInfo: %w", err)})
|
2021-07-15 11:42:07 +00:00
|
|
|
}
|
|
|
|
if params == nil {
|
2022-09-10 14:34:47 +00:00
|
|
|
return nil // event was sent in preCommitInfo
|
2021-05-18 15:21:10 +00:00
|
|
|
}
|
|
|
|
|
2021-06-01 12:35:30 +00:00
|
|
|
res, err := m.precommiter.AddPreCommit(ctx.Context(), sector, deposit, params)
|
2021-05-18 15:21:10 +00:00
|
|
|
if err != nil {
|
2021-06-01 12:35:30 +00:00
|
|
|
return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("queuing precommit batch failed: %w", err)})
|
2021-05-18 15:21:10 +00:00
|
|
|
}
|
|
|
|
|
2021-06-01 12:35:30 +00:00
|
|
|
if res.Error != "" {
|
|
|
|
return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("precommit batch error: %s", res.Error)})
|
|
|
|
}
|
|
|
|
|
|
|
|
if res.Msg == nil {
|
|
|
|
return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("batch message was nil")})
|
2021-05-18 15:21:10 +00:00
|
|
|
}
|
|
|
|
|
2021-06-01 12:35:30 +00:00
|
|
|
return ctx.Send(SectorPreCommitBatchSent{*res.Msg})
|
2021-05-18 15:21:10 +00:00
|
|
|
}
|
|
|
|
|
2020-05-18 22:49:21 +00:00
|
|
|
func (m *Sealing) handlePreCommitWait(ctx statemachine.Context, sector SectorInfo) error {
|
2020-06-17 18:35:47 +00:00
|
|
|
if sector.PreCommitMessage == nil {
|
|
|
|
return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("precommit message was nil")})
|
|
|
|
}
|
|
|
|
|
2020-08-18 22:53:56 +00:00
|
|
|
// would be ideal to just use the events.Called handler, but it wouldn't be able to handle individual message timeouts
|
2020-04-06 22:31:33 +00:00
|
|
|
log.Info("Sector precommitted: ", sector.SectorNumber)
|
2022-06-16 10:47:19 +00:00
|
|
|
mw, err := m.Api.StateWaitMsg(ctx.Context(), *sector.PreCommitMessage, build.MessageConfidence, api.LookbackNoLimit, true)
|
2020-01-15 20:49:11 +00:00
|
|
|
if err != nil {
|
2020-04-03 16:54:01 +00:00
|
|
|
return ctx.Send(SectorChainPreCommitFailed{err})
|
2020-01-15 20:49:11 +00:00
|
|
|
}
|
|
|
|
|
2020-08-27 12:02:00 +00:00
|
|
|
switch mw.Receipt.ExitCode {
|
|
|
|
case exitcode.Ok:
|
|
|
|
// this is what we expect
|
2020-11-18 00:08:28 +00:00
|
|
|
case exitcode.SysErrInsufficientFunds:
|
|
|
|
fallthrough
|
2020-08-27 12:02:00 +00:00
|
|
|
case exitcode.SysErrOutOfGas:
|
2020-11-18 00:08:28 +00:00
|
|
|
// gas estimator guessed a wrong number / out of funds:
|
2020-08-27 12:02:00 +00:00
|
|
|
return ctx.Send(SectorRetryPreCommit{})
|
|
|
|
default:
|
2020-01-15 20:49:11 +00:00
|
|
|
log.Error("sector precommit failed: ", mw.Receipt.ExitCode)
|
|
|
|
err := xerrors.Errorf("sector precommit failed: %d", mw.Receipt.ExitCode)
|
2020-04-03 16:54:01 +00:00
|
|
|
return ctx.Send(SectorChainPreCommitFailed{err})
|
2020-01-15 20:49:11 +00:00
|
|
|
}
|
2020-08-27 12:02:00 +00:00
|
|
|
|
2020-04-06 22:31:33 +00:00
|
|
|
log.Info("precommit message landed on chain: ", sector.SectorNumber)
|
2020-01-15 20:49:11 +00:00
|
|
|
|
2022-06-16 10:47:19 +00:00
|
|
|
return ctx.Send(SectorPreCommitLanded{TipSet: mw.TipSet})
|
2020-05-18 22:49:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// handleWaitSeed schedules retrieval of the interactive PoRep seed: once the
// chain reaches the precommit epoch plus the challenge delay (with
// confidence), it draws beacon randomness and sends SectorSeedReady.
// Transient API errors log and return nil so the state is retried.
func (m *Sealing) handleWaitSeed(ctx statemachine.Context, sector SectorInfo) error {
	ts, err := m.Api.ChainHead(ctx.Context())
	if err != nil {
		log.Errorf("handleWaitSeed: api error, not proceeding: %+v", err)
		return nil
	}

	pci, err := m.Api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, ts.Key())
	if err != nil {
		return xerrors.Errorf("getting precommit info: %w", err)
	}
	if pci == nil {
		return ctx.Send(SectorChainPreCommitFailed{error: xerrors.Errorf("precommit info not found on chain")})
	}

	// epoch at which the interactive randomness becomes available
	randHeight := pci.PreCommitEpoch + policy.GetPreCommitChallengeDelay()

	err = m.events.ChainAt(context.Background(), func(ectx context.Context, _ *types.TipSet, curH abi.ChainEpoch) error {
		// in case of null blocks the randomness can land after the tipset we
		// get from the events API
		ts, err := m.Api.ChainHead(ctx.Context())
		if err != nil {
			// NOTE(review): prefix says handleCommitting but this runs in
			// handleWaitSeed - likely a copy-paste; confirm before changing.
			log.Errorf("handleCommitting: api error, not proceeding: %+v", err)
			return nil
		}

		buf := new(bytes.Buffer)
		if err := m.maddr.MarshalCBOR(buf); err != nil {
			return err
		}
		rand, err := m.Api.StateGetRandomnessFromBeacon(ectx, crypto.DomainSeparationTag_InteractiveSealChallengeSeed, randHeight, buf.Bytes(), ts.Key())
		if err != nil {
			err = xerrors.Errorf("failed to get randomness for computing seal proof (ch %d; rh %d; tsk %x): %w", curH, randHeight, ts.Key(), err)

			_ = ctx.Send(SectorChainPreCommitFailed{error: err})
			return err
		}

		_ = ctx.Send(SectorSeedReady{SeedValue: abi.InteractiveSealRandomness(rand), SeedEpoch: randHeight})

		return nil
	}, func(ctx context.Context, ts *types.TipSet) error {
		log.Warn("revert in interactive commit sector step")
		// TODO: need to cancel running process and restart...
		return nil
	}, InteractivePoRepConfidence, randHeight)
	if err != nil {
		log.Warn("waitForPreCommitMessage ChainAt errored: ", err)
	}

	return nil
}
|
|
|
|
|
|
|
|
// handleCommitting computes the sector's PoRep (Commit1 then Commit2), either
// locally via m.sealer or by POSTing JSON requests to per-sector remote
// endpoints, sanity-checks the resulting proof against the current chain head,
// and then emits either SectorProofReady (when FinalizeEarly is configured) or
// SectorCommitted. Remote-endpoint failures route to the dedicated
// SectorRemoteCommit{1,2}Failed recovery states.
func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) error {
	// If a commit message already exists (weird retry paths), check whether it
	// actually landed; if so, skip straight to waiting for it instead of
	// recomputing and re-submitting the proof.
	if sector.CommitMessage != nil {
		log.Warnf("sector %d entered committing state with a commit message cid", sector.SectorNumber)

		ml, err := m.Api.StateSearchMsg(ctx.Context(), types.EmptyTSK, *sector.CommitMessage, api.LookbackNoLimit, true)
		if err != nil {
			log.Warnf("sector %d searching existing commit message %s: %+v", sector.SectorNumber, *sector.CommitMessage, err)
		}

		if ml != nil {
			// some weird retry paths can lead here
			return ctx.Send(SectorRetryCommitWait{})
		}
	}

	cfg, err := m.getConfig()
	if err != nil {
		return xerrors.Errorf("getting config: %w", err)
	}

	if sector.CommD == nil || sector.CommR == nil {
		return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector had nil commR or commD")})
	}

	// Phase 1: produce the Commit1 output (input to Commit2).
	var c2in storiface.Commit1Out
	if sector.RemoteCommit1Endpoint == "" {
		// Local Commit1
		cids := storiface.SectorCids{
			Unsealed: *sector.CommD,
			Sealed:   *sector.CommR,
		}
		c2in, err = m.sealer.SealCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.TicketValue, sector.SeedValue, sector.pieceInfos(), cids)
		if err != nil {
			return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed(1): %w", err)})
		}
	} else {
		// Remote Commit1

		reqData := api.RemoteCommit1Params{
			Ticket:    sector.TicketValue,
			Seed:      sector.SeedValue,
			Unsealed:  *sector.CommD,
			Sealed:    *sector.CommR,
			ProofType: sector.SectorType,
		}
		reqBody, err := json.Marshal(&reqData)
		if err != nil {
			return xerrors.Errorf("marshaling remote commit1 request: %w", err)
		}

		req, err := http.NewRequest("POST", sector.RemoteCommit1Endpoint, bytes.NewReader(reqBody))
		if err != nil {
			return ctx.Send(SectorRemoteCommit1Failed{xerrors.Errorf("creating new remote commit1 request: %w", err)})
		}
		req.Header.Set("Content-Type", "application/json")
		req = req.WithContext(ctx.Context())
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return ctx.Send(SectorRemoteCommit1Failed{xerrors.Errorf("requesting remote commit1: %w", err)})
		}

		// NOTE(review): defer inside this branch holds the body open until the
		// whole handler returns (not just this branch) — harmless here since
		// there is at most one request per branch, but worth knowing.
		defer resp.Body.Close() //nolint:errcheck

		if resp.StatusCode != http.StatusOK {
			return ctx.Send(SectorRemoteCommit1Failed{xerrors.Errorf("remote commit1 received non-200 http response %s", resp.Status)})
		}

		// The raw response body IS the Commit1 output.
		c2in, err = io.ReadAll(resp.Body) // todo some len constraint
		if err != nil {
			return ctx.Send(SectorRemoteCommit1Failed{xerrors.Errorf("reading commit1 response: %w", err)})
		}
	}

	// Phase 2: turn the Commit1 output into the final PoRep proof.
	var porepProof storiface.Proof

	if sector.RemoteCommit2Endpoint == "" {
		// Local Commit2

		porepProof, err = m.sealer.SealCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), c2in)
		if err != nil {
			log.Errorw("Commit2 error", "error", err)

			return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed(2): %w", err)})
		}
	} else {
		// Remote Commit2

		reqData := api.RemoteCommit2Params{
			ProofType: sector.SectorType,
			Sector:    m.minerSectorID(sector.SectorNumber),

			Commit1Out: c2in,
		}
		reqBody, err := json.Marshal(&reqData)
		if err != nil {
			return xerrors.Errorf("marshaling remote commit2 request: %w", err)
		}

		req, err := http.NewRequest("POST", sector.RemoteCommit2Endpoint, bytes.NewReader(reqBody))
		if err != nil {
			return ctx.Send(SectorRemoteCommit2Failed{xerrors.Errorf("creating new remote commit2 request: %w", err)})
		}
		req.Header.Set("Content-Type", "application/json")
		req = req.WithContext(ctx.Context())
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return ctx.Send(SectorRemoteCommit2Failed{xerrors.Errorf("requesting remote commit2: %w", err)})
		}

		defer resp.Body.Close() //nolint:errcheck

		if resp.StatusCode != http.StatusOK {
			return ctx.Send(SectorRemoteCommit2Failed{xerrors.Errorf("remote commit2 received non-200 http response %s", resp.Status)})
		}

		// The raw response body IS the PoRep proof bytes.
		porepProof, err = io.ReadAll(resp.Body) // todo some len constraint
		if err != nil {
			return ctx.Send(SectorRemoteCommit2Failed{xerrors.Errorf("reading commit2 response: %w", err)})
		}
	}

	// Verify the proof against current chain state before handing it on; a
	// transient ChainHead failure just leaves the sector in this state.
	{
		ts, err := m.Api.ChainHead(ctx.Context())
		if err != nil {
			log.Errorf("handleCommitting: api error, not proceeding: %+v", err)
			return nil
		}

		if err := m.checkCommit(ctx.Context(), sector, porepProof, ts.Key()); err != nil {
			return ctx.Send(SectorCommitFailed{xerrors.Errorf("commit check error: %w", err)})
		}
	}

	if cfg.FinalizeEarly {
		return ctx.Send(SectorProofReady{
			Proof: porepProof,
		})
	}

	return ctx.Send(SectorCommitted{
		Proof: porepProof,
	})
}
|
|
|
|
|
|
|
|
func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo) error {
|
2024-01-25 14:15:55 +00:00
|
|
|
// TODO: Deprecate this path, always go through batcher, just respect the AggregateCommits config in there
|
|
|
|
|
2021-03-10 15:16:44 +00:00
|
|
|
cfg, err := m.getConfig()
|
|
|
|
if err != nil {
|
|
|
|
return xerrors.Errorf("getting config: %w", err)
|
|
|
|
}
|
2021-05-17 20:51:29 +00:00
|
|
|
|
2021-03-10 15:16:44 +00:00
|
|
|
if cfg.AggregateCommits {
|
2022-06-16 09:12:33 +00:00
|
|
|
nv, err := m.Api.StateNetworkVersion(ctx.Context(), types.EmptyTSK)
|
2021-05-17 20:51:29 +00:00
|
|
|
if err != nil {
|
|
|
|
return xerrors.Errorf("getting network version: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if nv >= network.Version13 {
|
|
|
|
return ctx.Send(SectorSubmitCommitAggregate{})
|
|
|
|
}
|
2021-03-10 15:16:44 +00:00
|
|
|
}
|
|
|
|
|
2022-06-16 11:15:49 +00:00
|
|
|
ts, err := m.Api.ChainHead(ctx.Context())
|
2020-04-09 17:34:07 +00:00
|
|
|
if err != nil {
|
2021-06-11 09:41:28 +00:00
|
|
|
log.Errorf("handleSubmitCommit: api error, not proceeding: %+v", err)
|
2020-04-09 17:34:07 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-06-16 11:15:49 +00:00
|
|
|
if err := m.checkCommit(ctx.Context(), sector, sector.Proof, ts.Key()); err != nil {
|
2020-04-04 01:50:05 +00:00
|
|
|
return ctx.Send(SectorCommitFailed{xerrors.Errorf("commit check error: %w", err)})
|
|
|
|
}
|
|
|
|
|
2020-04-06 18:07:26 +00:00
|
|
|
enc := new(bytes.Buffer)
|
2020-09-22 04:35:15 +00:00
|
|
|
params := &miner.ProveCommitSectorParams{
|
|
|
|
SectorNumber: sector.SectorNumber,
|
|
|
|
Proof: sector.Proof,
|
|
|
|
}
|
2020-09-17 02:34:13 +00:00
|
|
|
|
2020-09-22 04:35:15 +00:00
|
|
|
if err := params.MarshalCBOR(enc); err != nil {
|
|
|
|
return ctx.Send(SectorCommitFailed{xerrors.Errorf("could not serialize commit sector parameters: %w", err)})
|
2020-01-15 20:49:11 +00:00
|
|
|
}
|
|
|
|
|
2022-06-16 11:15:49 +00:00
|
|
|
mi, err := m.Api.StateMinerInfo(ctx.Context(), m.maddr, ts.Key())
|
2020-04-09 17:34:07 +00:00
|
|
|
if err != nil {
|
|
|
|
log.Errorf("handleCommitting: api error, not proceeding: %+v", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-06-16 11:15:49 +00:00
|
|
|
pci, err := m.Api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, ts.Key())
|
2020-06-26 15:58:29 +00:00
|
|
|
if err != nil {
|
|
|
|
return xerrors.Errorf("getting precommit info: %w", err)
|
|
|
|
}
|
|
|
|
if pci == nil {
|
|
|
|
return ctx.Send(SectorCommitFailed{error: xerrors.Errorf("precommit info not found on chain")})
|
|
|
|
}
|
|
|
|
|
2022-06-16 11:15:49 +00:00
|
|
|
collateral, err := m.Api.StateMinerInitialPledgeCollateral(ctx.Context(), m.maddr, pci.Info, ts.Key())
|
2020-08-17 07:11:47 +00:00
|
|
|
if err != nil {
|
|
|
|
return xerrors.Errorf("getting initial pledge collateral: %w", err)
|
|
|
|
}
|
|
|
|
|
2020-06-26 15:58:29 +00:00
|
|
|
collateral = big.Sub(collateral, pci.PreCommitDeposit)
|
|
|
|
if collateral.LessThan(big.Zero()) {
|
|
|
|
collateral = big.Zero()
|
|
|
|
}
|
|
|
|
|
2021-08-18 10:43:44 +00:00
|
|
|
collateral, err = collateralSendAmount(ctx.Context(), m.Api, m.maddr, cfg, collateral)
|
2021-07-12 16:46:05 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
2021-06-29 16:17:08 +00:00
|
|
|
}
|
|
|
|
|
2021-06-08 13:43:43 +00:00
|
|
|
goodFunds := big.Add(collateral, big.Int(m.feeCfg.MaxCommitGasFee))
|
2020-12-02 20:47:45 +00:00
|
|
|
|
2022-08-09 10:57:20 +00:00
|
|
|
from, _, err := m.addrSel.AddressFor(ctx.Context(), m.Api, mi, api.CommitAddr, goodFunds, collateral)
|
2020-12-02 20:47:45 +00:00
|
|
|
if err != nil {
|
2020-12-02 21:01:09 +00:00
|
|
|
return ctx.Send(SectorCommitFailed{xerrors.Errorf("no good address to send commit message from: %w", err)})
|
2020-12-02 20:47:45 +00:00
|
|
|
}
|
|
|
|
|
2020-08-27 10:57:08 +00:00
|
|
|
// TODO: check seed / ticket / deals are up to date
|
2022-06-16 13:50:41 +00:00
|
|
|
mcid, err := sendMsg(ctx.Context(), m.Api, from, m.maddr, builtin.MethodsMiner.ProveCommitSector, collateral, big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes())
|
2020-01-15 20:49:11 +00:00
|
|
|
if err != nil {
|
|
|
|
return ctx.Send(SectorCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)})
|
|
|
|
}
|
|
|
|
|
2020-08-27 10:57:08 +00:00
|
|
|
return ctx.Send(SectorCommitSubmitted{
|
2020-04-06 18:07:26 +00:00
|
|
|
Message: mcid,
|
2020-01-15 20:49:11 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2024-01-25 14:15:55 +00:00
|
|
|
// processPieces returns either:
// - a list of piece activation manifests
// - a list of deal IDs, if all non-filler pieces are deal-id pieces
//
// It makes two passes over sector.Pieces: the first only detects whether any
// piece is DDO (direct data onboarding); the second builds the outputs. When
// a DDO piece is present, builtin-market deals are also converted to
// activation manifests (with a market notification payload) instead of being
// collected as plain deal IDs, so the whole sector can activate via the DDO
// path. Filler pieces contribute to neither output.
func (m *Sealing) processPieces(ctx context.Context, sector SectorInfo) ([]miner.PieceActivationManifest, []abi.DealID, error) {
	pams := make([]miner.PieceActivationManifest, 0, len(sector.Pieces))
	dealIDs := make([]abi.DealID, 0, len(sector.Pieces))
	// hasDDO: set by the first pass; controls how builtin-market deals are
	// handled in the second pass.
	var hasDDO bool

	// Pass 1: detect whether this sector contains any DDO piece.
	for _, piece := range sector.Pieces {
		piece := piece

		// first figure out if this is a ddo sector
		err := piece.handleDealInfo(handleDealInfoParams{
			FillerHandler: func(info UniversalPieceInfo) error {
				// Fillers are implicit (todo review: Are they??)
				return nil
			},
			BuiltinMarketHandler: func(info UniversalPieceInfo) error {
				return nil
			},
			DDOHandler: func(info UniversalPieceInfo) error {
				hasDDO = true
				return nil
			},
		})
		if err != nil {
			return nil, nil, xerrors.Errorf("handleDealInfo: %w", err)
		}
	}
	// Pass 2: build the activation manifests / deal-ID list.
	for _, piece := range sector.Pieces {
		piece := piece

		err := piece.handleDealInfo(handleDealInfoParams{
			FillerHandler: func(info UniversalPieceInfo) error {
				// Fillers are implicit (todo review: Are they??)
				return nil
			},
			BuiltinMarketHandler: func(info UniversalPieceInfo) error {
				if hasDDO {
					// Mixed sector: lift this market deal into a DDO-style
					// activation manifest so it can activate alongside the
					// DDO pieces.
					alloc, err := m.Api.StateGetAllocationIdForPendingDeal(ctx, info.Impl().DealID, types.EmptyTSK)
					if err != nil {
						return xerrors.Errorf("getting allocation for deal %d: %w", info.Impl().DealID, err)
					}
					clid, err := m.Api.StateLookupID(ctx, info.Impl().DealProposal.Client, types.EmptyTSK)
					if err != nil {
						return xerrors.Errorf("getting client address for deal %d: %w", info.Impl().DealID, err)
					}

					clientId, err := address.IDFromAddress(clid)
					if err != nil {
						return xerrors.Errorf("getting client address for deal %d: %w", info.Impl().DealID, err)
					}

					// Only verified deals (with a pending allocation) carry
					// an allocation key; nil means an unverified deal.
					var vac *miner2.VerifiedAllocationKey
					if alloc != verifreg.NoAllocationID {
						vac = &miner2.VerifiedAllocationKey{
							Client: abi.ActorID(clientId),
							ID:     verifreg13.AllocationId(alloc),
						}
					}

					// The market actor is notified with the CBOR-encoded deal
					// ID on activation.
					payload, err := cborutil.Dump(info.Impl().DealID)
					if err != nil {
						return xerrors.Errorf("serializing deal id: %w", err)
					}

					pams = append(pams, miner.PieceActivationManifest{
						CID:                   piece.Piece().PieceCID,
						Size:                  piece.Piece().Size,
						VerifiedAllocationKey: vac,
						Notify: []miner2.DataActivationNotification{
							{
								Address: market.Address,
								Payload: payload,
							},
						},
					})

					return nil
				}

				// No DDO pieces in this sector: keep the legacy deal-ID path.
				dealIDs = append(dealIDs, info.Impl().DealID)
				return nil
			},
			DDOHandler: func(info UniversalPieceInfo) error {
				// DDO pieces already carry their manifest; use it verbatim.
				pams = append(pams, *piece.Impl().PieceActivationManifest)
				return nil
			},
		})
		if err != nil {
			return nil, nil, xerrors.Errorf("handleDealInfo: %w", err)
		}
	}

	return pams, dealIDs, nil
}
|
|
|
|
|
2021-03-10 15:16:44 +00:00
|
|
|
func (m *Sealing) handleSubmitCommitAggregate(ctx statemachine.Context, sector SectorInfo) error {
|
|
|
|
if sector.CommD == nil || sector.CommR == nil {
|
|
|
|
return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector had nil commR or commD")})
|
|
|
|
}
|
|
|
|
|
2024-01-25 14:15:55 +00:00
|
|
|
pams, dealIDs, err := m.processPieces(ctx.Context(), sector)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2021-06-01 09:56:19 +00:00
|
|
|
res, err := m.commiter.AddCommit(ctx.Context(), sector, AggregateInput{
|
2021-06-09 15:18:09 +00:00
|
|
|
Info: proof.AggregateSealVerifyInfo{
|
2021-03-10 15:16:44 +00:00
|
|
|
Number: sector.SectorNumber,
|
|
|
|
Randomness: sector.TicketValue,
|
|
|
|
InteractiveRandomness: sector.SeedValue,
|
|
|
|
SealedCID: *sector.CommR,
|
|
|
|
UnsealedCID: *sector.CommD,
|
|
|
|
},
|
2024-01-25 14:15:55 +00:00
|
|
|
Proof: sector.Proof,
|
2021-06-09 15:18:09 +00:00
|
|
|
Spt: sector.SectorType,
|
2024-01-25 14:15:55 +00:00
|
|
|
|
|
|
|
ActivationManifest: miner2.SectorActivationManifest{
|
|
|
|
SectorNumber: sector.SectorNumber,
|
|
|
|
Pieces: pams,
|
|
|
|
},
|
|
|
|
DealIDPrecommit: len(dealIDs) > 0,
|
2021-03-10 15:16:44 +00:00
|
|
|
})
|
|
|
|
|
2021-09-01 09:31:49 +00:00
|
|
|
if err != nil || res.Error != "" {
|
2022-06-16 11:15:49 +00:00
|
|
|
ts, err := m.Api.ChainHead(ctx.Context())
|
2021-06-25 09:25:17 +00:00
|
|
|
if err != nil {
|
|
|
|
log.Errorf("handleSubmitCommit: api error, not proceeding: %+v", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-06-16 11:15:49 +00:00
|
|
|
if err := m.checkCommit(ctx.Context(), sector, sector.Proof, ts.Key()); err != nil {
|
2021-06-25 09:25:17 +00:00
|
|
|
return ctx.Send(SectorCommitFailed{xerrors.Errorf("commit check error: %w", err)})
|
|
|
|
}
|
|
|
|
|
|
|
|
return ctx.Send(SectorRetrySubmitCommit{})
|
2021-06-01 09:56:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if e, found := res.FailedSectors[sector.SectorNumber]; found {
|
|
|
|
return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector failed in aggregate processing: %s", e)})
|
|
|
|
}
|
|
|
|
|
|
|
|
if res.Msg == nil {
|
|
|
|
return ctx.Send(SectorCommitFailed{xerrors.Errorf("aggregate message was nil")})
|
|
|
|
}
|
|
|
|
|
|
|
|
return ctx.Send(SectorCommitAggregateSent{*res.Msg})
|
2021-03-10 15:16:44 +00:00
|
|
|
}
|
|
|
|
|
2020-01-15 20:49:11 +00:00
|
|
|
func (m *Sealing) handleCommitWait(ctx statemachine.Context, sector SectorInfo) error {
|
|
|
|
if sector.CommitMessage == nil {
|
2020-04-06 22:31:33 +00:00
|
|
|
log.Errorf("sector %d entered commit wait state without a message cid", sector.SectorNumber)
|
2020-01-15 20:49:11 +00:00
|
|
|
return ctx.Send(SectorCommitFailed{xerrors.Errorf("entered commit wait with no commit cid")})
|
|
|
|
}
|
|
|
|
|
2022-06-16 10:47:19 +00:00
|
|
|
mw, err := m.Api.StateWaitMsg(ctx.Context(), *sector.CommitMessage, build.MessageConfidence, api.LookbackNoLimit, true)
|
2020-01-15 20:49:11 +00:00
|
|
|
if err != nil {
|
|
|
|
return ctx.Send(SectorCommitFailed{xerrors.Errorf("failed to wait for porep inclusion: %w", err)})
|
|
|
|
}
|
|
|
|
|
2020-08-27 12:02:00 +00:00
|
|
|
switch mw.Receipt.ExitCode {
|
|
|
|
case exitcode.Ok:
|
|
|
|
// this is what we expect
|
2020-11-18 00:08:28 +00:00
|
|
|
case exitcode.SysErrInsufficientFunds:
|
|
|
|
fallthrough
|
2020-08-27 12:02:00 +00:00
|
|
|
case exitcode.SysErrOutOfGas:
|
2020-11-18 00:08:28 +00:00
|
|
|
// gas estimator guessed a wrong number / out of funds
|
2020-08-27 12:02:00 +00:00
|
|
|
return ctx.Send(SectorRetrySubmitCommit{})
|
|
|
|
default:
|
2020-04-06 18:07:26 +00:00
|
|
|
return ctx.Send(SectorCommitFailed{xerrors.Errorf("submitting sector proof failed (exit=%d, msg=%s) (t:%x; s:%x(%d); p:%x)", mw.Receipt.ExitCode, sector.CommitMessage, sector.TicketValue, sector.SeedValue, sector.SeedEpoch, sector.Proof)})
|
2020-01-15 20:49:11 +00:00
|
|
|
}
|
|
|
|
|
2022-06-16 10:47:19 +00:00
|
|
|
si, err := m.Api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, mw.TipSet)
|
2020-05-28 00:10:50 +00:00
|
|
|
if err != nil {
|
2020-08-27 11:51:13 +00:00
|
|
|
return ctx.Send(SectorCommitFailed{xerrors.Errorf("proof validation failed, calling StateSectorGetInfo: %w", err)})
|
|
|
|
}
|
|
|
|
if si == nil {
|
|
|
|
return ctx.Send(SectorCommitFailed{xerrors.Errorf("proof validation failed, sector not found in sector set after cron")})
|
2020-05-28 00:10:50 +00:00
|
|
|
}
|
|
|
|
|
2020-01-15 20:49:11 +00:00
|
|
|
return ctx.Send(SectorProving{})
|
|
|
|
}
|
|
|
|
|
2020-01-29 21:25:06 +00:00
|
|
|
func (m *Sealing) handleFinalizeSector(ctx statemachine.Context, sector SectorInfo) error {
|
2020-01-29 22:37:31 +00:00
|
|
|
// TODO: Maybe wait for some finality
|
|
|
|
|
2021-01-26 16:50:31 +00:00
|
|
|
cfg, err := m.getConfig()
|
|
|
|
if err != nil {
|
|
|
|
return xerrors.Errorf("getting sealing config: %w", err)
|
|
|
|
}
|
|
|
|
|
2022-11-16 16:07:42 +00:00
|
|
|
if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(sector.Pieces, false, cfg.AlwaysKeepUnsealedCopy)); err != nil {
|
|
|
|
return ctx.Send(SectorFinalizeFailed{xerrors.Errorf("release unsealed: %w", err)})
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := m.sealer.FinalizeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber)); err != nil {
|
2020-03-03 22:19:22 +00:00
|
|
|
return ctx.Send(SectorFinalizeFailed{xerrors.Errorf("finalize sector: %w", err)})
|
2020-01-29 21:25:06 +00:00
|
|
|
}
|
|
|
|
|
2024-01-25 14:15:55 +00:00
|
|
|
if cfg.MakeCCSectorsAvailable && !sector.hasData() {
|
2022-03-16 18:29:47 +00:00
|
|
|
return ctx.Send(SectorFinalizedAvailable{})
|
|
|
|
}
|
2020-01-29 21:25:06 +00:00
|
|
|
return ctx.Send(SectorFinalized{})
|
|
|
|
}
|