package sealing

import (
	"time"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/exitcode"
	"github.com/filecoin-project/go-statemachine"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors/policy"
	"github.com/filecoin-project/lotus/chain/types"
)
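
// handleFaulty handles sectors in the Faulty state. Fault handling now lives in
// the PoSt scheduler, so this handler is a no-op (see the TODO below).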
func (m *Sealing) handleFaulty(ctx statemachine.Context, sector SectorInfo) error {
	// TODO: noop because this is now handled by the PoSt scheduler. We can reuse
	// this state for tracking faulty sectors, or remove it when that won't be
	// a breaking change
	return nil
}
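
// handleFaultReported waits for the fault report message referenced by
// sector.FaultReportMsg to land on chain and checks its exit code before
// marking the sector as finally faulted.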
func (m *Sealing) handleFaultReported(ctx statemachine.Context, sector SectorInfo) error {
	if sector.FaultReportMsg == nil {
		return xerrors.Errorf("entered fault reported state without a FaultReportMsg cid")
	}

	mw, err := m.Api.StateWaitMsg(ctx.Context(), *sector.FaultReportMsg, build.MessageConfidence, api.LookbackNoLimit, true)
	if err != nil {
		return xerrors.Errorf("failed to wait for fault declaration: %w", err)
	}

	if mw.Receipt.ExitCode != 0 {
		log.Errorf("UNHANDLED: declaring sector fault failed (exit=%d, msg=%s) (id: %d)", mw.Receipt.ExitCode, *sector.FaultReportMsg, sector.SectorNumber)
		return xerrors.Errorf("UNHANDLED: submitting fault declaration failed (exit %d)", mw.Receipt.ExitCode)
	}

	return ctx.Send(SectorFaultedFinal{})
}
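
// handleTerminating is the first step of sector termination: it checks that the
// sector is live on chain, queues it with the terminator, and records the
// termination message CID (if any) for the wait step.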
func (m *Sealing) handleTerminating(ctx statemachine.Context, sector SectorInfo) error {
	// First step of sector termination
	// * See if sector is live
	// * If not, goto removing
	// * Add to termination queue
	// * Wait for message to land on-chain
	// * Check for correct termination
	// * wait for expiration (+winning lookback?)

	si, err := m.Api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, types.EmptyTSK)
	if err != nil {
		return ctx.Send(SectorTerminateFailed{xerrors.Errorf("getting sector info: %w", err)})
	}

	if si == nil {
		// either already terminated or not committed yet

		pci, err := m.Api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, types.EmptyTSK)
		if err != nil {
			return ctx.Send(SectorTerminateFailed{xerrors.Errorf("checking precommit presence: %w", err)})
		}
		if pci != nil {
			return ctx.Send(SectorTerminateFailed{xerrors.Errorf("sector was precommitted but not proven, remove instead of terminating")})
		}

		return ctx.Send(SectorRemove{})
	}

	termCid, terminated, err := m.terminator.AddTermination(ctx.Context(), m.minerSectorID(sector.SectorNumber))
	if err != nil {
		return ctx.Send(SectorTerminateFailed{xerrors.Errorf("queueing termination: %w", err)})
	}

	if terminated {
		return ctx.Send(SectorTerminating{Message: nil})
	}

	return ctx.Send(SectorTerminating{Message: &termCid})
}
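
// handleTerminateWait waits for the terminate message to land on chain and
// verifies that it executed successfully, then records the epoch at which the
// sector was terminated.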
func (m *Sealing) handleTerminateWait(ctx statemachine.Context, sector SectorInfo) error {
	if sector.TerminateMessage == nil {
		// No terminate message to wait for; use the current chain head as the
		// termination height.
		ts, err := m.Api.ChainHead(ctx.Context())
		if err != nil {
			return ctx.Send(SectorTerminateFailed{xerrors.Errorf("getting chain head: %w", err)})
		}

		return ctx.Send(SectorTerminated{TerminatedAt: ts.Height()})
	}

	mw, err := m.Api.StateWaitMsg(ctx.Context(), *sector.TerminateMessage, build.MessageConfidence, api.LookbackNoLimit, true)
	if err != nil {
		return ctx.Send(SectorTerminateFailed{xerrors.Errorf("waiting for terminate message to land on chain: %w", err)})
	}

	if mw.Receipt.ExitCode != exitcode.Ok {
		return ctx.Send(SectorTerminateFailed{xerrors.Errorf("terminate message failed to execute: exit %d", mw.Receipt.ExitCode)})
	}

	return ctx.Send(SectorTerminated{TerminatedAt: mw.Height})
}
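
// handleTerminateFinality waits until the chain head is past the termination
// epoch plus the winning PoSt sector set lookback, then moves the sector on to
// removal.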
func (m *Sealing) handleTerminateFinality(ctx statemachine.Context, sector SectorInfo) error {
	for {
		ts, err := m.Api.ChainHead(ctx.Context())
		if err != nil {
			return ctx.Send(SectorTerminateFailed{xerrors.Errorf("getting chain head: %w", err)})
		}

		nv, err := m.Api.StateNetworkVersion(ctx.Context(), ts.Key())
		if err != nil {
			return ctx.Send(SectorTerminateFailed{xerrors.Errorf("getting network version: %w", err)})
		}

		if ts.Height() >= sector.TerminatedAt+policy.GetWinningPoStSectorSetLookback(nv) {
			return ctx.Send(SectorRemove{})
		}

		// Lookback not yet reached; sleep for a while, then re-check the chain head.
		toWait := time.Duration(ts.Height()-sector.TerminatedAt+policy.GetWinningPoStSectorSetLookback(nv)) * time.Duration(build.BlockDelaySecs) * time.Second
		select {
		case <-time.After(toWait):
			continue
		case <-ctx.Context().Done():
			return ctx.Context().Err()
		}
	}
}
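
// handleRemoving asks the sealer to remove the sector's data from storage.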
func (m *Sealing) handleRemoving(ctx statemachine.Context, sector SectorInfo) error {
	if err := m.sealer.Remove(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber)); err != nil {
		return ctx.Send(SectorRemoveFailed{err})
	}

	return ctx.Send(SectorRemoved{})
}
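
// handleProvingSector handles sectors (re-)entering the Proving state. It drops
// any stale "available" tracking and cleans up deals that were assigned to the
// sector in earlier pipeline states.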
func (m *Sealing) handleProvingSector(ctx statemachine.Context, sector SectorInfo) error {
	// TODO: track sector health / expiration

	m.inputLk.Lock()
	// in case we revert into Proving without going into Available
	delete(m.available, m.minerSectorID(sector.SectorNumber))
	m.inputLk.Unlock()

	// guard against manual state updates from snap-deals states into Proving
	// note: normally snap deals should be aborted through the abort command, but
	// apparently sometimes some SPs would use update-state to force the sector back
	// into the Proving state, breaking the deal input pipeline in the process.
	m.cleanupAssignedDeals(sector)

	// TODO: Watch termination
	// TODO: Auto-extend if set

	return nil
}
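
// handleAvailableSector records the sector in the available set consulted by
// the deal input pipeline.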
func (m *Sealing) handleAvailableSector(ctx statemachine.Context, sector SectorInfo) error {
	m.inputLk.Lock()
	m.available[m.minerSectorID(sector.SectorNumber)] = struct{}{}
	m.inputLk.Unlock()

	// TODO: Watch termination
	// TODO: Auto-extend if set

	return nil
}