package sealing

import (
	"context"
	"sort"
	"time"

	"golang.org/x/xerrors"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-commp-utils/zerocomm"
	"github.com/filecoin-project/go-padreader"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/go-statemachine"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors/policy"
	"github.com/filecoin-project/lotus/chain/types"
	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
)

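// handleWaitDeals is the state handler for the WaitDeals sector state: it
// accounts for the space already used by pieces in the sector, registers the
// sector as open for new deal pieces, and either starts sealing (via
// maybeStartSealing) or keeps waiting for more pieces to be assigned.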
func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) error {
	var used abi.UnpaddedPieceSize
	for _, piece := range sector.Pieces {
		used += piece.Piece.Size.Unpadded()
	}

	m.inputLk.Lock()

	if m.creating != nil && *m.creating == sector.SectorNumber {
		m.creating = nil
	}

	sid := m.minerSectorID(sector.SectorNumber)

	if len(m.assignedPieces[sid]) > 0 {
		m.inputLk.Unlock()
		// got assigned more pieces in the AddPiece state
		return ctx.Send(SectorAddPiece{})
	}

	started, err := m.maybeStartSealing(ctx, sector, used)
	if err != nil || started {
		delete(m.openSectors, m.minerSectorID(sector.SectorNumber))

		m.inputLk.Unlock()

		return err
	}

	if _, has := m.openSectors[sid]; !has {
		m.openSectors[sid] = &openSector{
			used: used,
			maybeAccept: func(cid cid.Cid) error {
				// todo check deal start deadline (configurable)
				m.assignedPieces[sid] = append(m.assignedPieces[sid], cid)

				return ctx.Send(SectorAddPiece{})
			},
			number:   sector.SectorNumber,
			ccUpdate: sector.CCUpdate,
		}
	} else {
		// make sure we're only accounting for pieces which were correctly added
		// (note that m.assignedPieces[sid] will always be empty here)
		m.openSectors[sid].used = used
	}

	go func() {
		defer m.inputLk.Unlock()
		if err := m.updateInput(ctx.Context(), sector.SectorType); err != nil {
			log.Errorf("%+v", err)
		}
	}()

	return nil
}

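// maybeStartSealing decides whether a WaitDeals sector should start sealing
// now. It returns true (and sends SectorStartPacking) when the wait timer has
// expired, the per-sector deal limit is reached, the sector is full, or the
// configured WaitDealsDelay / deal start buffer has elapsed; otherwise it arms
// a timer and returns false.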
func (m *Sealing) maybeStartSealing(ctx statemachine.Context, sector SectorInfo, used abi.UnpaddedPieceSize) (bool, error) {
	now := time.Now()
	st := m.sectorTimers[m.minerSectorID(sector.SectorNumber)]
	if st != nil {
		if !st.Stop() { // timer expired, SectorStartPacking was/is being sent
			// we send another SectorStartPacking in case one was sent in the handleAddPiece state
			log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "wait-timeout")
			return true, ctx.Send(SectorStartPacking{})
		}
	}

	ssize, err := sector.SectorType.SectorSize()
	if err != nil {
		return false, xerrors.Errorf("getting sector size: %w", err)
	}

	maxDeals, err := getDealPerSectorLimit(ssize)
	if err != nil {
		return false, xerrors.Errorf("getting per-sector deal limit: %w", err)
	}

	if len(sector.dealIDs()) >= maxDeals {
		// can't accept more deals
		log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "maxdeals")
		return true, ctx.Send(SectorStartPacking{})
	}

	if used.Padded() == abi.PaddedPieceSize(ssize) {
		// sector full
		log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "filled")
		return true, ctx.Send(SectorStartPacking{})
	}

	if sector.CreationTime != 0 {
		cfg, err := m.getConfig()
		if err != nil {
			return false, xerrors.Errorf("getting storage config: %w", err)
		}

		sealTime := time.Unix(sector.CreationTime, 0).Add(cfg.WaitDealsDelay)

		// check deal age, start sealing when the deal closest to starting is within slack time
		_, current, err := m.Api.ChainHead(ctx.Context())
		if err != nil {
			return false, xerrors.Errorf("API error getting head: %w", err)
		}
		blockTime := time.Second * time.Duration(build.BlockDelaySecs)
		for _, piece := range sector.Pieces {
			if piece.DealInfo == nil {
				continue
			}
			dealSafeSealEpoch := piece.DealInfo.DealProposal.StartEpoch - cfg.StartEpochSealingBuffer
			dealSafeSealTime := time.Now().Add(time.Duration(dealSafeSealEpoch-current) * blockTime)
			if dealSafeSealTime.Before(sealTime) {
				sealTime = dealSafeSealTime
			}
		}

		if now.After(sealTime) {
			log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "wait-timeout")
			return true, ctx.Send(SectorStartPacking{})
		}

		m.sectorTimers[m.minerSectorID(sector.SectorNumber)] = time.AfterFunc(sealTime.Sub(now), func() {
			log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "wait-timer")

			if err := ctx.Send(SectorStartPacking{}); err != nil {
				log.Errorw("sending SectorStartPacking event failed", "sector", sector.SectorNumber, "error", err)
			}
		})
	}

	return false, nil
}

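// handleAddPiece writes the pieces assigned to this sector (plus any required
// zero-padding pieces) into the sector via the sealer, verifies the returned
// piece CIDs, and reports the result to each deal's accepted callback.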
func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) error {
	ssize, err := sector.SectorType.SectorSize()
	if err != nil {
		return err
	}

	res := SectorPieceAdded{}

	m.inputLk.Lock()

	pending, ok := m.assignedPieces[m.minerSectorID(sector.SectorNumber)]
	if ok {
		delete(m.assignedPieces, m.minerSectorID(sector.SectorNumber))
	}
	m.inputLk.Unlock()
	if !ok {
		// nothing to do here (might happen after a restart in AddPiece)
		return ctx.Send(res)
	}

	var offset abi.UnpaddedPieceSize
	pieceSizes := make([]abi.UnpaddedPieceSize, len(sector.Pieces))
	for i, p := range sector.Pieces {
		pieceSizes[i] = p.Piece.Size.Unpadded()
		offset += p.Piece.Size.Unpadded()
	}

	maxDeals, err := getDealPerSectorLimit(ssize)
	if err != nil {
		return xerrors.Errorf("getting per-sector deal limit: %w", err)
	}

	for i, piece := range pending {
		m.inputLk.Lock()
		deal, ok := m.pendingPieces[piece]
		m.inputLk.Unlock()
		if !ok {
			return xerrors.Errorf("piece %s assigned to sector %d not found", piece, sector.SectorNumber)
		}

		if len(sector.dealIDs())+(i+1) > maxDeals {
			// todo: this is rather unlikely to happen, but in case it does, return the deal to waiting queue instead of failing it
			deal.accepted(sector.SectorNumber, offset, xerrors.Errorf("too many deals assigned to sector %d, dropping deal", sector.SectorNumber))
			continue
		}

		pads, padLength := ffiwrapper.GetRequiredPadding(offset.Padded(), deal.size.Padded())

		if offset.Padded()+padLength+deal.size.Padded() > abi.PaddedPieceSize(ssize) {
			// todo: this is rather unlikely to happen, but in case it does, return the deal to waiting queue instead of failing it
			deal.accepted(sector.SectorNumber, offset, xerrors.Errorf("piece %s assigned to sector %d with not enough space", piece, sector.SectorNumber))
			continue
		}

		offset += padLength.Unpadded()

		for _, p := range pads {
			expectCid := zerocomm.ZeroPieceCommitment(p.Unpadded())

			ppi, err := m.sealer.AddPiece(sectorstorage.WithPriority(ctx.Context(), DealSectorPriority),
				m.minerSector(sector.SectorType, sector.SectorNumber),
				pieceSizes,
				p.Unpadded(),
				NewNullReader(p.Unpadded()))
			if err != nil {
				err = xerrors.Errorf("writing padding piece: %w", err)
				deal.accepted(sector.SectorNumber, offset, err)
				return ctx.Send(SectorAddPieceFailed{err})
			}
			if !ppi.PieceCID.Equals(expectCid) {
				err = xerrors.Errorf("got unexpected padding piece CID: expected:%s, got:%s", expectCid, ppi.PieceCID)
				deal.accepted(sector.SectorNumber, offset, err)
				return ctx.Send(SectorAddPieceFailed{err})
			}

			pieceSizes = append(pieceSizes, p.Unpadded())
			res.NewPieces = append(res.NewPieces, Piece{
				Piece: ppi,
			})
		}

		ppi, err := m.sealer.AddPiece(sectorstorage.WithPriority(ctx.Context(), DealSectorPriority),
			m.minerSector(sector.SectorType, sector.SectorNumber),
			pieceSizes,
			deal.size,
			deal.data)
		if err != nil {
			err = xerrors.Errorf("writing piece: %w", err)
			deal.accepted(sector.SectorNumber, offset, err)
			return ctx.Send(SectorAddPieceFailed{err})
		}
		if !ppi.PieceCID.Equals(deal.deal.DealProposal.PieceCID) {
			err = xerrors.Errorf("got unexpected piece CID: expected:%s, got:%s", deal.deal.DealProposal.PieceCID, ppi.PieceCID)
			deal.accepted(sector.SectorNumber, offset, err)
			return ctx.Send(SectorAddPieceFailed{err})
		}

		log.Infow("deal added to a sector", "deal", deal.deal.DealID, "sector", sector.SectorNumber, "piece", ppi.PieceCID)

		deal.accepted(sector.SectorNumber, offset, nil)

		offset += deal.size
		pieceSizes = append(pieceSizes, deal.size)

		res.NewPieces = append(res.NewPieces, Piece{
			Piece:    ppi,
			DealInfo: &deal.deal,
		})
	}

	return ctx.Send(res)
}

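// handleAddPieceFailed returns the sector to the WaitDeals state so it can
// accept new pieces after a failed AddPiece.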
func (m *Sealing) handleAddPieceFailed(ctx statemachine.Context, sector SectorInfo) error {
	return ctx.Send(SectorRetryWaitDeals{})
}

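// SectorAddPieceToAny adds a deal piece to any sector that can fit it,
// creating or upgrading a sector if needed. It validates the piece size and
// deal start epoch, registers the piece as pending, and blocks until the piece
// has been written into a sector (or the context is cancelled).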
func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, data storage.Data, deal api.PieceDealInfo) (api.SectorOffset, error) {
	log.Infof("Adding piece for deal %d (publish msg: %s)", deal.DealID, deal.PublishCid)
	if padreader.PaddedSize(uint64(size)) != size {
		return api.SectorOffset{}, xerrors.Errorf("cannot allocate unpadded piece")
	}

	sp, err := m.currentSealProof(ctx)
	if err != nil {
		return api.SectorOffset{}, xerrors.Errorf("getting current seal proof type: %w", err)
	}

	ssize, err := sp.SectorSize()
	if err != nil {
		return api.SectorOffset{}, err
	}

	if size > abi.PaddedPieceSize(ssize).Unpadded() {
		return api.SectorOffset{}, xerrors.Errorf("piece cannot fit into a sector")
	}

	if _, err := deal.DealProposal.Cid(); err != nil {
		return api.SectorOffset{}, xerrors.Errorf("getting proposal CID: %w", err)
	}

	cfg, err := m.getConfig()
	if err != nil {
		return api.SectorOffset{}, xerrors.Errorf("getting config: %w", err)
	}

	_, head, err := m.Api.ChainHead(ctx)
	if err != nil {
		return api.SectorOffset{}, xerrors.Errorf("couldn't get chain head: %w", err)
	}
	if head+cfg.StartEpochSealingBuffer > deal.DealProposal.StartEpoch {
		return api.SectorOffset{}, xerrors.Errorf(
			"cannot add piece for deal with piece CID %s: current epoch %d has passed deal proposal start epoch %d",
			deal.DealProposal.PieceCID, head, deal.DealProposal.StartEpoch)
	}

	m.inputLk.Lock()
	if pp, exist := m.pendingPieces[proposalCID(deal)]; exist {
		m.inputLk.Unlock()

		// we already have a pre-existing add piece call for this deal, let's wait for it to finish and see if it's successful
		res, err := waitAddPieceResp(ctx, pp)
		if err != nil {
			return api.SectorOffset{}, err
		}
		if res.err == nil {
			// all good, return the response
			return api.SectorOffset{Sector: res.sn, Offset: res.offset.Padded()}, res.err
		}
		// if there was an error waiting for a pre-existing add piece call, let's retry
		m.inputLk.Lock()
	}

	// addPendingPiece takes over m.inputLk
	pp := m.addPendingPiece(ctx, size, data, deal, sp)

	res, err := waitAddPieceResp(ctx, pp)
	if err != nil {
		return api.SectorOffset{}, err
	}
	return api.SectorOffset{Sector: res.sn, Offset: res.offset.Padded()}, res.err
}

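// addPendingPiece registers the deal piece as pending, wires up its accepted
// callback, and kicks off updateInput in the background.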
// called with m.inputLk; transfers the lock to another goroutine!
func (m *Sealing) addPendingPiece(ctx context.Context, size abi.UnpaddedPieceSize, data storage.Data, deal api.PieceDealInfo, sp abi.RegisteredSealProof) *pendingPiece {
	doneCh := make(chan struct{})
	pp := &pendingPiece{
		doneCh:   doneCh,
		size:     size,
		deal:     deal,
		data:     data,
		assigned: false,
	}
	pp.accepted = func(sn abi.SectorNumber, offset abi.UnpaddedPieceSize, err error) {
		pp.resp = &pieceAcceptResp{sn, offset, err}
		close(pp.doneCh)
	}

	m.pendingPieces[proposalCID(deal)] = pp
	go func() {
		defer m.inputLk.Unlock()
		if err := m.updateInput(ctx, sp); err != nil {
			log.Errorf("%+v", err)
		}
	}()

	return pp
}

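// waitAddPieceResp blocks until the pending piece has been accepted into a
// sector (pp.doneCh is closed) or the context is cancelled.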
func waitAddPieceResp(ctx context.Context, pp *pendingPiece) (*pieceAcceptResp, error) {
	select {
	case <-pp.doneCh:
		res := pp.resp
		return res, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

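// MatchPendingPiecesToOpenSectors re-runs the piece-to-sector matching for all
// pending pieces under the input lock.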
func (m *Sealing) MatchPendingPiecesToOpenSectors(ctx context.Context) error {
	sp, err := m.currentSealProof(ctx)
	if err != nil {
		return xerrors.Errorf("failed to get current seal proof: %w", err)
	}
	log.Debug("pieces to sector matching waiting for lock")
	m.inputLk.Lock()
	defer m.inputLk.Unlock()
	return m.updateInput(ctx, sp)
}

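// expFn resolves a sector's on-chain expiration epoch and initial pledge.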
type expFn func(sn abi.SectorNumber) (abi.ChainEpoch, abi.TokenAmount, error)
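// updateInput matches pending pieces against open sectors (preferring matches
// with the least padding), assigns them via maybeAccept, and creates or
// upgrades a sector when pieces are left unassigned.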
// called with m.inputLk
func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) error {
	memo := make(map[abi.SectorNumber]struct {
		e abi.ChainEpoch
		p abi.TokenAmount
	})
	expF := func(sn abi.SectorNumber) (abi.ChainEpoch, abi.TokenAmount, error) {
		if e, ok := memo[sn]; ok {
			return e.e, e.p, nil
		}
		onChainInfo, err := m.Api.StateSectorGetInfo(ctx, m.maddr, sn, TipSetToken{})
		if err != nil {
			return 0, big.Zero(), err
		}
		memo[sn] = struct {
			e abi.ChainEpoch
			p abi.TokenAmount
		}{e: onChainInfo.Expiration, p: onChainInfo.InitialPledge}
		return onChainInfo.Expiration, onChainInfo.InitialPledge, nil
	}

	ssize, err := sp.SectorSize()
	if err != nil {
		return err
	}

	type match struct {
		sector abi.SectorID
		deal   cid.Cid

		size    abi.UnpaddedPieceSize
		padding abi.UnpaddedPieceSize
	}

	var matches []match
	toAssign := map[cid.Cid]struct{}{} // used to maybe create new sectors

	// todo: this is distinctly O(n^2), may need to be optimized for tiny deals and large scale miners
	// (unlikely to be a problem now)
	for proposalCid, piece := range m.pendingPieces {
		if piece.assigned {
			continue // already assigned to a sector, skip
		}

		toAssign[proposalCid] = struct{}{}

		for id, sector := range m.openSectors {
			avail := abi.PaddedPieceSize(ssize).Unpadded() - sector.used
			// check that sector lifetime is long enough to fit deal using latest expiration from on chain

			ok, err := sector.dealFitsInLifetime(piece.deal.DealProposal.EndEpoch, expF)
			if err != nil {
				log.Errorf("failed to check expiration for CC update sector %d: %s", sector.number, err)
				continue
			}
			if !ok {
				exp, _, _ := expF(sector.number)
				log.Infof("CC update sector %d cannot fit deal, expiration %d before deal end epoch %d", id, exp, piece.deal.DealProposal.EndEpoch)
				continue
			}

			if piece.size <= avail { // (note: if we have enough space for the piece, we also have enough space for inter-piece padding)
				matches = append(matches, match{
					sector: id,
					deal:   proposalCid,

					size:    piece.size,
					padding: avail % piece.size,
				})
			}
		}
	}
	sort.Slice(matches, func(i, j int) bool {
		if matches[i].padding != matches[j].padding { // less padding is better
			return matches[i].padding < matches[j].padding
		}

		if matches[i].size != matches[j].size { // larger pieces are better
			return matches[i].size < matches[j].size
		}

		return matches[i].sector.Number < matches[j].sector.Number // prefer older sectors
	})

	var assigned int
	for _, mt := range matches {
		if m.pendingPieces[mt.deal].assigned {
			assigned++
			continue
		}

		if _, found := m.openSectors[mt.sector]; !found {
			continue
		}

		avail := abi.PaddedPieceSize(ssize).Unpadded() - m.openSectors[mt.sector].used

		if mt.size > avail {
			continue
		}

		err := m.openSectors[mt.sector].maybeAccept(mt.deal)
		if err != nil {
			m.pendingPieces[mt.deal].accepted(mt.sector.Number, 0, err) // the non-error case is handled in handleAddPiece
		}

		m.openSectors[mt.sector].used += mt.padding + mt.size

		m.pendingPieces[mt.deal].assigned = true
		delete(toAssign, mt.deal)

		if err != nil {
			log.Errorf("sector %d rejected deal %s: %+v", mt.sector, mt.deal, err)
			continue
		}
	}

	if len(toAssign) > 0 {
		log.Errorf("we are trying to create a new sector with open sectors %v", m.openSectors)
		if err := m.tryGetDealSector(ctx, sp, expF); err != nil {
			log.Errorw("Failed to create a new sector for deals", "error", err)
		}
	}

	return nil
}

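// calcTargetExpiration picks a target expiration epoch for an upgrade sector:
// the end epoch of the deal at which the earliest-expiring unassigned pieces
// would fill a sector, or, if the pending pieces wouldn't fill one, the
// current epoch plus the maximum deal duration.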
func (m *Sealing) calcTargetExpiration(ctx context.Context, ssize abi.SectorSize) (target abi.ChainEpoch) {
	var candidates []*pendingPiece

	for _, piece := range m.pendingPieces {
		if piece.assigned {
			continue // already assigned to a sector, skip
		}
		candidates = append(candidates, piece)
	}

	// earliest expiration first
	sort.Slice(candidates, func(i, j int) bool {
		return candidates[i].deal.DealProposal.EndEpoch < candidates[j].deal.DealProposal.EndEpoch
	})

	var totalBytes uint64
	for _, candidate := range candidates {
		totalBytes += uint64(candidate.size)

		if totalBytes >= uint64(abi.PaddedPieceSize(ssize).Unpadded()) {
			return candidate.deal.DealProposal.EndEpoch
		}
	}

	_, curEpoch, err := m.Api.ChainHead(ctx)
	if err != nil {
		log.Errorf("getting current epoch: %s", err)
		return 0
	}

	_, maxDur := policy.DealDurationBounds(0)

	return curEpoch + maxDur
}

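// tryGetUpgradeSector looks through the available sectors for the best upgrade
// candidate: preferring expirations past the target from calcTargetExpiration,
// and among those the lowest initial pledge. If one is found it is removed
// from the available set and SectorStartCCUpdate is sent to it.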
func (m *Sealing) tryGetUpgradeSector(ctx context.Context, sp abi.RegisteredSealProof, ef expFn) (bool, error) {
	if len(m.available) == 0 {
		return false, nil
	}

	ssize, _ := sp.SectorSize() // error already checked in the caller
	targetExpiration := m.calcTargetExpiration(ctx, ssize)

	var candidate abi.SectorID
	var bestExpiration abi.ChainEpoch
	bestPledge := types.TotalFilecoinInt

	for s := range m.available {
		expiration, pledge, err := ef(s.Number)
		if err != nil {
			log.Errorw("checking sector expiration", "error", err)
			continue
		}

		// if best is below target, we want larger expirations
		// if best is above target, we want lower pledge, but only if still above target

		if bestExpiration < targetExpiration {
			if expiration > bestExpiration {
				bestExpiration = expiration
				bestPledge = pledge
				candidate = s
			}
			continue
		}

		if expiration > targetExpiration && pledge.LessThan(bestPledge) {
			bestExpiration = expiration
			bestPledge = pledge
			candidate = s
		}
	}

	if bestExpiration == 0 {
		// didn't find a good sector
		return false, nil
	}

	log.Infow("Upgrading sector", "number", candidate.Number, "type", "deal", "proofType", sp, "expiration", bestExpiration, "pledge", types.FIL(bestPledge))
	delete(m.available, candidate)
	m.creating = &candidate.Number
	return true, m.sectors.Send(uint64(candidate.Number), SectorStartCCUpdate{})
}

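// tryGetDealSector makes sure there is a sector to put pending deal pieces
// into: it respects the configured sealing/staging limits, tries to upgrade an
// available sector first, and otherwise creates a brand-new deal sector if
// MakeNewSectorForDeals is enabled.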
func (m *Sealing) tryGetDealSector(ctx context.Context, sp abi.RegisteredSealProof, ef expFn) error {
	m.startupWait.Wait()

	if m.creating != nil {
		return nil // new sector is being created right now
	}

	cfg, err := m.getConfig()
	if err != nil {
		return xerrors.Errorf("getting storage config: %w", err)
	}

	if cfg.MaxSealingSectorsForDeals > 0 && m.stats.curSealing() >= cfg.MaxSealingSectorsForDeals {
		return nil
	}

	if cfg.MaxWaitDealsSectors > 0 && m.stats.curStaging() >= cfg.MaxWaitDealsSectors {
		return nil
	}

	got, err := m.tryGetUpgradeSector(ctx, sp, ef)
	if err != nil {
		return err
	}
	if got {
		return nil
	}

	if !cfg.MakeNewSectorForDeals {
		return nil
	}

	sid, err := m.createSector(ctx, cfg, sp)
	if err != nil {
		return err
	}

	m.creating = &sid

	log.Infow("Creating sector", "number", sid, "type", "deal", "proofType", sp)
	return m.sectors.Send(uint64(sid), SectorStart{
		ID:         sid,
		SectorType: sp,
	})
}

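// createSector allocates the next sector number, initializes it with the
// sealer, and eagerly updates sealing stats for the new sector.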
// call with m.inputLk
func (m *Sealing) createSector(ctx context.Context, cfg sealiface.Config, sp abi.RegisteredSealProof) (abi.SectorNumber, error) {
	// Now actually create a new sector

	sid, err := m.sc.Next()
	if err != nil {
		return 0, xerrors.Errorf("getting sector number: %w", err)
	}

	err = m.sealer.NewSector(ctx, m.minerSector(sp, sid))
	if err != nil {
		return 0, xerrors.Errorf("initializing sector: %w", err)
	}

	// update stats early, fsm planner would do that async
	m.stats.updateSector(ctx, cfg, m.minerSectorID(sid), UndefinedSectorState)

	return sid, nil
}

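// StartPacking forces a sector to start sealing immediately, without waiting
// for more deal pieces.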
func (m *Sealing) StartPacking(sid abi.SectorNumber) error {
	m.startupWait.Wait()

	log.Infow("starting to seal deal sector", "sector", sid, "trigger", "user")
	return m.sectors.Send(uint64(sid), SectorStartPacking{})
}

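// AbortUpgrade cancels an in-progress CC update of the given sector at the
// user's request.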
func (m *Sealing) AbortUpgrade(sid abi.SectorNumber) error {
	m.startupWait.Wait()

	log.Infow("aborting upgrade of sector", "sector", sid, "trigger", "user")
	return m.sectors.Send(uint64(sid), SectorAbortUpgrade{xerrors.New("triggered by user")})
}

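// proposalCID returns the CID of the deal's proposal, or cid.Undef if it
// cannot be computed.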
func proposalCID(deal api.PieceDealInfo) cid.Cid {
	pc, err := deal.DealProposal.Cid()
	if err != nil {
		log.Errorf("DealProposal.Cid error: %+v", err)
		return cid.Undef
	}

	return pc
}