// lotus/extern/storage-sealing/sealing.go

package sealing

import (
"context"
"errors"
"io"
2020-08-18 14:20:31 +00:00
"math"
2020-07-01 13:30:25 +00:00
"sync"
"time"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
padreader "github.com/filecoin-project/go-padreader"
2020-09-07 03:49:10 +00:00
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
statemachine "github.com/filecoin-project/go-statemachine"
2020-08-17 13:39:33 +00:00
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/specs-actors/actors/builtin/market"
2020-02-21 17:43:44 +00:00
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
)

const SectorStorePrefix = "/sectors"

var ErrTooManySectorsSealing = xerrors.New("too many sectors sealing")

var log = logging.Logger("sectors")
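
// SectorLocation identifies the deadline and partition a sector has been
// assigned to on chain.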
type SectorLocation struct {
	Deadline  uint64
	Partition uint64
}

var ErrSectorAllocated = errors.New("sectorNumber is allocated, but PreCommit info wasn't found on chain")
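
// SealingAPI is the subset of chain-node functionality the sealing state
// machine depends on.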
type SealingAPI interface {
	StateWaitMsg(context.Context, cid.Cid) (MsgLookup, error)
	StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error)
	StateComputeDataCommitment(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tok TipSetToken) (cid.Cid, error)

	// Can return ErrSectorAllocated in case precommit info wasn't found, but the sector number is marked as allocated
	StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*miner.SectorPreCommitOnChainInfo, error)
	StateSectorGetInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*miner.SectorOnChainInfo, error)
	StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*SectorLocation, error)
	StateMinerSectorSize(context.Context, address.Address, TipSetToken) (abi.SectorSize, error)
	StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok TipSetToken) (address.Address, error)
	StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error)
	StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error)
	StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error)
	SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error)
	ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error)
	ChainGetRandomnessFromBeacon(ctx context.Context, tok TipSetToken, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
	ChainGetRandomnessFromTickets(ctx context.Context, tok TipSetToken, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
	ChainReadObj(context.Context, cid.Cid) ([]byte, error)
}
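
// SectorStateNotifee is a callback fired on every sector state transition,
// receiving the sector's info before and after the change.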
type SectorStateNotifee func(before, after SectorInfo)
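
// Sealing orchestrates the sector sealing pipeline for a single miner actor,
// tracking each sector through a persistent state machine.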
type Sealing struct {
	api    SealingAPI
	feeCfg FeeConfig
	events Events

	maddr address.Address

	sealer  sectorstorage.SectorManager
	sectors *statemachine.StateGroup
	sc      SectorIDCounter
	verif   ffiwrapper.Verifier

	pcp             PreCommitPolicy
	unsealedInfoMap UnsealedSectorMap

	upgradeLk sync.Mutex
	toUpgrade map[abi.SectorNumber]struct{}

	notifee SectorStateNotifee

	stats SectorStats

	getConfig GetSealingConfigFunc
}
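
// FeeConfig caps the gas fees Sealing is willing to attach to the messages it
// sends on chain (PreCommit and Commit).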
type FeeConfig struct {
	MaxPreCommitGasFee abi.TokenAmount
	MaxCommitGasFee    abi.TokenAmount
}
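
// UnsealedSectorMap tracks sectors that are still accepting new pieces; all
// access to infos must happen while holding lk.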
type UnsealedSectorMap struct {
	infos map[abi.SectorNumber]UnsealedSectorInfo
	lk    sync.Mutex
}
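
// UnsealedSectorInfo describes the pieces written so far into a sector that
// has not yet started sealing.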
type UnsealedSectorInfo struct {
	numDeals uint64
	// stored should always equal sum of pieceSizes.Padded()
	stored     abi.PaddedPieceSize
	pieceSizes []abi.UnpaddedPieceSize
}
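
// New constructs a Sealing state machine for the given miner address,
// persisting sector state under SectorStorePrefix in the provided datastore.
//
// A minimal wiring sketch (the concrete SealingAPI, Events, SectorManager and
// remaining dependencies are assumed to come from the caller's node setup):
//
//	s := New(api, fc, events, maddr, ds, sealer, sc, verif, pcp, gc, notifee)
//	if err := s.Run(ctx); err != nil {
//		// handle startup error
//	}
//	defer s.Stop(ctx) // ignoring the shutdown error in this sketch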
func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee) *Sealing {
	s := &Sealing{
		api:    api,
		feeCfg: fc,
		events: events,
		maddr:  maddr,
		sealer: sealer,
		sc:     sc,
		verif:  verif,
		pcp:    pcp,
		unsealedInfoMap: UnsealedSectorMap{
			infos: make(map[abi.SectorNumber]UnsealedSectorInfo),
			lk:    sync.Mutex{},
		},

		toUpgrade: map[abi.SectorNumber]struct{}{},

		notifee: notifee,

		getConfig: gc,

		stats: SectorStats{
			bySector: map[abi.SectorID]statSectorState{},
		},
	}

	s.sectors = statemachine.New(namespace.Wrap(ds, datastore.NewKey(SectorStorePrefix)), s, SectorInfo{})

	return s
}
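
// Run restarts the state machines of sectors loaded from the datastore and
// resumes their processing.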
func (m *Sealing) Run(ctx context.Context) error {
	if err := m.restartSectors(ctx); err != nil {
		log.Errorf("%+v", err)
		return xerrors.Errorf("failed to load sector states: %w", err)
	}

	return nil
}
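
// Stop shuts down the sector state machines.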
func (m *Sealing) Stop(ctx context.Context) error {
	return m.sectors.Stop(ctx)
}
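
// AddPieceToAnySector writes the given piece into the first open sector with
// enough space left, creating a new deal sector if none fits. Required filler
// pieces are written before the piece itself. It returns the sector number
// and the padded offset at which the piece was stored.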
func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) {
	log.Infof("Adding piece for deal %d (publish msg: %s)", d.DealID, d.PublishCid)
	if padreader.PaddedSize(uint64(size)) != size {
		return 0, 0, xerrors.Errorf("cannot allocate unpadded piece")
	}

	if size > abi.PaddedPieceSize(m.sealer.SectorSize()).Unpadded() {
		return 0, 0, xerrors.Errorf("piece cannot fit into a sector")
	}

	m.unsealedInfoMap.lk.Lock()

	sid, pads, err := m.getSectorAndPadding(size)
	if err != nil {
		m.unsealedInfoMap.lk.Unlock()
		return 0, 0, xerrors.Errorf("getting available sector: %w", err)
	}

	for _, p := range pads {
		err = m.addPiece(ctx, sid, p.Unpadded(), NewNullReader(p.Unpadded()), nil)
		if err != nil {
			m.unsealedInfoMap.lk.Unlock()
			return 0, 0, xerrors.Errorf("writing pads: %w", err)
		}
	}

	offset := m.unsealedInfoMap.infos[sid].stored
	err = m.addPiece(ctx, sid, size, r, &d)
	if err != nil {
		m.unsealedInfoMap.lk.Unlock()
		return 0, 0, xerrors.Errorf("adding piece to sector: %w", err)
	}

	startPacking := m.unsealedInfoMap.infos[sid].numDeals >= getDealPerSectorLimit(m.sealer.SectorSize())

	m.unsealedInfoMap.lk.Unlock()

	if startPacking {
		if err := m.StartPacking(sid); err != nil {
			return 0, 0, xerrors.Errorf("start packing: %w", err)
		}
	}

	return sid, offset, nil
}

// Caller should hold m.unsealedInfoMap.lk
func (m *Sealing) addPiece(ctx context.Context, sectorID abi.SectorNumber, size abi.UnpaddedPieceSize, r io.Reader, di *DealInfo) error {
	log.Infof("Adding piece to sector %d", sectorID)
	ppi, err := m.sealer.AddPiece(sectorstorage.WithPriority(ctx, DealSectorPriority), m.minerSector(sectorID), m.unsealedInfoMap.infos[sectorID].pieceSizes, size, r)
	if err != nil {
		return xerrors.Errorf("writing piece: %w", err)
	}

	piece := Piece{
		Piece:    ppi,
		DealInfo: di,
	}

	err = m.sectors.Send(uint64(sectorID), SectorAddPiece{NewPiece: piece})
	if err != nil {
		return err
	}

	ui := m.unsealedInfoMap.infos[sectorID]
	num := m.unsealedInfoMap.infos[sectorID].numDeals
	if di != nil {
		num = num + 1
	}
	m.unsealedInfoMap.infos[sectorID] = UnsealedSectorInfo{
		numDeals:   num,
		stored:     ui.stored + piece.Piece.Size,
		pieceSizes: append(ui.pieceSizes, piece.Piece.Size.Unpadded()),
	}

	return nil
}
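
// Remove asks the state machine to remove the given sector.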
func (m *Sealing) Remove(ctx context.Context, sid abi.SectorNumber) error {
	return m.sectors.Send(uint64(sid), SectorRemove{})
}

// Caller should NOT hold m.unsealedInfoMap.lk
func (m *Sealing) StartPacking(sectorID abi.SectorNumber) error {
	// locking here ensures that when the SectorStartPacking event is sent, the sector won't be picked up anywhere else
	m.unsealedInfoMap.lk.Lock()
	defer m.unsealedInfoMap.lk.Unlock()

	// cannot send SectorStartPacking to sectors that have already been packed, otherwise it will cause the state machine to exit
	if _, ok := m.unsealedInfoMap.infos[sectorID]; !ok {
		log.Warnf("StartPacking called for sector %v, but it is not in unsealedInfoMap.infos; it may already have been packed", sectorID)
		return nil
	}

	log.Infof("Starting packing sector %d", sectorID)
	err := m.sectors.Send(uint64(sectorID), SectorStartPacking{})
	if err != nil {
		return err
	}
	log.Infof("sent SectorStartPacking event for sector %d", sectorID)

	delete(m.unsealedInfoMap.infos, sectorID)

	return nil
}

// Caller should hold m.unsealedInfoMap.lk
func (m *Sealing) getSectorAndPadding(size abi.UnpaddedPieceSize) (abi.SectorNumber, []abi.PaddedPieceSize, error) {
	ss := abi.PaddedPieceSize(m.sealer.SectorSize())
	for k, v := range m.unsealedInfoMap.infos {
		pads, padLength := ffiwrapper.GetRequiredPadding(v.stored, size.Padded())
		if v.stored+size.Padded()+padLength <= ss {
			return k, pads, nil
		}
	}

	ns, err := m.newDealSector()
	if err != nil {
		return 0, nil, err
	}

	m.unsealedInfoMap.infos[ns] = UnsealedSectorInfo{
		numDeals:   0,
		stored:     0,
		pieceSizes: nil,
	}

	return ns, nil, nil
}

// newDealSector creates a new sector for deal storage
func (m *Sealing) newDealSector() (abi.SectorNumber, error) {
	// First make sure we don't have too many 'open' sectors
	cfg, err := m.getConfig()
	if err != nil {
		return 0, xerrors.Errorf("getting config: %w", err)
	}

	if cfg.MaxSealingSectorsForDeals > 0 {
		if m.stats.curSealing() > cfg.MaxSealingSectorsForDeals {
			return 0, ErrTooManySectorsSealing
		}
	}

	if cfg.MaxWaitDealsSectors > 0 {
		// we have to run in a loop as we're dropping unsealedInfoMap.lk
		// to actually call StartPacking. When we do that, another entry can
		// get added to unsealedInfoMap.
		tries := 0
		for uint64(len(m.unsealedInfoMap.infos)) >= cfg.MaxWaitDealsSectors {
			if tries > 10 {
				// give up trying to free a slot; fall through and create a new sector anyway
				break
			}

			if tries > 0 {
				m.unsealedInfoMap.lk.Unlock()
				time.Sleep(time.Second)
				m.unsealedInfoMap.lk.Lock()
			}

			tries++
			var mostStored abi.PaddedPieceSize = math.MaxUint64
			var best abi.SectorNumber = math.MaxUint64

			for sn, info := range m.unsealedInfoMap.infos {
				// mostStored starts at MaxUint64, so mostStored+1 wraps to 0
				// (18446744073709551615 + 1 = 0) and the first sector always wins
				if info.stored+1 > mostStored+1 {
					best = sn
					mostStored = info.stored
				}
			}

			if best == math.MaxUint64 {
				// probably not possible, but who knows
				break
			}

			m.unsealedInfoMap.lk.Unlock()
			if err := m.StartPacking(best); err != nil {
				log.Errorf("newDealSector StartPacking error: %+v", err)
				continue // let's pretend this is fine
			}
			m.unsealedInfoMap.lk.Lock()
		}
	}

	// Now actually create a new sector

	sid, err := m.sc.Next()
	if err != nil {
		return 0, xerrors.Errorf("getting sector number: %w", err)
	}

	err = m.sealer.NewSector(context.TODO(), m.minerSector(sid))
	if err != nil {
		return 0, xerrors.Errorf("initializing sector: %w", err)
	}

	rt, err := ffiwrapper.SealProofTypeFromSectorSize(m.sealer.SectorSize())
	if err != nil {
		return 0, xerrors.Errorf("bad sector size: %w", err)
	}

	log.Infof("Creating sector %d", sid)
	err = m.sectors.Send(uint64(sid), SectorStart{
		ID:         sid,
		SectorType: rt,
	})
	if err != nil {
		return 0, xerrors.Errorf("starting the sector fsm: %w", err)
	}

	cf, err := m.getConfig()
	if err != nil {
		return 0, xerrors.Errorf("getting the sealing delay: %w", err)
	}

	if cf.WaitDealsDelay > 0 {
		timer := time.NewTimer(cf.WaitDealsDelay)
		go func() {
			<-timer.C
			if err := m.StartPacking(sid); err != nil {
				log.Errorf("starting sector %d: %+v", sid, err)
			}
		}()
	}

	return sid, nil
}

// newSectorCC accepts a slice of pieces with no deal (junk data)
func (m *Sealing) newSectorCC(sid abi.SectorNumber, pieces []Piece) error {
	rt, err := ffiwrapper.SealProofTypeFromSectorSize(m.sealer.SectorSize())
	if err != nil {
		return xerrors.Errorf("bad sector size: %w", err)
	}

	log.Infof("Creating CC sector %d", sid)
	return m.sectors.Send(uint64(sid), SectorStartCC{
		ID:         sid,
		Pieces:     pieces,
		SectorType: rt,
	})
}
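
// minerSector qualifies a sector number with this miner's actor ID.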
func (m *Sealing) minerSector(num abi.SectorNumber) abi.SectorID {
	mid, err := address.IDFromAddress(m.maddr)
	if err != nil {
		panic(err)
	}

	return abi.SectorID{
		Number: num,
		Miner:  abi.ActorID(mid),
	}
}
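
// Address returns the address of the miner actor this Sealing instance works for.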
func (m *Sealing) Address() address.Address {
	return m.maddr
}
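
// getDealPerSectorLimit returns the maximum number of deals to pack into a
// single sector: 256 below 64 GiB sector size, 512 otherwise.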
func getDealPerSectorLimit(size abi.SectorSize) uint64 {
	if size < 64<<30 {
		return 256
	}
	return 512
}