2019-11-28 17:44:49 +00:00
|
|
|
package storage
|
|
|
|
|
|
|
|
import (
|
2020-03-11 06:30:48 +00:00
|
|
|
"bytes"
|
2019-11-28 17:44:49 +00:00
|
|
|
"context"
|
2019-11-28 18:08:10 +00:00
|
|
|
"time"
|
|
|
|
|
2020-05-28 15:45:34 +00:00
|
|
|
"github.com/filecoin-project/go-bitfield"
|
|
|
|
|
2020-04-16 20:11:07 +00:00
|
|
|
"github.com/filecoin-project/go-address"
|
2020-09-07 03:49:10 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/abi"
|
|
|
|
"github.com/filecoin-project/go-state-types/big"
|
|
|
|
"github.com/filecoin-project/go-state-types/crypto"
|
2020-09-12 03:07:52 +00:00
|
|
|
"github.com/filecoin-project/go-state-types/dline"
|
2020-07-20 13:45:17 +00:00
|
|
|
"github.com/ipfs/go-cid"
|
2020-09-14 11:17:45 +00:00
|
|
|
|
2019-11-28 18:08:10 +00:00
|
|
|
"go.opencensus.io/trace"
|
|
|
|
"golang.org/x/xerrors"
|
|
|
|
|
2020-10-08 01:09:33 +00:00
|
|
|
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
|
2020-09-21 22:24:45 +00:00
|
|
|
|
2020-08-17 13:26:18 +00:00
|
|
|
"github.com/filecoin-project/lotus/api"
|
2020-05-26 03:43:17 +00:00
|
|
|
"github.com/filecoin-project/lotus/build"
|
2019-11-28 17:44:49 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/actors"
|
2020-09-17 02:34:13 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
2020-10-08 01:09:33 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/actors/policy"
|
2019-11-28 17:44:49 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/types"
|
|
|
|
)
|
|
|
|
|
2020-09-18 16:03:59 +00:00
|
|
|
func (s *WindowPoStScheduler) failPost(err error, ts *types.TipSet, deadline *dline.Info) {
|
2020-10-09 19:52:04 +00:00
|
|
|
s.journal.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
|
2020-09-18 16:03:59 +00:00
|
|
|
c := evtCommon{Error: err}
|
|
|
|
if ts != nil {
|
|
|
|
c.Deadline = deadline
|
|
|
|
c.Height = ts.Height()
|
|
|
|
c.TipSet = ts.Cids()
|
|
|
|
}
|
2020-09-02 18:45:25 +00:00
|
|
|
return WdPoStSchedulerEvt{
|
2020-09-18 16:03:59 +00:00
|
|
|
evtCommon: c,
|
2020-09-02 18:45:25 +00:00
|
|
|
State: SchedulerStateFaulted,
|
|
|
|
}
|
2020-07-20 13:45:17 +00:00
|
|
|
})
|
|
|
|
|
2020-09-18 16:03:59 +00:00
|
|
|
log.Errorf("Got err %w - TODO handle errors", err)
|
2020-04-07 19:55:34 +00:00
|
|
|
/*s.failLk.Lock()
|
2019-12-08 19:48:17 +00:00
|
|
|
if eps > s.failed {
|
|
|
|
s.failed = eps
|
|
|
|
}
|
2020-04-07 19:55:34 +00:00
|
|
|
s.failLk.Unlock()*/
|
2019-12-08 19:48:17 +00:00
|
|
|
}
|
|
|
|
|
2020-09-18 16:03:59 +00:00
|
|
|
// recordProofsEvent records a successful proofs_processed event in the
// journal, even if it was a noop (no partitions). The event body is built
// lazily inside the journal callback, so getEvtCommon runs only if the
// event type is actually enabled.
func (s *WindowPoStScheduler) recordProofsEvent(partitions []miner.PoStPartition, mcid cid.Cid) {
	s.journal.RecordEvent(s.evtTypes[evtTypeWdPoStProofs], func() interface{} {
		return &WdPoStProofsProcessedEvt{
			evtCommon:  s.getEvtCommon(nil), // nil error: this event is only emitted on success/noop
			Partitions: partitions,
			MessageCID: mcid, // cid.Undef when nothing was submitted (noop)
		}
	})
}
|
|
|
|
|
|
|
|
// startGeneratePoST kicks off the process of generating a PoST
// asynchronously. It journals a SchedulerStateStarted event, runs proof
// generation in a goroutine, and reports the result (or error) through
// completeGeneratePoST. The returned CancelFunc aborts the in-flight
// generation; it is also invoked automatically when the goroutine exits.
func (s *WindowPoStScheduler) startGeneratePoST(
	ctx context.Context,
	ts *types.TipSet,
	deadline *dline.Info,
	completeGeneratePoST CompleteGeneratePoSTCb,
) context.CancelFunc {
	ctx, abort := context.WithCancel(ctx)
	go func() {
		defer abort()

		// Record the "started" state before doing any work, so journal
		// consumers see the transition even if generation fails early.
		s.journal.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
			return WdPoStSchedulerEvt{
				evtCommon: s.getEvtCommon(nil),
				State:     SchedulerStateStarted,
			}
		})

		posts, err := s.runGeneratePoST(ctx, ts, deadline)
		// The callback receives both result and error; error handling is
		// the caller's responsibility.
		completeGeneratePoST(posts, err)
	}()

	return abort
}
|
|
|
|
|
|
|
|
// runGeneratePoST generates the PoST
|
|
|
|
func (s *WindowPoStScheduler) runGeneratePoST(
|
|
|
|
ctx context.Context,
|
|
|
|
ts *types.TipSet,
|
|
|
|
deadline *dline.Info,
|
|
|
|
) ([]miner.SubmitWindowedPoStParams, error) {
|
|
|
|
ctx, span := trace.StartSpan(ctx, "WindowPoStScheduler.generatePoST")
|
|
|
|
defer span.End()
|
|
|
|
|
|
|
|
posts, err := s.runPost(ctx, *deadline, ts)
|
|
|
|
if err != nil {
|
|
|
|
log.Errorf("runPost failed: %+v", err)
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(posts) == 0 {
|
|
|
|
s.recordProofsEvent(nil, cid.Undef)
|
|
|
|
}
|
|
|
|
|
|
|
|
return posts, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// startSubmitPoST kicks off the process of submitting PoST asynchronously.
// The submission runs in a goroutine; on success a
// SchedulerStateSucceeded event is journalled, and in all cases the
// outcome is reported through completeSubmitPoST. The returned CancelFunc
// aborts the in-flight submission; it also fires when the goroutine exits.
func (s *WindowPoStScheduler) startSubmitPoST(
	ctx context.Context,
	ts *types.TipSet,
	deadline *dline.Info,
	posts []miner.SubmitWindowedPoStParams,
	completeSubmitPoST CompleteSubmitPoSTCb,
) context.CancelFunc {

	ctx, abort := context.WithCancel(ctx)
	go func() {
		defer abort()

		err := s.runSubmitPoST(ctx, ts, deadline, posts)
		if err == nil {
			// Only journal success here; failures are journalled by the
			// caller via the completion callback / failPost path.
			s.journal.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
				return WdPoStSchedulerEvt{
					evtCommon: s.getEvtCommon(nil),
					State:     SchedulerStateSucceeded,
				}
			})
		}
		completeSubmitPoST(err)
	}()

	return abort
}
|
2020-09-09 13:01:37 +00:00
|
|
|
|
2020-09-18 16:03:59 +00:00
|
|
|
// runSubmitPoST submits PoST
|
|
|
|
func (s *WindowPoStScheduler) runSubmitPoST(
|
|
|
|
ctx context.Context,
|
|
|
|
ts *types.TipSet,
|
|
|
|
deadline *dline.Info,
|
|
|
|
posts []miner.SubmitWindowedPoStParams,
|
|
|
|
) error {
|
|
|
|
if len(posts) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
2020-09-09 13:01:37 +00:00
|
|
|
|
2020-09-18 16:03:59 +00:00
|
|
|
ctx, span := trace.StartSpan(ctx, "WindowPoStScheduler.submitPoST")
|
|
|
|
defer span.End()
|
|
|
|
|
|
|
|
// Get randomness from tickets
|
|
|
|
commEpoch := deadline.Open
|
|
|
|
commRand, err := s.api.ChainGetRandomnessFromTickets(ctx, ts.Key(), crypto.DomainSeparationTag_PoStChainCommit, commEpoch, nil)
|
|
|
|
if err != nil {
|
|
|
|
err = xerrors.Errorf("failed to get chain randomness from tickets for windowPost (ts=%d; deadline=%d): %w", ts.Height(), commEpoch, err)
|
|
|
|
log.Errorf("submitPost failed: %+v", err)
|
|
|
|
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
var submitErr error
|
|
|
|
for i := range posts {
|
|
|
|
// Add randomness to PoST
|
|
|
|
post := &posts[i]
|
|
|
|
post.ChainCommitEpoch = commEpoch
|
|
|
|
post.ChainCommitRand = commRand
|
|
|
|
|
|
|
|
// Submit PoST
|
|
|
|
sm, submitErr := s.submitPost(ctx, post)
|
|
|
|
if submitErr != nil {
|
|
|
|
log.Errorf("submit window post failed: %+v", submitErr)
|
|
|
|
} else {
|
|
|
|
s.recordProofsEvent(post.Partitions, sm.Cid())
|
2019-11-28 17:44:49 +00:00
|
|
|
}
|
2020-09-18 16:03:59 +00:00
|
|
|
}
|
2020-07-20 13:45:17 +00:00
|
|
|
|
2020-09-18 16:03:59 +00:00
|
|
|
return submitErr
|
2019-11-28 17:44:49 +00:00
|
|
|
}
|
|
|
|
|
2020-09-07 03:49:10 +00:00
|
|
|
func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.BitField) (bitfield.BitField, error) {
|
2020-05-16 21:50:50 +00:00
|
|
|
spt, err := s.proofType.RegisteredSealProof()
|
|
|
|
if err != nil {
|
2020-08-05 07:37:28 +00:00
|
|
|
return bitfield.BitField{}, xerrors.Errorf("getting seal proof type: %w", err)
|
2020-05-16 21:50:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
mid, err := address.IDFromAddress(s.actor)
|
|
|
|
if err != nil {
|
2020-08-05 07:37:28 +00:00
|
|
|
return bitfield.BitField{}, err
|
2020-05-16 21:50:50 +00:00
|
|
|
}
|
|
|
|
|
2020-05-17 01:36:22 +00:00
|
|
|
sectors := make(map[abi.SectorID]struct{})
|
2020-05-16 21:50:50 +00:00
|
|
|
var tocheck []abi.SectorID
|
2020-05-29 19:36:04 +00:00
|
|
|
err = check.ForEach(func(snum uint64) error {
|
2020-05-16 21:50:50 +00:00
|
|
|
s := abi.SectorID{
|
|
|
|
Miner: abi.ActorID(mid),
|
|
|
|
Number: abi.SectorNumber(snum),
|
|
|
|
}
|
|
|
|
|
|
|
|
tocheck = append(tocheck, s)
|
|
|
|
sectors[s] = struct{}{}
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
if err != nil {
|
2020-08-05 07:37:28 +00:00
|
|
|
return bitfield.BitField{}, xerrors.Errorf("iterating over bitfield: %w", err)
|
2020-05-16 21:50:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
bad, err := s.faultTracker.CheckProvable(ctx, spt, tocheck)
|
|
|
|
if err != nil {
|
2020-08-05 07:37:28 +00:00
|
|
|
return bitfield.BitField{}, xerrors.Errorf("checking provable sectors: %w", err)
|
2020-05-16 21:50:50 +00:00
|
|
|
}
|
|
|
|
for _, id := range bad {
|
|
|
|
delete(sectors, id)
|
|
|
|
}
|
|
|
|
|
2020-05-29 19:36:04 +00:00
|
|
|
log.Warnw("Checked sectors", "checked", len(tocheck), "good", len(sectors))
|
2020-05-16 21:50:50 +00:00
|
|
|
|
|
|
|
sbf := bitfield.New()
|
|
|
|
for s := range sectors {
|
2020-08-05 07:37:28 +00:00
|
|
|
sbf.Set(uint64(s.Number))
|
2020-05-16 21:50:50 +00:00
|
|
|
}
|
|
|
|
|
2020-08-05 07:37:28 +00:00
|
|
|
return sbf, nil
|
2020-05-29 19:36:04 +00:00
|
|
|
}
|
|
|
|
|
2020-09-18 22:40:49 +00:00
|
|
|
func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uint64, partitions []api.Partition) ([]miner.RecoveryDeclaration, *types.SignedMessage, error) {
|
2020-07-14 17:10:31 +00:00
|
|
|
ctx, span := trace.StartSpan(ctx, "storage.checkNextRecoveries")
|
|
|
|
defer span.End()
|
2020-05-29 19:36:04 +00:00
|
|
|
|
2020-07-20 13:45:17 +00:00
|
|
|
faulty := uint64(0)
|
2020-09-14 21:40:52 +00:00
|
|
|
params := &miner.DeclareFaultsRecoveredParams{
|
|
|
|
Recoveries: []miner.RecoveryDeclaration{},
|
2020-05-29 19:36:04 +00:00
|
|
|
}
|
|
|
|
|
2020-07-14 17:10:31 +00:00
|
|
|
for partIdx, partition := range partitions {
|
2020-09-18 22:40:49 +00:00
|
|
|
unrecovered, err := bitfield.SubtractBitField(partition.FaultySectors, partition.RecoveringSectors)
|
2020-07-14 17:10:31 +00:00
|
|
|
if err != nil {
|
2020-09-02 18:15:25 +00:00
|
|
|
return nil, nil, xerrors.Errorf("subtracting recovered set from fault set: %w", err)
|
2020-07-14 17:10:31 +00:00
|
|
|
}
|
2020-05-29 19:36:04 +00:00
|
|
|
|
2020-07-14 17:10:31 +00:00
|
|
|
uc, err := unrecovered.Count()
|
|
|
|
if err != nil {
|
2020-09-02 18:15:25 +00:00
|
|
|
return nil, nil, xerrors.Errorf("counting unrecovered sectors: %w", err)
|
2020-07-14 17:10:31 +00:00
|
|
|
}
|
2020-05-29 19:36:04 +00:00
|
|
|
|
2020-07-14 17:10:31 +00:00
|
|
|
if uc == 0 {
|
|
|
|
continue
|
|
|
|
}
|
2020-06-11 18:33:15 +00:00
|
|
|
|
2020-07-14 17:10:31 +00:00
|
|
|
faulty += uc
|
2020-05-29 19:36:04 +00:00
|
|
|
|
2020-07-14 17:10:31 +00:00
|
|
|
recovered, err := s.checkSectors(ctx, unrecovered)
|
|
|
|
if err != nil {
|
2020-09-02 18:15:25 +00:00
|
|
|
return nil, nil, xerrors.Errorf("checking unrecovered sectors: %w", err)
|
2020-07-14 17:10:31 +00:00
|
|
|
}
|
2020-05-29 19:36:04 +00:00
|
|
|
|
2020-07-14 17:10:31 +00:00
|
|
|
// if all sectors failed to recover, don't declare recoveries
|
|
|
|
recoveredCount, err := recovered.Count()
|
|
|
|
if err != nil {
|
2020-09-02 18:15:25 +00:00
|
|
|
return nil, nil, xerrors.Errorf("counting recovered sectors: %w", err)
|
2020-07-14 17:10:31 +00:00
|
|
|
}
|
2020-07-08 12:35:53 +00:00
|
|
|
|
2020-07-14 17:10:31 +00:00
|
|
|
if recoveredCount == 0 {
|
|
|
|
continue
|
|
|
|
}
|
2020-07-08 12:35:53 +00:00
|
|
|
|
2020-09-14 21:40:52 +00:00
|
|
|
params.Recoveries = append(params.Recoveries, miner.RecoveryDeclaration{
|
2020-07-14 17:10:31 +00:00
|
|
|
Deadline: dlIdx,
|
|
|
|
Partition: uint64(partIdx),
|
|
|
|
Sectors: recovered,
|
|
|
|
})
|
2020-07-06 07:52:58 +00:00
|
|
|
}
|
2020-05-29 19:36:04 +00:00
|
|
|
|
2020-09-02 18:15:25 +00:00
|
|
|
recoveries := params.Recoveries
|
|
|
|
if len(recoveries) == 0 {
|
2020-07-14 17:10:31 +00:00
|
|
|
if faulty != 0 {
|
|
|
|
log.Warnw("No recoveries to declare", "deadline", dlIdx, "faulty", faulty)
|
|
|
|
}
|
|
|
|
|
2020-09-02 18:15:25 +00:00
|
|
|
return recoveries, nil, nil
|
2020-05-16 21:50:50 +00:00
|
|
|
}
|
|
|
|
|
2020-07-14 17:10:37 +00:00
|
|
|
enc, aerr := actors.SerializeParams(params)
|
2020-05-16 21:50:50 +00:00
|
|
|
if aerr != nil {
|
2020-09-02 18:15:25 +00:00
|
|
|
return recoveries, nil, xerrors.Errorf("could not serialize declare recoveries parameters: %w", aerr)
|
2020-05-16 21:50:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
msg := &types.Message{
|
2020-08-01 14:23:13 +00:00
|
|
|
To: s.actor,
|
2020-08-19 21:25:58 +00:00
|
|
|
From: s.worker,
|
2020-10-08 20:32:54 +00:00
|
|
|
Method: miner.Methods.DeclareFaultsRecovered,
|
2020-08-01 14:23:13 +00:00
|
|
|
Params: enc,
|
|
|
|
Value: types.NewInt(0),
|
2020-05-16 21:50:50 +00:00
|
|
|
}
|
2020-08-19 21:25:58 +00:00
|
|
|
spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)}
|
|
|
|
s.setSender(ctx, msg, spec)
|
2020-05-16 21:50:50 +00:00
|
|
|
|
2020-08-12 20:17:21 +00:00
|
|
|
sm, err := s.api.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)})
|
2020-05-16 21:50:50 +00:00
|
|
|
if err != nil {
|
2020-09-02 18:15:25 +00:00
|
|
|
return recoveries, sm, xerrors.Errorf("pushing message to mpool: %w", err)
|
2020-05-16 21:50:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
log.Warnw("declare faults recovered Message CID", "cid", sm.Cid())
|
|
|
|
|
2020-06-04 22:52:45 +00:00
|
|
|
rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid(), build.MessageConfidence)
|
2020-05-16 21:50:50 +00:00
|
|
|
if err != nil {
|
2020-09-02 18:15:25 +00:00
|
|
|
return recoveries, sm, xerrors.Errorf("declare faults recovered wait error: %w", err)
|
2020-05-16 21:50:50 +00:00
|
|
|
}
|
|
|
|
|
2020-05-28 15:45:34 +00:00
|
|
|
if rec.Receipt.ExitCode != 0 {
|
2020-09-02 18:15:25 +00:00
|
|
|
return recoveries, sm, xerrors.Errorf("declare faults recovered wait non-0 exit code: %d", rec.Receipt.ExitCode)
|
2020-05-16 21:50:50 +00:00
|
|
|
}
|
|
|
|
|
2020-09-02 18:15:25 +00:00
|
|
|
return recoveries, sm, nil
|
2020-05-16 21:50:50 +00:00
|
|
|
}
|
|
|
|
|
2020-09-18 22:40:49 +00:00
|
|
|
// checkNextFaults scans the given deadline's partitions for live sectors
// that are no longer provable and declares them faulty on chain via a
// DeclareFaults message. It returns the declarations made and the pushed
// signed message (nil when nothing was declared).
func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, partitions []api.Partition) ([]miner.FaultDeclaration, *types.SignedMessage, error) {
	ctx, span := trace.StartSpan(ctx, "storage.checkNextFaults")
	defer span.End()

	bad := uint64(0)
	params := &miner.DeclareFaultsParams{
		Faults: []miner.FaultDeclaration{},
	}

	for partIdx, partition := range partitions {
		// Only consider sectors not already declared faulty.
		nonFaulty, err := bitfield.SubtractBitField(partition.LiveSectors, partition.FaultySectors)
		if err != nil {
			return nil, nil, xerrors.Errorf("determining non faulty sectors: %w", err)
		}

		// Ask the fault tracker which of those are still provable.
		good, err := s.checkSectors(ctx, nonFaulty)
		if err != nil {
			return nil, nil, xerrors.Errorf("checking sectors: %w", err)
		}

		// Anything live, not yet faulty, and not provable is newly faulty.
		newFaulty, err := bitfield.SubtractBitField(nonFaulty, good)
		if err != nil {
			return nil, nil, xerrors.Errorf("calculating faulty sector set: %w", err)
		}

		c, err := newFaulty.Count()
		if err != nil {
			return nil, nil, xerrors.Errorf("counting faulty sectors: %w", err)
		}

		if c == 0 {
			continue
		}

		bad += c

		params.Faults = append(params.Faults, miner.FaultDeclaration{
			Deadline:  dlIdx,
			Partition: uint64(partIdx),
			Sectors:   newFaulty,
		})
	}

	faults := params.Faults
	if len(faults) == 0 {
		// Nothing newly faulty: no message to send.
		return faults, nil, nil
	}

	log.Errorw("DETECTED FAULTY SECTORS, declaring faults", "count", bad)

	enc, aerr := actors.SerializeParams(params)
	if aerr != nil {
		return faults, nil, xerrors.Errorf("could not serialize declare faults parameters: %w", aerr)
	}

	msg := &types.Message{
		To:     s.actor,
		From:   s.worker,
		Method: miner.Methods.DeclareFaults,
		Params: enc,
		Value:  types.NewInt(0), // TODO: Is there a fee?
	}
	spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)}
	s.setSender(ctx, msg, spec)

	sm, err := s.api.MpoolPushMessage(ctx, msg, spec)
	if err != nil {
		return faults, sm, xerrors.Errorf("pushing message to mpool: %w", err)
	}

	log.Warnw("declare faults Message CID", "cid", sm.Cid())

	// NOTE(review): the wait uses context.TODO() rather than ctx, so it is
	// not cancelled with the caller — confirm whether that is intentional.
	rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid(), build.MessageConfidence)
	if err != nil {
		return faults, sm, xerrors.Errorf("declare faults wait error: %w", err)
	}

	if rec.Receipt.ExitCode != 0 {
		return faults, sm, xerrors.Errorf("declare faults wait non-0 exit code: %d", rec.Receipt.ExitCode)
	}

	return faults, sm, nil
}
|
|
|
|
|
2020-09-09 13:01:37 +00:00
|
|
|
// runPost computes the window PoSt proofs for deadline di at tipset ts,
// returning one SubmitWindowedPoStParams per message to submit. It also
// kicks off an asynchronous check of faults/recoveries for an upcoming
// deadline, journalling the outcomes.
func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *types.TipSet) ([]miner.SubmitWindowedPoStParams, error) {
	ctx, span := trace.StartSpan(ctx, "storage.runPost")
	defer span.End()

	go func() {
		// TODO: extract from runPost, run on fault cutoff boundaries

		// check faults / recoveries for the *next* deadline. It's already too
		// late to declare them for this deadline
		declDeadline := (di.Index + 2) % di.WPoStPeriodDeadlines

		partitions, err := s.api.StateMinerPartitions(context.TODO(), s.actor, declDeadline, ts.Key())
		if err != nil {
			log.Errorf("getting partitions: %v", err)
			return
		}

		var (
			sigmsg     *types.SignedMessage
			recoveries []miner.RecoveryDeclaration
			faults     []miner.FaultDeclaration

			// optionalCid returns the CID of the message, or cid.Undef is the
			// message is nil. We don't need the argument (could capture the
			// pointer), but it's clearer and purer like that.
			optionalCid = func(sigmsg *types.SignedMessage) cid.Cid {
				if sigmsg == nil {
					return cid.Undef
				}
				return sigmsg.Cid()
			}
		)

		if recoveries, sigmsg, err = s.checkNextRecoveries(context.TODO(), declDeadline, partitions); err != nil {
			// TODO: This is potentially quite bad, but not even trying to post when this fails is objectively worse
			log.Errorf("checking sector recoveries: %v", err)
		}

		s.journal.RecordEvent(s.evtTypes[evtTypeWdPoStRecoveries], func() interface{} {
			j := WdPoStRecoveriesProcessedEvt{
				evtCommon:    s.getEvtCommon(err),
				Declarations: recoveries,
				MessageCID:   optionalCid(sigmsg),
			}
			j.Error = err
			return j
		})

		if ts.Height() > build.UpgradeIgnitionHeight {
			return // FORK: declaring faults after ignition upgrade makes no sense
		}

		if faults, sigmsg, err = s.checkNextFaults(context.TODO(), declDeadline, partitions); err != nil {
			// TODO: This is also potentially really bad, but we try to post anyways
			log.Errorf("checking sector faults: %v", err)
		}

		s.journal.RecordEvent(s.evtTypes[evtTypeWdPoStFaults], func() interface{} {
			return WdPoStFaultsProcessedEvt{
				evtCommon:    s.getEvtCommon(err),
				Declarations: faults,
				MessageCID:   optionalCid(sigmsg),
			}
		})
	}()

	// The miner address, CBOR-encoded, is mixed into the challenge seed.
	buf := new(bytes.Buffer)
	if err := s.actor.MarshalCBOR(buf); err != nil {
		return nil, xerrors.Errorf("failed to marshal address to cbor: %w", err)
	}

	rand, err := s.api.ChainGetRandomnessFromBeacon(ctx, ts.Key(), crypto.DomainSeparationTag_WindowedPoStChallengeSeed, di.Challenge, buf.Bytes())
	if err != nil {
		return nil, xerrors.Errorf("failed to get chain randomness from beacon for window post (ts=%d; deadline=%d): %w", ts.Height(), di, err)
	}

	// Get the partitions for the given deadline
	partitions, err := s.api.StateMinerPartitions(ctx, s.actor, di.Index, ts.Key())
	if err != nil {
		return nil, xerrors.Errorf("getting partitions: %w", err)
	}

	// Split partitions into batches, so as not to exceed the number of sectors
	// allowed in a single message
	partitionBatches, err := s.batchPartitions(partitions)
	if err != nil {
		return nil, err
	}

	// Generate proofs in batches
	posts := make([]miner.SubmitWindowedPoStParams, 0, len(partitionBatches))
	for batchIdx, batch := range partitionBatches {
		// Absolute index of this batch's first partition within the
		// deadline (sum of the sizes of all earlier batches).
		// NOTE(review): recomputed per batch — O(n^2) overall, but batch
		// counts are small.
		batchPartitionStartIdx := 0
		for _, batch := range partitionBatches[:batchIdx] {
			batchPartitionStartIdx += len(batch)
		}

		params := miner.SubmitWindowedPoStParams{
			Deadline:   di.Index,
			Partitions: make([]miner.PoStPartition, 0, len(batch)),
			Proofs:     nil,
		}

		skipCount := uint64(0)
		postSkipped := bitfield.New() // sectors the prover reported as bad; excluded on retry
		var postOut []proof2.PoStProof
		somethingToProve := true

		// Retry proof generation up to 5 times, skipping sectors the
		// prover flags as bad on each failed attempt.
		for retries := 0; retries < 5; retries++ {
			var partitions []miner.PoStPartition
			var sinfos []proof2.SectorInfo
			for partIdx, partition := range batch {
				// TODO: Can do this in parallel
				// Prove live minus faulty, plus sectors declared recovering.
				toProve, err := bitfield.SubtractBitField(partition.LiveSectors, partition.FaultySectors)
				if err != nil {
					return nil, xerrors.Errorf("removing faults from set of sectors to prove: %w", err)
				}
				toProve, err = bitfield.MergeBitFields(toProve, partition.RecoveringSectors)
				if err != nil {
					return nil, xerrors.Errorf("adding recoveries to set of sectors to prove: %w", err)
				}

				good, err := s.checkSectors(ctx, toProve)
				if err != nil {
					return nil, xerrors.Errorf("checking sectors to skip: %w", err)
				}

				good, err = bitfield.SubtractBitField(good, postSkipped)
				if err != nil {
					return nil, xerrors.Errorf("toProve - postSkipped: %w", err)
				}

				skipped, err := bitfield.SubtractBitField(toProve, good)
				if err != nil {
					return nil, xerrors.Errorf("toProve - good: %w", err)
				}

				sc, err := skipped.Count()
				if err != nil {
					return nil, xerrors.Errorf("getting skipped sector count: %w", err)
				}

				skipCount += sc

				ssi, err := s.sectorsForProof(ctx, good, partition.AllSectors, ts)
				if err != nil {
					return nil, xerrors.Errorf("getting sorted sector info: %w", err)
				}

				if len(ssi) == 0 {
					continue
				}

				sinfos = append(sinfos, ssi...)
				partitions = append(partitions, miner.PoStPartition{
					Index:   uint64(batchPartitionStartIdx + partIdx),
					Skipped: skipped,
				})
			}

			if len(sinfos) == 0 {
				// nothing to prove for this batch
				somethingToProve = false
				break
			}

			// Generate proof
			log.Infow("running window post",
				"chain-random", rand,
				"deadline", di,
				"height", ts.Height(),
				"skipped", skipCount)

			tsStart := build.Clock.Now()

			mid, err := address.IDFromAddress(s.actor)
			if err != nil {
				return nil, err
			}

			var ps []abi.SectorID
			postOut, ps, err = s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), sinfos, abi.PoStRandomness(rand))
			elapsed := time.Since(tsStart)

			log.Infow("computing window post", "batch", batchIdx, "elapsed", elapsed)

			if err == nil {
				// Proof generation successful, stop retrying
				params.Partitions = append(params.Partitions, partitions...)

				break
			}

			// Proof generation failed, so retry

			if len(ps) == 0 {
				// No sectors flagged: the failure isn't attributable to
				// specific sectors, so retrying would not help.
				return nil, xerrors.Errorf("running window post failed: %w", err)
			}

			log.Warnw("generate window post skipped sectors", "sectors", ps, "error", err, "try", retries)

			skipCount += uint64(len(ps))
			for _, sector := range ps {
				postSkipped.Set(uint64(sector.Number))
			}
		}

		// Nothing to prove for this batch, try the next batch
		if !somethingToProve {
			continue
		}

		if len(postOut) == 0 {
			return nil, xerrors.Errorf("received no proofs back from generate window post")
		}

		params.Proofs = postOut

		posts = append(posts, params)
	}

	return posts, nil
}
|
|
|
|
|
2020-09-18 22:40:49 +00:00
|
|
|
func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition) ([][]api.Partition, error) {
|
2020-09-09 13:01:37 +00:00
|
|
|
// We don't want to exceed the number of sectors allowed in a message.
|
|
|
|
// So given the number of sectors in a partition, work out the number of
|
|
|
|
// partitions that can be in a message without exceeding sectors per
|
|
|
|
// message:
|
|
|
|
// floor(number of sectors allowed in a message / sectors per partition)
|
|
|
|
// eg:
|
|
|
|
// max sectors per message 7: ooooooo
|
|
|
|
// sectors per partition 3: ooo
|
|
|
|
// partitions per message 2: oooOOO
|
|
|
|
// <1><2> (3rd doesn't fit)
|
2020-10-08 01:09:33 +00:00
|
|
|
partitionsPerMsg, err := policy.GetMaxPoStPartitions(s.proofType)
|
|
|
|
if err != nil {
|
|
|
|
return nil, xerrors.Errorf("getting sectors per partition: %w", err)
|
|
|
|
}
|
2019-11-28 17:44:49 +00:00
|
|
|
|
2020-09-09 13:01:37 +00:00
|
|
|
// The number of messages will be:
|
|
|
|
// ceiling(number of partitions / partitions per message)
|
|
|
|
batchCount := len(partitions) / partitionsPerMsg
|
|
|
|
if len(partitions)%partitionsPerMsg != 0 {
|
|
|
|
batchCount++
|
|
|
|
}
|
|
|
|
|
|
|
|
// Split the partitions into batches
|
2020-09-18 22:40:49 +00:00
|
|
|
batches := make([][]api.Partition, 0, batchCount)
|
2020-09-09 13:01:37 +00:00
|
|
|
for i := 0; i < len(partitions); i += partitionsPerMsg {
|
|
|
|
end := i + partitionsPerMsg
|
|
|
|
if end > len(partitions) {
|
|
|
|
end = len(partitions)
|
|
|
|
}
|
|
|
|
batches = append(batches, partitions[i:end])
|
|
|
|
}
|
2020-09-18 16:03:59 +00:00
|
|
|
|
2020-09-09 13:01:37 +00:00
|
|
|
return batches, nil
|
2019-11-28 17:44:49 +00:00
|
|
|
}
|
|
|
|
|
2020-10-08 01:09:33 +00:00
|
|
|
func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors, allSectors bitfield.BitField, ts *types.TipSet) ([]proof2.SectorInfo, error) {
|
2020-09-21 19:05:01 +00:00
|
|
|
sset, err := s.api.StateMinerSectors(ctx, s.actor, &goodSectors, ts.Key())
|
2019-11-28 17:44:49 +00:00
|
|
|
if err != nil {
|
2020-04-07 19:55:34 +00:00
|
|
|
return nil, err
|
2019-11-28 17:44:49 +00:00
|
|
|
}
|
|
|
|
|
2020-08-26 02:53:21 +00:00
|
|
|
if len(sset) == 0 {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2020-10-08 01:09:33 +00:00
|
|
|
substitute := proof2.SectorInfo{
|
2020-09-21 19:05:01 +00:00
|
|
|
SectorNumber: sset[0].SectorNumber,
|
|
|
|
SealedCID: sset[0].SealedCID,
|
|
|
|
SealProof: sset[0].SealProof,
|
2020-08-26 02:53:21 +00:00
|
|
|
}
|
|
|
|
|
2020-10-08 01:09:33 +00:00
|
|
|
sectorByID := make(map[uint64]proof2.SectorInfo, len(sset))
|
2020-08-26 02:53:21 +00:00
|
|
|
for _, sector := range sset {
|
2020-10-08 01:09:33 +00:00
|
|
|
sectorByID[uint64(sector.SectorNumber)] = proof2.SectorInfo{
|
2020-09-21 19:05:01 +00:00
|
|
|
SectorNumber: sector.SectorNumber,
|
|
|
|
SealedCID: sector.SealedCID,
|
|
|
|
SealProof: sector.SealProof,
|
2019-11-28 17:44:49 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-08 01:09:33 +00:00
|
|
|
proofSectors := make([]proof2.SectorInfo, 0, len(sset))
|
2020-08-26 02:53:21 +00:00
|
|
|
if err := allSectors.ForEach(func(sectorNo uint64) error {
|
|
|
|
if info, found := sectorByID[sectorNo]; found {
|
|
|
|
proofSectors = append(proofSectors, info)
|
|
|
|
} else {
|
|
|
|
proofSectors = append(proofSectors, substitute)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
return nil, xerrors.Errorf("iterating partition sector bitmap: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return proofSectors, nil
|
2019-11-28 17:44:49 +00:00
|
|
|
}
|
|
|
|
|
2020-08-11 15:30:54 +00:00
|
|
|
func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.SubmitWindowedPoStParams) (*types.SignedMessage, error) {
|
2019-11-28 17:44:49 +00:00
|
|
|
ctx, span := trace.StartSpan(ctx, "storage.commitPost")
|
|
|
|
defer span.End()
|
|
|
|
|
2020-07-20 13:45:17 +00:00
|
|
|
var sm *types.SignedMessage
|
|
|
|
|
2019-11-28 17:44:49 +00:00
|
|
|
enc, aerr := actors.SerializeParams(proof)
|
|
|
|
if aerr != nil {
|
2020-09-16 01:19:27 +00:00
|
|
|
return nil, xerrors.Errorf("could not serialize submit window post parameters: %w", aerr)
|
2019-11-28 17:44:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
msg := &types.Message{
|
2020-08-01 14:23:13 +00:00
|
|
|
To: s.actor,
|
2020-08-19 21:25:58 +00:00
|
|
|
From: s.worker,
|
2020-10-08 20:32:54 +00:00
|
|
|
Method: miner.Methods.SubmitWindowedPoSt,
|
2020-08-01 14:23:13 +00:00
|
|
|
Params: enc,
|
2020-09-09 18:34:55 +00:00
|
|
|
Value: types.NewInt(0),
|
2019-11-28 17:44:49 +00:00
|
|
|
}
|
2020-08-19 21:25:58 +00:00
|
|
|
spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)}
|
|
|
|
s.setSender(ctx, msg, spec)
|
2019-11-28 17:44:49 +00:00
|
|
|
|
|
|
|
// TODO: consider maybe caring about the output
|
2020-08-19 21:25:58 +00:00
|
|
|
sm, err := s.api.MpoolPushMessage(ctx, msg, spec)
|
2020-08-26 15:38:23 +00:00
|
|
|
|
2019-11-28 17:44:49 +00:00
|
|
|
if err != nil {
|
2020-08-11 15:30:54 +00:00
|
|
|
return nil, xerrors.Errorf("pushing message to mpool: %w", err)
|
2019-11-28 17:44:49 +00:00
|
|
|
}
|
|
|
|
|
2020-04-21 17:22:53 +00:00
|
|
|
log.Infof("Submitted window post: %s", sm.Cid())
|
2019-11-28 17:44:49 +00:00
|
|
|
|
2020-01-29 22:47:28 +00:00
|
|
|
go func() {
|
2020-06-03 21:42:06 +00:00
|
|
|
rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid(), build.MessageConfidence)
|
2020-01-29 22:47:28 +00:00
|
|
|
if err != nil {
|
|
|
|
log.Error(err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if rec.Receipt.ExitCode == 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-04-21 17:22:53 +00:00
|
|
|
log.Errorf("Submitting window post %s failed: exit %d", sm.Cid(), rec.Receipt.ExitCode)
|
2020-01-29 22:47:28 +00:00
|
|
|
}()
|
|
|
|
|
2020-08-11 15:30:54 +00:00
|
|
|
return sm, nil
|
2019-11-28 17:44:49 +00:00
|
|
|
}
|
2020-08-19 20:08:04 +00:00
|
|
|
|
2020-08-19 21:25:58 +00:00
|
|
|
func (s *WindowPoStScheduler) setSender(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) {
|
2020-08-19 20:08:04 +00:00
|
|
|
mi, err := s.api.StateMinerInfo(ctx, s.actor, types.EmptyTSK)
|
|
|
|
if err != nil {
|
|
|
|
log.Errorw("error getting miner info", "error", err)
|
|
|
|
|
|
|
|
// better than just failing
|
|
|
|
msg.From = s.worker
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-08-19 21:25:58 +00:00
|
|
|
gm, err := s.api.GasEstimateMessageGas(ctx, msg, spec, types.EmptyTSK)
|
|
|
|
if err != nil {
|
|
|
|
log.Errorw("estimating gas", "error", err)
|
|
|
|
msg.From = s.worker
|
|
|
|
return
|
|
|
|
}
|
|
|
|
*msg = *gm
|
|
|
|
|
2020-08-19 20:08:04 +00:00
|
|
|
minFunds := big.Add(msg.RequiredFunds(), msg.Value)
|
|
|
|
|
|
|
|
pa, err := AddressFor(ctx, s.api, mi, PoStAddr, minFunds)
|
|
|
|
if err != nil {
|
2020-09-16 01:19:27 +00:00
|
|
|
log.Errorw("error selecting address for window post", "error", err)
|
2020-08-19 20:08:04 +00:00
|
|
|
msg.From = s.worker
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
msg.From = pa
|
2020-08-19 23:26:13 +00:00
|
|
|
}
|