Merge branch 'testnet/3' of /tmp/lotus into feat/extract-lotus-goodies

laser 2020-04-06 16:20:09 -07:00
commit 52afe582e1
16 changed files with 3026 additions and 0 deletions

1214
cbor_gen.go Normal file

File diff suppressed because it is too large

157
checks.go Normal file

@@ -0,0 +1,157 @@
package sealing
import (
"context"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/sector-storage/ffiwrapper"
"github.com/filecoin-project/sector-storage/zerocomm"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/crypto"
)
// TODO: For now we handle this by halting state execution; once we get jsonrpc reconnecting,
// we should implement some wait-for-api logic
type ErrApi struct{ error }
type ErrInvalidDeals struct{ error }
type ErrInvalidPiece struct{ error }
type ErrExpiredDeals struct{ error }
type ErrBadCommD struct{ error }
type ErrExpiredTicket struct{ error }
type ErrBadSeed struct{ error }
type ErrInvalidProof struct{ error }
// checkPieces validates that:
// - Each piece has a corresponding on chain deal
// - Piece commitments match with on chain deals
// - Piece sizes match
// - Deals aren't expired
func checkPieces(ctx context.Context, si SectorInfo, api SealingAPI) error {
tok, height, err := api.ChainHead(ctx)
if err != nil {
return &ErrApi{xerrors.Errorf("getting chain head: %w", err)}
}
for i, piece := range si.Pieces {
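// Pieces without a deal ID are filler pieces; they must carry the zero piece
// commitment for their size.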
if piece.DealID == nil {
exp := zerocomm.ZeroPieceCommitment(piece.Size)
if piece.CommP != exp {
return &ErrInvalidPiece{xerrors.Errorf("piece %d of sector %d has no deal, but its CommP %+v is not the zero piece commitment", i, si.SectorNumber, piece.CommP)}
}
continue
}
proposal, _, err := api.StateMarketStorageDeal(ctx, *piece.DealID, tok)
if err != nil {
return &ErrApi{xerrors.Errorf("getting deal %d for piece %d: %w", piece.DealID, i, err)}
}
if proposal.PieceCID != piece.CommP {
return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d references deal %d with wrong CommP: %x != %x", i, len(si.Pieces), si.SectorNumber, piece.DealID, piece.CommP, proposal.PieceCID)}
}
if piece.Size != proposal.PieceSize.Unpadded() {
return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d references deal %d with different size: %d != %d", i, len(si.Pieces), si.SectorNumber, piece.DealID, piece.Size, proposal.PieceSize)}
}
if height >= proposal.StartEpoch {
return &ErrExpiredDeals{xerrors.Errorf("piece %d (of %d) of sector %d references expired deal %d - should start at %d, head %d", i, len(si.Pieces), si.SectorNumber, piece.DealID, proposal.StartEpoch, height)}
}
}
return nil
}
// checkPrecommit checks that data commitment generated in the sealing process
// matches pieces, and that the seal ticket isn't expired
func checkPrecommit(ctx context.Context, maddr address.Address, si SectorInfo, api SealingAPI) (err error) {
tok, height, err := api.ChainHead(ctx)
if err != nil {
return &ErrApi{xerrors.Errorf("getting chain head: %w", err)}
}
commD, err := api.StateComputeDataCommitment(ctx, maddr, si.SectorType, si.deals(), tok)
if err != nil {
return &ErrApi{xerrors.Errorf("calling StateComputeDataCommitment: %w", err)}
}
if !commD.Equals(*si.CommD) {
return &ErrBadCommD{xerrors.Errorf("on chain CommD differs from sector: %s != %s", commD, si.CommD)}
}
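// The ticket is considered expired once the chain head is more than
// SealRandomnessLookbackLimit epochs past TicketEpoch + SealRandomnessLookback.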
if int64(height)-int64(si.TicketEpoch+SealRandomnessLookback) > SealRandomnessLookbackLimit {
return &ErrExpiredTicket{xerrors.Errorf("ticket expired: seal height: %d, head: %d", si.TicketEpoch+SealRandomnessLookback, height)}
}
return nil
}
func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte) (err error) {
tok, _, err := m.api.ChainHead(ctx)
if err != nil {
return &ErrApi{xerrors.Errorf("getting chain head: %w", err)}
}
if si.SeedEpoch == 0 {
return &ErrBadSeed{xerrors.Errorf("seed epoch was not set")}
}
pci, err := m.api.StateSectorPreCommitInfo(ctx, m.maddr, si.SectorNumber, tok)
if err != nil {
return xerrors.Errorf("getting precommit info: %w", err)
}
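// The interactive seed must be drawn exactly PreCommitChallengeDelay epochs
// after the pre-commit message landed on chain.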
if pci.PreCommitEpoch+miner.PreCommitChallengeDelay != si.SeedEpoch {
return &ErrBadSeed{xerrors.Errorf("seed epoch doesn't match on chain info: %d != %d", pci.PreCommitEpoch+miner.PreCommitChallengeDelay, si.SeedEpoch)}
}
seed, err := m.api.ChainGetRandomness(ctx, tok, crypto.DomainSeparationTag_InteractiveSealChallengeSeed, si.SeedEpoch, nil)
if err != nil {
return &ErrApi{xerrors.Errorf("failed to get randomness for computing seal proof: %w", err)}
}
if string(seed) != string(si.SeedValue) {
return &ErrBadSeed{xerrors.Errorf("seed has changed")}
}
ss, err := m.api.StateMinerSectorSize(ctx, m.maddr, tok)
if err != nil {
return &ErrApi{err}
}
_, spt, err := ffiwrapper.ProofTypeFromSectorSize(ss)
if err != nil {
return err
}
if *si.CommR != pci.Info.SealedCID {
log.Warn("on-chain sealed CID doesn't match!")
}
ok, err := m.verif.VerifySeal(abi.SealVerifyInfo{
SectorID: m.minerSector(si.SectorNumber),
OnChain: abi.OnChainSealVerifyInfo{
SealedCID: pci.Info.SealedCID,
InteractiveEpoch: si.SeedEpoch,
RegisteredProof: spt,
Proof: proof,
SectorNumber: si.SectorNumber,
SealRandEpoch: si.TicketEpoch,
},
Randomness: si.TicketValue,
InteractiveRandomness: si.SeedValue,
UnsealedCID: *si.CommD,
})
if err != nil {
return xerrors.Errorf("verify seal: %w", err)
}
if !ok {
return &ErrInvalidProof{xerrors.New("invalid proof (compute error?)")}
}
return nil
}

13
constants.go Normal file

@@ -0,0 +1,13 @@
package sealing
// Epochs
const Finality = 500
// Epochs
const SealRandomnessLookback = Finality
// Epochs
const SealRandomnessLookbackLimit = SealRandomnessLookback + 2000
// Epochs
const InteractivePoRepConfidence = 6

15
events.go Normal file

@@ -0,0 +1,15 @@
package sealing
import (
"context"
"github.com/filecoin-project/specs-actors/actors/abi"
)
// `curH`-`ts.Height` = `confidence`
type HeightHandler func(ctx context.Context, tok TipSetToken, curH abi.ChainEpoch) error
type RevertHandler func(ctx context.Context, tok TipSetToken) error
type Events interface {
ChainAt(hnd HeightHandler, rev RevertHandler, confidence int, h abi.ChainEpoch) error
}
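// Example: handleWaitSeed (states.go) calls ChainAt with confidence
// InteractivePoRepConfidence and h = randHeight, so the height handler fires
// once the chain head is at least InteractivePoRepConfidence epochs past
// randHeight; the revert handler runs if that tipset is reverted.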

318
fsm.go Normal file

@@ -0,0 +1,318 @@
package sealing
import (
"bytes"
"context"
"encoding/json"
"fmt"
"reflect"
"time"
"golang.org/x/xerrors"
statemachine "github.com/filecoin-project/go-statemachine"
"github.com/filecoin-project/specs-actors/actors/abi"
)
func (m *Sealing) Plan(events []statemachine.Event, user interface{}) (interface{}, uint64, error) {
next, err := m.plan(events, user.(*SectorInfo))
if err != nil || next == nil {
return nil, uint64(len(events)), err
}
return func(ctx statemachine.Context, si SectorInfo) error {
err := next(ctx, si)
if err != nil {
log.Errorf("unhandled sector error (%d): %+v", si.SectorNumber, err)
return nil
}
return nil
}, uint64(len(events)), nil // TODO: This processed event count is not quite correct
}
var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *SectorInfo) error{
UndefinedSectorState: planOne(on(SectorStart{}, Packing)),
Packing: planOne(on(SectorPacked{}, PreCommit1)),
PreCommit1: planOne(
on(SectorPreCommit1{}, PreCommit2),
on(SectorSealPreCommitFailed{}, SealFailed),
on(SectorPackingFailed{}, PackingFailed),
),
PreCommit2: planOne(
on(SectorPreCommit2{}, PreCommitting),
on(SectorSealPreCommitFailed{}, SealFailed),
on(SectorPackingFailed{}, PackingFailed),
),
PreCommitting: planOne(
on(SectorSealPreCommitFailed{}, SealFailed),
on(SectorPreCommitted{}, WaitSeed),
on(SectorChainPreCommitFailed{}, PreCommitFailed),
),
WaitSeed: planOne(
on(SectorSeedReady{}, Committing),
on(SectorChainPreCommitFailed{}, PreCommitFailed),
),
Committing: planCommitting,
CommitWait: planOne(
on(SectorProving{}, FinalizeSector),
on(SectorCommitFailed{}, CommitFailed),
),
FinalizeSector: planOne(
on(SectorFinalized{}, Proving),
),
Proving: planOne(
on(SectorFaultReported{}, FaultReported),
on(SectorFaulty{}, Faulty),
),
SealFailed: planOne(
on(SectorRetrySeal{}, PreCommit1),
),
PreCommitFailed: planOne(
on(SectorRetryPreCommit{}, PreCommitting),
on(SectorRetryWaitSeed{}, WaitSeed),
on(SectorSealPreCommitFailed{}, SealFailed),
),
ComputeProofFailed: planOne(
on(SectorRetryComputeProof{}, Committing),
),
CommitFailed: planOne(
on(SectorSealPreCommitFailed{}, SealFailed),
on(SectorRetryWaitSeed{}, WaitSeed),
on(SectorRetryComputeProof{}, Committing),
on(SectorRetryInvalidProof{}, Committing),
),
Faulty: planOne(
on(SectorFaultReported{}, FaultReported),
),
FaultedFinal: final,
}
func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(statemachine.Context, SectorInfo) error, error) {
/////
// First process all events
for _, event := range events {
e, err := json.Marshal(event)
if err != nil {
log.Errorf("marshaling event for logging: %+v", err)
continue
}
l := Log{
Timestamp: uint64(time.Now().Unix()),
Message: string(e),
Kind: fmt.Sprintf("event;%T", event.User),
}
if err, iserr := event.User.(xerrors.Formatter); iserr {
l.Trace = fmt.Sprintf("%+v", err)
}
state.Log = append(state.Log, l)
}
p := fsmPlanners[state.State]
if p == nil {
return nil, xerrors.Errorf("planner for state %s not found", state.State)
}
if err := p(events, state); err != nil {
return nil, xerrors.Errorf("running planner for state %s failed: %w", state.State, err)
}
/////
// Now decide what to do next
/*
* Empty
| |
| v
*<- Packing <- incoming
| |
| v
*<- PreCommit1 <--> SealFailed
| | ^^^
| v |||
*<- PreCommit2 -------/||
| | ||
| v /-------/|
* PreCommitting <-----+---> PreCommitFailed
| | | ^
| v | |
*<- WaitSeed -----------+-----/
| ||| ^ |
| ||| \--------*-----/
| ||| |
| vvv v----+----> ComputeProofFailed
*<- Committing |
| | ^--> CommitFailed
| v ^
*<- CommitWait ---/
| |
| v
*<- Proving
|
v
FailedUnrecoverable
UndefinedSectorState <- ¯\_(ツ)_/¯
| ^
*---------------------/
*/
switch state.State {
// Happy path
case Packing:
return m.handlePacking, nil
case PreCommit1:
return m.handlePreCommit1, nil
case PreCommit2:
return m.handlePreCommit2, nil
case PreCommitting:
return m.handlePreCommitting, nil
case WaitSeed:
return m.handleWaitSeed, nil
case Committing:
return m.handleCommitting, nil
case CommitWait:
return m.handleCommitWait, nil
case FinalizeSector:
return m.handleFinalizeSector, nil
case Proving:
// TODO: track sector health / expiration
log.Infof("Proving sector %d", state.SectorNumber)
// Handled failure modes
case SealFailed:
return m.handleSealFailed, nil
case PreCommitFailed:
return m.handlePreCommitFailed, nil
case ComputeProofFailed:
return m.handleComputeProofFailed, nil
case CommitFailed:
return m.handleCommitFailed, nil
// Faults
case Faulty:
return m.handleFaulty, nil
case FaultReported:
return m.handleFaultReported, nil
// Fatal errors
case UndefinedSectorState:
log.Error("sector update with undefined state!")
case FailedUnrecoverable:
log.Errorf("sector %d failed unrecoverably", state.SectorNumber)
default:
log.Errorf("unexpected sector update state: %d", state.State)
}
return nil, nil
}
func planCommitting(events []statemachine.Event, state *SectorInfo) error {
for _, event := range events {
switch e := event.User.(type) {
case globalMutator:
if e.applyGlobal(state) {
return nil
}
case SectorCommitted: // the normal case
e.apply(state)
state.State = CommitWait
case SectorSeedReady: // seed changed :/
if e.SeedEpoch == state.SeedEpoch && bytes.Equal(e.SeedValue, state.SeedValue) {
log.Warnf("planCommitting: got SectorSeedReady, but the seed didn't change")
continue // or it didn't!
}
log.Warnf("planCommitting: commit Seed changed")
e.apply(state)
state.State = Committing
return nil
case SectorComputeProofFailed:
state.State = ComputeProofFailed
case SectorSealPreCommitFailed:
state.State = CommitFailed
case SectorCommitFailed:
state.State = CommitFailed
default:
return xerrors.Errorf("planCommitting got event of unknown type %T, events: %+v", event.User, events)
}
}
return nil
}
func (m *Sealing) restartSectors(ctx context.Context) error {
trackedSectors, err := m.ListSectors()
if err != nil {
log.Errorf("loading sector list: %+v", err)
}
for _, sector := range trackedSectors {
if err := m.sectors.Send(uint64(sector.SectorNumber), SectorRestart{}); err != nil {
log.Errorf("restarting sector %d: %+v", sector.SectorNumber, err)
}
}
// TODO: Grab on-chain sector set and diff with trackedSectors
return nil
}
func (m *Sealing) ForceSectorState(ctx context.Context, id abi.SectorNumber, state SectorState) error {
return m.sectors.Send(id, SectorForceState{state})
}
func final(events []statemachine.Event, state *SectorInfo) error {
return xerrors.Errorf("didn't expect any events in state %s, got %+v", state.State, events)
}
func on(mut mutator, next SectorState) func() (mutator, SectorState) {
return func() (mutator, SectorState) {
return mut, next
}
}
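// planOne builds a planner that expects exactly one event per call: global
// mutators are applied in any state, and otherwise the event's type selects
// the mutation to apply and the next state to move to.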
func planOne(ts ...func() (mut mutator, next SectorState)) func(events []statemachine.Event, state *SectorInfo) error {
return func(events []statemachine.Event, state *SectorInfo) error {
if len(events) != 1 {
for _, event := range events {
if gm, ok := event.User.(globalMutator); ok {
gm.applyGlobal(state)
return nil
}
}
return xerrors.Errorf("planner for state %s only has a plan for a single event only, got %+v", state.State, events)
}
if gm, ok := events[0].User.(globalMutator); ok {
gm.applyGlobal(state)
return nil
}
for _, t := range ts {
mut, next := t()
if reflect.TypeOf(events[0].User) != reflect.TypeOf(mut) {
continue
}
if err, iserr := events[0].User.(error); iserr {
log.Warnf("sector %d got error event %T: %+v", state.SectorNumber, events[0].User, err)
}
events[0].User.(mutator).apply(state)
state.State = next
return nil
}
return xerrors.Errorf("planner for state %s received unexpected event %T (%+v)", state.State, events[0].User, events[0])
}
}

195
fsm_events.go Normal file

@@ -0,0 +1,195 @@
package sealing
import (
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-storage/storage"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
)
type mutator interface {
apply(state *SectorInfo)
}
// globalMutator is an event which can apply in every state
type globalMutator interface {
// applyGlobal applies the event to the state. If it returns true,
// event processing should be interrupted
applyGlobal(state *SectorInfo) bool
}
// Global events
type SectorRestart struct{}
func (evt SectorRestart) applyGlobal(*SectorInfo) bool { return false }
type SectorFatalError struct{ error }
func (evt SectorFatalError) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorFatalError) applyGlobal(state *SectorInfo) bool {
log.Errorf("Fatal error on sector %d: %+v", state.SectorNumber, evt.error)
// TODO: Do we want to mark the state as unrecoverable?
// I feel like this should be a softer error, where the user would
// be able to send a retry event of some kind
return true
}
type SectorForceState struct {
State SectorState
}
func (evt SectorForceState) applyGlobal(state *SectorInfo) bool {
state.State = evt.State
return true
}
// Normal path
type SectorStart struct {
ID abi.SectorNumber
SectorType abi.RegisteredProof
Pieces []Piece
}
func (evt SectorStart) apply(state *SectorInfo) {
state.SectorNumber = evt.ID
state.Pieces = evt.Pieces
state.SectorType = evt.SectorType
}
type SectorPacked struct{ Pieces []Piece }
func (evt SectorPacked) apply(state *SectorInfo) {
state.Pieces = append(state.Pieces, evt.Pieces...)
}
type SectorPackingFailed struct{ error }
func (evt SectorPackingFailed) apply(*SectorInfo) {}
type SectorPreCommit1 struct {
PreCommit1Out storage.PreCommit1Out
TicketValue abi.SealRandomness
TicketEpoch abi.ChainEpoch
}
func (evt SectorPreCommit1) apply(state *SectorInfo) {
state.PreCommit1Out = evt.PreCommit1Out
state.TicketEpoch = evt.TicketEpoch
state.TicketValue = evt.TicketValue
}
type SectorPreCommit2 struct {
Sealed cid.Cid
Unsealed cid.Cid
}
func (evt SectorPreCommit2) apply(state *SectorInfo) {
commd := evt.Unsealed
state.CommD = &commd
commr := evt.Sealed
state.CommR = &commr
}
type SectorSealPreCommitFailed struct{ error }
func (evt SectorSealPreCommitFailed) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorSealPreCommitFailed) apply(si *SectorInfo) {
si.InvalidProofs = 0 // reset counter
}
type SectorChainPreCommitFailed struct{ error }
func (evt SectorChainPreCommitFailed) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorChainPreCommitFailed) apply(*SectorInfo) {}
type SectorPreCommitted struct {
Message cid.Cid
}
func (evt SectorPreCommitted) apply(state *SectorInfo) {
state.PreCommitMessage = &evt.Message
}
type SectorSeedReady struct {
SeedValue abi.InteractiveSealRandomness
SeedEpoch abi.ChainEpoch
}
func (evt SectorSeedReady) apply(state *SectorInfo) {
state.SeedEpoch = evt.SeedEpoch
state.SeedValue = evt.SeedValue
}
type SectorComputeProofFailed struct{ error }
func (evt SectorComputeProofFailed) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorComputeProofFailed) apply(*SectorInfo) {}
type SectorCommitFailed struct{ error }
func (evt SectorCommitFailed) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorCommitFailed) apply(*SectorInfo) {}
type SectorCommitted struct {
Message cid.Cid
Proof []byte
}
func (evt SectorCommitted) apply(state *SectorInfo) {
state.Proof = evt.Proof
state.CommitMessage = &evt.Message
}
type SectorProving struct{}
func (evt SectorProving) apply(*SectorInfo) {}
type SectorFinalized struct{}
func (evt SectorFinalized) apply(*SectorInfo) {}
type SectorFinalizeFailed struct{ error }
func (evt SectorFinalizeFailed) FormatError(xerrors.Printer) (next error) { return evt.error }
func (evt SectorFinalizeFailed) apply(*SectorInfo) {}
// Failed state recovery
type SectorRetrySeal struct{}
func (evt SectorRetrySeal) apply(state *SectorInfo) {}
type SectorRetryPreCommit struct{}
func (evt SectorRetryPreCommit) apply(state *SectorInfo) {}
type SectorRetryWaitSeed struct{}
func (evt SectorRetryWaitSeed) apply(state *SectorInfo) {}
type SectorRetryComputeProof struct{}
func (evt SectorRetryComputeProof) apply(state *SectorInfo) {}
type SectorRetryInvalidProof struct{}
func (evt SectorRetryInvalidProof) apply(state *SectorInfo) {
state.InvalidProofs++
}
// Faults
type SectorFaulty struct{}
func (evt SectorFaulty) apply(state *SectorInfo) {}
type SectorFaultReported struct{ reportMsg cid.Cid }
func (evt SectorFaultReported) apply(state *SectorInfo) {
state.FaultReportMsg = &evt.reportMsg
}
type SectorFaultedFinal struct{}

110
fsm_test.go Normal file

@@ -0,0 +1,110 @@
package sealing
import (
"testing"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-statemachine"
)
func init() {
_ = logging.SetLogLevel("*", "INFO")
}
func (t *test) planSingle(evt interface{}) {
_, err := t.s.plan([]statemachine.Event{{evt}}, t.state)
require.NoError(t.t, err)
}
type test struct {
s *Sealing
t *testing.T
state *SectorInfo
next func(statemachine.Context, SectorInfo) error
}
func TestHappyPath(t *testing.T) {
m := test{
s: &Sealing{},
t: t,
state: &SectorInfo{State: Packing},
}
m.planSingle(SectorPacked{})
require.Equal(m.t, m.state.State, PreCommit1)
m.planSingle(SectorPreCommit1{})
require.Equal(m.t, m.state.State, PreCommit2)
m.planSingle(SectorPreCommit2{})
require.Equal(m.t, m.state.State, PreCommitting)
m.planSingle(SectorPreCommitted{})
require.Equal(m.t, m.state.State, WaitSeed)
m.planSingle(SectorSeedReady{})
require.Equal(m.t, m.state.State, Committing)
m.planSingle(SectorCommitted{})
require.Equal(m.t, m.state.State, CommitWait)
m.planSingle(SectorProving{})
require.Equal(m.t, m.state.State, FinalizeSector)
m.planSingle(SectorFinalized{})
require.Equal(m.t, m.state.State, Proving)
}
func TestSeedRevert(t *testing.T) {
m := test{
s: &Sealing{},
t: t,
state: &SectorInfo{State: Packing},
}
m.planSingle(SectorPacked{})
require.Equal(m.t, m.state.State, PreCommit1)
m.planSingle(SectorPreCommit1{})
require.Equal(m.t, m.state.State, PreCommit2)
m.planSingle(SectorPreCommit2{})
require.Equal(m.t, m.state.State, PreCommitting)
m.planSingle(SectorPreCommitted{})
require.Equal(m.t, m.state.State, WaitSeed)
m.planSingle(SectorSeedReady{})
require.Equal(m.t, m.state.State, Committing)
_, err := m.s.plan([]statemachine.Event{{SectorSeedReady{SeedValue: nil, SeedEpoch: 5}}, {SectorCommitted{}}}, m.state)
require.NoError(t, err)
require.Equal(m.t, m.state.State, Committing)
// not changing the seed this time
_, err = m.s.plan([]statemachine.Event{{SectorSeedReady{SeedValue: nil, SeedEpoch: 5}}, {SectorCommitted{}}}, m.state)
require.Equal(m.t, m.state.State, CommitWait)
m.planSingle(SectorProving{})
require.Equal(m.t, m.state.State, FinalizeSector)
m.planSingle(SectorFinalized{})
require.Equal(m.t, m.state.State, Proving)
}
func TestPlanCommittingHandlesSectorCommitFailed(t *testing.T) {
m := test{
s: &Sealing{},
t: t,
state: &SectorInfo{State: Committing},
}
events := []statemachine.Event{{SectorCommitFailed{}}}
require.NoError(t, planCommitting(events, m.state))
require.Equal(t, CommitFailed, m.state.State)
}

81
garbage.go Normal file

@@ -0,0 +1,81 @@
package sealing
import (
"context"
"io"
"golang.org/x/xerrors"
"github.com/filecoin-project/sector-storage/ffiwrapper"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/lotus/lib/nullreader"
)
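// pledgeReader returns a reader that yields size zero bytes, used to
// generate filler (pledge) pieces.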
func (m *Sealing) pledgeReader(size abi.UnpaddedPieceSize) io.Reader {
return io.LimitReader(&nullreader.Reader{}, int64(size))
}
func (m *Sealing) pledgeSector(ctx context.Context, sectorID abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, sizes ...abi.UnpaddedPieceSize) ([]Piece, error) {
if len(sizes) == 0 {
return nil, nil
}
log.Infof("Pledge %d, contains %+v", sectorID, existingPieceSizes)
out := make([]Piece, len(sizes))
for i, size := range sizes {
ppi, err := m.sealer.AddPiece(ctx, sectorID, existingPieceSizes, size, m.pledgeReader(size))
if err != nil {
return nil, xerrors.Errorf("add piece: %w", err)
}
existingPieceSizes = append(existingPieceSizes, size)
out[i] = Piece{
Size: ppi.Size.Unpadded(),
CommP: ppi.PieceCID,
}
}
return out, nil
}
func (m *Sealing) PledgeSector() error {
go func() {
ctx := context.TODO() // we can't use the context from the command which invokes
// this, as we run everything here async, and it's cancelled when the
// command exits
size := abi.PaddedPieceSize(m.sealer.SectorSize()).Unpadded()
_, rt, err := ffiwrapper.ProofTypeFromSectorSize(m.sealer.SectorSize())
if err != nil {
log.Error(err)
return
}
sid, err := m.sc.Next()
if err != nil {
log.Errorf("%+v", err)
return
}
err = m.sealer.NewSector(ctx, m.minerSector(sid))
if err != nil {
log.Errorf("%+v", err)
return
}
pieces, err := m.pledgeSector(ctx, m.minerSector(sid), []abi.UnpaddedPieceSize{}, size)
if err != nil {
log.Errorf("%+v", err)
return
}
if err := m.newSector(sid, rt, pieces); err != nil {
log.Errorf("%+v", err)
return
}
}()
return nil
}

151
sealing.go Normal file

@@ -0,0 +1,151 @@
package sealing
import (
"context"
"io"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
padreader "github.com/filecoin-project/go-padreader"
statemachine "github.com/filecoin-project/go-statemachine"
sectorstorage "github.com/filecoin-project/sector-storage"
"github.com/filecoin-project/sector-storage/ffiwrapper"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin/market"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/crypto"
)
const SectorStorePrefix = "/sectors"
var log = logging.Logger("sectors")
type SealingAPI interface {
StateWaitMsg(context.Context, cid.Cid) (MsgLookup, error)
StateComputeDataCommitment(ctx context.Context, maddr address.Address, sectorType abi.RegisteredProof, deals []abi.DealID, tok TipSetToken) (cid.Cid, error)
StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*miner.SectorPreCommitOnChainInfo, error)
StateMinerSectorSize(context.Context, address.Address, TipSetToken) (abi.SectorSize, error)
StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, market.DealState, error)
SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, gasPrice big.Int, gasLimit int64, params []byte) (cid.Cid, error)
ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error)
ChainGetRandomness(ctx context.Context, tok TipSetToken, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
}
type Sealing struct {
api SealingAPI
events Events
maddr address.Address
worker address.Address
sealer sectorstorage.SectorManager
sectors *statemachine.StateGroup
sc SectorIDCounter
verif ffiwrapper.Verifier
tktFn TicketFn
}
func New(api SealingAPI, events Events, maddr address.Address, worker address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, tktFn TicketFn) *Sealing {
s := &Sealing{
api: api,
events: events,
maddr: maddr,
worker: worker,
sealer: sealer,
sc: sc,
verif: verif,
tktFn: tktFn,
}
s.sectors = statemachine.New(namespace.Wrap(ds, datastore.NewKey(SectorStorePrefix)), s, SectorInfo{})
return s
}
func (m *Sealing) Run(ctx context.Context) error {
if err := m.restartSectors(ctx); err != nil {
log.Errorf("%+v", err)
return xerrors.Errorf("failed load sector states: %w", err)
}
return nil
}
func (m *Sealing) Stop(ctx context.Context) error {
return m.sectors.Stop(ctx)
}
func (m *Sealing) AllocatePiece(size abi.UnpaddedPieceSize) (sectorID abi.SectorNumber, offset uint64, err error) {
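// Only sizes that padreader.PaddedSize maps to themselves are accepted,
// i.e. sizes that are already valid unpadded piece sizes.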
if (padreader.PaddedSize(uint64(size))) != size {
return 0, 0, xerrors.Errorf("cannot allocate unpadded piece")
}
sid, err := m.sc.Next()
if err != nil {
return 0, 0, xerrors.Errorf("getting sector number: %w", err)
}
err = m.sealer.NewSector(context.TODO(), m.minerSector(sid)) // TODO: Put more than one thing in a sector
if err != nil {
return 0, 0, xerrors.Errorf("initializing sector: %w", err)
}
// offset hard-coded to 0 since we only put one thing in a sector for now
return sid, 0, nil
}
func (m *Sealing) SealPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, sectorID abi.SectorNumber, dealID abi.DealID) error {
log.Infof("Seal piece for deal %d", dealID)
ppi, err := m.sealer.AddPiece(ctx, m.minerSector(sectorID), []abi.UnpaddedPieceSize{}, size, r)
if err != nil {
return xerrors.Errorf("adding piece to sector: %w", err)
}
_, rt, err := ffiwrapper.ProofTypeFromSectorSize(m.sealer.SectorSize())
if err != nil {
return xerrors.Errorf("bad sector size: %w", err)
}
return m.newSector(sectorID, rt, []Piece{
{
DealID: &dealID,
Size: ppi.Size.Unpadded(),
CommP: ppi.PieceCID,
},
})
}
func (m *Sealing) newSector(sid abi.SectorNumber, rt abi.RegisteredProof, pieces []Piece) error {
log.Infof("Start sealing %d", sid)
return m.sectors.Send(uint64(sid), SectorStart{
ID: sid,
Pieces: pieces,
SectorType: rt,
})
}
func (m *Sealing) minerSector(num abi.SectorNumber) abi.SectorID {
mid, err := address.IDFromAddress(m.maddr)
if err != nil {
panic(err)
}
return abi.SectorID{
Number: num,
Miner: abi.ActorID(mid),
}
}
func (m *Sealing) Address() address.Address {
return m.maddr
}

29
sector_state.go Normal file

@@ -0,0 +1,29 @@
package sealing
type SectorState string
const (
UndefinedSectorState SectorState = ""
// happy path
Empty SectorState = "Empty"
Packing SectorState = "Packing" // sector not in sealStore, and not on chain
PreCommit1 SectorState = "PreCommit1" // do PreCommit1
PreCommit2 SectorState = "PreCommit2" // do PreCommit2
PreCommitting SectorState = "PreCommitting" // on chain pre-commit
WaitSeed SectorState = "WaitSeed" // waiting for seed
Committing SectorState = "Committing"
CommitWait SectorState = "CommitWait" // waiting for message to land on chain
FinalizeSector SectorState = "FinalizeSector"
Proving SectorState = "Proving"
// error modes
FailedUnrecoverable SectorState = "FailedUnrecoverable"
SealFailed SectorState = "SealFailed"
PreCommitFailed SectorState = "PreCommitFailed"
ComputeProofFailed SectorState = "ComputeProofFailed"
CommitFailed SectorState = "CommitFailed"
PackingFailed SectorState = "PackingFailed"
Faulty SectorState = "Faulty" // sector is corrupted or gone for some reason
FaultReported SectorState = "FaultReported" // sector has been declared as a fault on chain
FaultedFinal SectorState = "FaultedFinal" // fault declared on chain
)

295
states.go Normal file

@@ -0,0 +1,295 @@
package sealing
import (
"bytes"
"context"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-statemachine"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/specs-storage/storage"
)
func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) error {
log.Infow("performing filling up rest of the sector...", "sector", sector.SectorNumber)
var allocated abi.UnpaddedPieceSize
for _, piece := range sector.Pieces {
allocated += piece.Size
}
ubytes := abi.PaddedPieceSize(m.sealer.SectorSize()).Unpadded()
if allocated > ubytes {
return xerrors.Errorf("too much data in sector: %d > %d", allocated, ubytes)
}
fillerSizes, err := fillersFromRem(ubytes - allocated)
if err != nil {
return err
}
if len(fillerSizes) > 0 {
log.Warnf("Creating %d filler pieces for sector %d", len(fillerSizes), sector.SectorNumber)
}
pieces, err := m.pledgeSector(ctx.Context(), m.minerSector(sector.SectorNumber), sector.existingPieces(), fillerSizes...)
if err != nil {
return xerrors.Errorf("filling up the sector (%v): %w", fillerSizes, err)
}
return ctx.Send(SectorPacked{Pieces: pieces})
}
func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) error {
if err := checkPieces(ctx.Context(), sector, m.api); err != nil { // Sanity check state
switch err.(type) {
case *ErrApi:
log.Errorf("handlePreCommit1: api error, not proceeding: %+v", err)
return nil
case *ErrInvalidDeals:
return ctx.Send(SectorPackingFailed{xerrors.Errorf("invalid deals in sector: %w", err)})
case *ErrExpiredDeals: // Probably not much we can do here, maybe re-pack the sector?
return ctx.Send(SectorPackingFailed{xerrors.Errorf("expired deals in sector: %w", err)})
default:
return xerrors.Errorf("checkPieces sanity check error: %w", err)
}
}
log.Infow("performing sector replication...", "sector", sector.SectorNumber)
ticketValue, ticketEpoch, err := m.tktFn(ctx.Context())
if err != nil {
return ctx.Send(SectorSealPreCommitFailed{xerrors.Errorf("getting ticket failed: %w", err)})
}
pc1o, err := m.sealer.SealPreCommit1(ctx.Context(), m.minerSector(sector.SectorNumber), ticketValue, sector.pieceInfos())
if err != nil {
return ctx.Send(SectorSealPreCommitFailed{xerrors.Errorf("seal pre commit(1) failed: %w", err)})
}
return ctx.Send(SectorPreCommit1{
PreCommit1Out: pc1o,
TicketValue: ticketValue,
TicketEpoch: ticketEpoch,
})
}
func (m *Sealing) handlePreCommit2(ctx statemachine.Context, sector SectorInfo) error {
cids, err := m.sealer.SealPreCommit2(ctx.Context(), m.minerSector(sector.SectorNumber), sector.PreCommit1Out)
if err != nil {
return ctx.Send(SectorSealPreCommitFailed{xerrors.Errorf("seal pre commit(2) failed: %w", err)})
}
return ctx.Send(SectorPreCommit2{
Unsealed: cids.Unsealed,
Sealed: cids.Sealed,
})
}
func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInfo) error {
if err := checkPrecommit(ctx.Context(), m.Address(), sector, m.api); err != nil {
switch err.(type) {
case *ErrApi:
log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err)
return nil
case *ErrBadCommD: // TODO: Should this just go back to packing? (not really needed since handlePreCommit1 will do that too)
return ctx.Send(SectorSealPreCommitFailed{xerrors.Errorf("bad CommD error: %w", err)})
case *ErrExpiredTicket:
return ctx.Send(SectorSealPreCommitFailed{xerrors.Errorf("ticket expired: %w", err)})
default:
return xerrors.Errorf("checkPrecommit sanity check error: %w", err)
}
}
params := &miner.SectorPreCommitInfo{
Expiration: 10000000, // TODO: implement
SectorNumber: sector.SectorNumber,
RegisteredProof: sector.SectorType,
SealedCID: *sector.CommR,
SealRandEpoch: sector.TicketEpoch,
DealIDs: sector.deals(),
}
enc := new(bytes.Buffer)
if err := params.MarshalCBOR(enc); err != nil {
return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("could not serialize pre-commit sector parameters: %w", err)})
}
log.Info("submitting precommit for sector: ", sector.SectorNumber)
mcid, err := m.api.SendMsg(ctx.Context(), m.worker, m.maddr, builtin.MethodsMiner.PreCommitSector, big.NewInt(0), big.NewInt(1), 1000000, enc.Bytes())
if err != nil {
return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)})
}
return ctx.Send(SectorPreCommitted{Message: mcid})
}
func (m *Sealing) handleWaitSeed(ctx statemachine.Context, sector SectorInfo) error {
// would be ideal to just use the events.Called handler, but it wouldn't be able to handle individual message timeouts
log.Info("Sector precommitted: ", sector.SectorNumber)
mw, err := m.api.StateWaitMsg(ctx.Context(), *sector.PreCommitMessage)
if err != nil {
return ctx.Send(SectorChainPreCommitFailed{err})
}
if mw.Receipt.ExitCode != 0 {
log.Error("sector precommit failed: ", mw.Receipt.ExitCode)
err := xerrors.Errorf("sector precommit failed: %d", mw.Receipt.ExitCode)
return ctx.Send(SectorChainPreCommitFailed{err})
}
log.Info("precommit message landed on chain: ", sector.SectorNumber)
pci, err := m.api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, mw.TipSetTok)
if err != nil {
return xerrors.Errorf("getting precommit info: %w", err)
}
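// The interactive seed becomes available PreCommitChallengeDelay epochs after
// the epoch the pre-commit landed in; wait for that height with the
// configured confidence.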
randHeight := pci.PreCommitEpoch + miner.PreCommitChallengeDelay
err = m.events.ChainAt(func(ectx context.Context, tok TipSetToken, curH abi.ChainEpoch) error {
rand, err := m.api.ChainGetRandomness(ectx, tok, crypto.DomainSeparationTag_InteractiveSealChallengeSeed, randHeight, nil)
if err != nil {
err = xerrors.Errorf("failed to get randomness for computing seal proof: %w", err)
_ = ctx.Send(SectorFatalError{error: err})
return err
}
_ = ctx.Send(SectorSeedReady{SeedValue: abi.InteractiveSealRandomness(rand), SeedEpoch: randHeight})
return nil
}, func(ctx context.Context, ts TipSetToken) error {
log.Warn("revert in interactive commit sector step")
// TODO: need to cancel running process and restart...
return nil
}, InteractivePoRepConfidence, randHeight)
if err != nil {
log.Warn("waitForPreCommitMessage ChainAt errored: ", err)
}
return nil
}
func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) error {
log.Info("scheduling seal proof computation...")
log.Infof("KOMIT %d %x(%d); %x(%d); %v; r:%x; d:%x", sector.SectorNumber, sector.TicketValue, sector.TicketEpoch, sector.SeedValue, sector.SeedEpoch, sector.pieceInfos(), sector.CommR, sector.CommD)
cids := storage.SectorCids{
Unsealed: *sector.CommD,
Sealed: *sector.CommR,
}
c2in, err := m.sealer.SealCommit1(ctx.Context(), m.minerSector(sector.SectorNumber), sector.TicketValue, sector.SeedValue, sector.pieceInfos(), cids)
if err != nil {
return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed: %w", err)})
}
proof, err := m.sealer.SealCommit2(ctx.Context(), m.minerSector(sector.SectorNumber), c2in)
if err != nil {
return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed: %w", err)})
}
if err := m.checkCommit(ctx.Context(), sector, proof); err != nil {
return ctx.Send(SectorCommitFailed{xerrors.Errorf("commit check error: %w", err)})
}
// TODO: Consider splitting states and persist proof for faster recovery
params := &miner.ProveCommitSectorParams{
SectorNumber: sector.SectorNumber,
Proof: proof,
}
enc := new(bytes.Buffer)
if err := params.MarshalCBOR(enc); err != nil {
return ctx.Send(SectorCommitFailed{xerrors.Errorf("could not serialize commit sector parameters: %w", err)})
}
// TODO: check seed / ticket are up to date
mcid, err := m.api.SendMsg(ctx.Context(), m.worker, m.maddr, builtin.MethodsMiner.ProveCommitSector, big.NewInt(0), big.NewInt(1), 1000000, enc.Bytes())
if err != nil {
return ctx.Send(SectorCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)})
}
return ctx.Send(SectorCommitted{
Proof: proof,
Message: mcid,
})
}
func (m *Sealing) handleCommitWait(ctx statemachine.Context, sector SectorInfo) error {
if sector.CommitMessage == nil {
log.Errorf("sector %d entered commit wait state without a message cid", sector.SectorNumber)
return ctx.Send(SectorCommitFailed{xerrors.Errorf("entered commit wait with no commit cid")})
}
mw, err := m.api.StateWaitMsg(ctx.Context(), *sector.CommitMessage)
if err != nil {
return ctx.Send(SectorCommitFailed{xerrors.Errorf("failed to wait for porep inclusion: %w", err)})
}
if mw.Receipt.ExitCode != 0 {
return ctx.Send(SectorCommitFailed{xerrors.Errorf("submitting sector proof failed (exit=%d, msg=%s) (t:%x; s:%x(%d); p:%x)", mw.Receipt.ExitCode, sector.CommitMessage, sector.TicketValue, sector.SeedValue, sector.SeedEpoch, sector.Proof)})
}
return ctx.Send(SectorProving{})
}
func (m *Sealing) handleFinalizeSector(ctx statemachine.Context, sector SectorInfo) error {
// TODO: Maybe wait for some finality
if err := m.sealer.FinalizeSector(ctx.Context(), m.minerSector(sector.SectorNumber)); err != nil {
return ctx.Send(SectorFinalizeFailed{xerrors.Errorf("finalize sector: %w", err)})
}
return ctx.Send(SectorFinalized{})
}
func (m *Sealing) handleFaulty(ctx statemachine.Context, sector SectorInfo) error {
// TODO: check if the fault has already been reported, and that this sector is even valid
// TODO: coalesce faulty sector reporting
bf := abi.NewBitField()
bf.Set(uint64(sector.SectorNumber))
params := &miner.DeclareTemporaryFaultsParams{
SectorNumbers: bf,
Duration: 99999999, // TODO: This is very unlikely to be the correct number
}
enc := new(bytes.Buffer)
if err := params.MarshalCBOR(enc); err != nil {
return ctx.Send(SectorCommitFailed{xerrors.Errorf("failed to serialize declare fault params: %w", err)})
}
mcid, err := m.api.SendMsg(ctx.Context(), m.worker, m.maddr, builtin.MethodsMiner.DeclareTemporaryFaults, big.NewInt(0), big.NewInt(1), 1000000, enc.Bytes())
if err != nil {
return xerrors.Errorf("failed to push declare faults message to network: %w", err)
}
return ctx.Send(SectorFaultReported{reportMsg: mcid})
}
func (m *Sealing) handleFaultReported(ctx statemachine.Context, sector SectorInfo) error {
if sector.FaultReportMsg == nil {
return xerrors.Errorf("entered fault reported state without a FaultReportMsg cid")
}
mw, err := m.api.StateWaitMsg(ctx.Context(), *sector.FaultReportMsg)
if err != nil {
return xerrors.Errorf("failed to wait for fault declaration: %w", err)
}
if mw.Receipt.ExitCode != 0 {
log.Errorf("UNHANDLED: declaring sector fault failed (exit=%d, msg=%s) (id: %d)", mw.Receipt.ExitCode, *sector.FaultReportMsg, sector.SectorNumber)
return xerrors.Errorf("UNHANDLED: submitting fault declaration failed (exit %d)", mw.Receipt.ExitCode)
}
return ctx.Send(SectorFaultedFinal{})
}

161
states_failed.go Normal file

@@ -0,0 +1,161 @@
package sealing
import (
"time"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-statemachine"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
)
const minRetryTime = 1 * time.Minute
func failedCooldown(ctx statemachine.Context, sector SectorInfo) error {
// TODO: Exponential backoff when we see consecutive failures
if len(sector.Log) < 1 {
return nil
}
retryStart := time.Unix(int64(sector.Log[len(sector.Log)-1].Timestamp), 0).Add(minRetryTime)
if !time.Now().After(retryStart) {
log.Infof("%s(%d), waiting %s before retrying", sector.State, sector.SectorNumber, time.Until(retryStart))
select {
case <-time.After(time.Until(retryStart)):
case <-ctx.Context().Done():
return ctx.Context().Err()
}
}
return nil
}
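// checkPreCommitted looks up the sector's pre-commit info on chain; the bool
// result is true when a temporary (API) error occurred and the result should
// not be trusted.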
func (m *Sealing) checkPreCommitted(ctx statemachine.Context, sector SectorInfo) (*miner.SectorPreCommitOnChainInfo, bool) {
tok, _, err := m.api.ChainHead(ctx.Context())
if err != nil {
log.Errorf("handleSealFailed(%d): temp error: %+v", sector.SectorNumber, err)
return nil, true
}
info, err := m.api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, tok)
if err != nil {
log.Errorf("handleSealFailed(%d): temp error: %+v", sector.SectorNumber, err)
return nil, true
}
return info, false
}
func (m *Sealing) handleSealFailed(ctx statemachine.Context, sector SectorInfo) error {
if _, is := m.checkPreCommitted(ctx, sector); is {
// TODO: Remove this after we can re-precommit
return nil // noop, for now
}
if err := failedCooldown(ctx, sector); err != nil {
return err
}
return ctx.Send(SectorRetrySeal{})
}
func (m *Sealing) handlePreCommitFailed(ctx statemachine.Context, sector SectorInfo) error {
if err := checkPrecommit(ctx.Context(), m.Address(), sector, m.api); err != nil {
switch err.(type) {
case *ErrApi:
log.Errorf("handlePreCommitFailed: api error, not proceeding: %+v", err)
return nil
case *ErrBadCommD: // TODO: Should this just go back to packing? (not really needed since handlePreCommit1 will do that too)
return ctx.Send(SectorSealPreCommitFailed{xerrors.Errorf("bad CommD error: %w", err)})
case *ErrExpiredTicket:
return ctx.Send(SectorSealPreCommitFailed{xerrors.Errorf("ticket expired error: %w", err)})
default:
return xerrors.Errorf("checkPrecommit sanity check error: %w", err)
}
}
if pci, is := m.checkPreCommitted(ctx, sector); is && pci != nil {
if sector.PreCommitMessage != nil {
log.Warn("sector %d is precommitted on chain, but we don't have precommit message", sector.SectorNumber)
return nil // TODO: SeedWait needs this currently
}
if pci.Info.SealedCID != *sector.CommR {
log.Warn("sector %d is precommitted on chain, with different CommR: %x != %x", sector.SectorNumber, pci.Info.SealedCID, sector.CommR)
return nil // TODO: remove when the actor allows re-precommit
}
// TODO: we could compare more things, but I don't think we really need to
// CommR tells us that CommD (and the CommPs) and the ticket all match
if err := failedCooldown(ctx, sector); err != nil {
return err
}
return ctx.Send(SectorRetryWaitSeed{})
}
if sector.PreCommitMessage != nil {
log.Warn("retrying precommit even though the message failed to apply")
}
if err := failedCooldown(ctx, sector); err != nil {
return err
}
return ctx.Send(SectorRetryPreCommit{})
}
func (m *Sealing) handleComputeProofFailed(ctx statemachine.Context, sector SectorInfo) error {
// TODO: Check sector files
if err := failedCooldown(ctx, sector); err != nil {
return err
}
return ctx.Send(SectorRetryComputeProof{})
}
func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo) error {
if err := checkPrecommit(ctx.Context(), m.maddr, sector, m.api); err != nil {
switch err.(type) {
case *ErrApi:
log.Errorf("handleCommitFailed: api error, not proceeding: %+v", err)
return nil
case *ErrBadCommD:
return ctx.Send(SectorSealPreCommitFailed{xerrors.Errorf("bad CommD error: %w", err)})
case *ErrExpiredTicket:
return ctx.Send(SectorSealPreCommitFailed{xerrors.Errorf("ticket expired error: %w", err)})
default:
return xerrors.Errorf("checkPrecommit sanity check error: %w", err)
}
}
if err := m.checkCommit(ctx.Context(), sector, sector.Proof); err != nil {
switch err.(type) {
case *ErrApi:
log.Errorf("handleCommitFailed: api error, not proceeding: %+v", err)
return nil
case *ErrBadSeed:
log.Errorf("seed changed, will retry: %+v", err)
return ctx.Send(SectorRetryWaitSeed{})
case *ErrInvalidProof:
if err := failedCooldown(ctx, sector); err != nil {
return err
}
if sector.InvalidProofs > 0 {
return ctx.Send(SectorSealPreCommitFailed{xerrors.Errorf("consecutive invalid proofs")})
}
return ctx.Send(SectorRetryInvalidProof{})
default:
return xerrors.Errorf("checkCommit sanity check error: %w", err)
}
}
// TODO: Check sector files
if err := failedCooldown(ctx, sector); err != nil {
return err
}
return ctx.Send(SectorRetryComputeProof{})
}

123
types.go Normal file

@@ -0,0 +1,123 @@
package sealing
import (
"bytes"
"context"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
"github.com/filecoin-project/specs-storage/storage"
)
type Piece struct {
DealID *abi.DealID
Size abi.UnpaddedPieceSize
CommP cid.Cid
}
type Log struct {
Timestamp uint64
Trace string // for errors
Message string
// additional data (Event info)
Kind string
}
type SectorInfo struct {
State SectorState
SectorNumber abi.SectorNumber
Nonce uint64 // TODO: remove
SectorType abi.RegisteredProof
// Packing
Pieces []Piece
// PreCommit1
TicketValue abi.SealRandomness
TicketEpoch abi.ChainEpoch
PreCommit1Out storage.PreCommit1Out
// PreCommit2
CommD *cid.Cid
CommR *cid.Cid
Proof []byte
PreCommitMessage *cid.Cid
// WaitSeed
SeedValue abi.InteractiveSealRandomness
SeedEpoch abi.ChainEpoch
// Committing
CommitMessage *cid.Cid
InvalidProofs uint64 // failed proof computations (doesn't validate with proof inputs)
// Faults
FaultReportMsg *cid.Cid
// Debug
LastErr string
Log []Log
}
func (t *SectorInfo) pieceInfos() []abi.PieceInfo {
out := make([]abi.PieceInfo, len(t.Pieces))
for i, piece := range t.Pieces {
out[i] = abi.PieceInfo{
Size: piece.Size.Padded(),
PieceCID: piece.CommP,
}
}
return out
}
func (t *SectorInfo) deals() []abi.DealID {
out := make([]abi.DealID, 0, len(t.Pieces))
for _, piece := range t.Pieces {
if piece.DealID == nil {
continue
}
out = append(out, *piece.DealID)
}
return out
}
func (t *SectorInfo) existingPieces() []abi.UnpaddedPieceSize {
out := make([]abi.UnpaddedPieceSize, len(t.Pieces))
for i, piece := range t.Pieces {
out[i] = piece.Size
}
return out
}
type TicketFn func(context.Context) (abi.SealRandomness, abi.ChainEpoch, error)
type SectorIDCounter interface {
Next() (abi.SectorNumber, error)
}
type TipSetToken []byte
type MsgLookup struct {
Receipt MessageReceipt
TipSetTok TipSetToken
Height abi.ChainEpoch
}
type MessageReceipt struct {
ExitCode exitcode.ExitCode
Return []byte
GasUsed int64
}
func (mr *MessageReceipt) Equals(o *MessageReceipt) bool {
return mr.ExitCode == o.ExitCode && bytes.Equal(mr.Return, o.Return) && mr.GasUsed == o.GasUsed
}

62
types_test.go Normal file

@@ -0,0 +1,62 @@
package sealing
import (
"bytes"
"testing"
"gotest.tools/assert"
cborutil "github.com/filecoin-project/go-cbor-util"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin"
)
func TestSectorInfoSerialization(t *testing.T) {
d := abi.DealID(1234)
dummyCid := builtin.AccountActorCodeID
si := &SectorInfo{
State: "stateful",
SectorNumber: 234,
Nonce: 345,
Pieces: []Piece{{
DealID: &d,
Size: 5,
CommP: dummyCid,
}},
CommD: &dummyCid,
CommR: nil,
Proof: nil,
TicketValue: []byte{87, 78, 7, 87},
TicketEpoch: 345,
PreCommitMessage: nil,
SeedValue: []byte{},
SeedEpoch: 0,
CommitMessage: nil,
FaultReportMsg: nil,
LastErr: "hi",
}
b, err := cborutil.Dump(si)
if err != nil {
t.Fatal(err)
}
var si2 SectorInfo
if err := cborutil.ReadCborRPC(bytes.NewReader(b), &si2); err != nil {
t.Fatal(err)
}
assert.Equal(t, si.State, si2.State)
assert.Equal(t, si.Nonce, si2.Nonce)
assert.Equal(t, si.SectorNumber, si2.SectorNumber)
assert.Equal(t, si.Pieces, si2.Pieces)
assert.Equal(t, si.CommD, si2.CommD)
assert.Equal(t, si.TicketValue, si2.TicketValue)
assert.Equal(t, si.TicketEpoch, si2.TicketEpoch)
assert.Equal(t, si, si2)
}

57
utils.go Normal file

@@ -0,0 +1,57 @@
package sealing
import (
"math/bits"
"github.com/filecoin-project/specs-actors/actors/abi"
)
func fillersFromRem(in abi.UnpaddedPieceSize) ([]abi.UnpaddedPieceSize, error) {
// Convert to in-sector bytes for easier math:
//
// Sector size to user bytes ratio is constant, e.g. for 1024B we have 1016B
// of user-usable data.
//
// (1024/1016 = 128/127)
//
// Given that, we can get the sector size by simply adding 1/127 of the user
// bytes
//
// (we convert to sector bytes as they are nice round binary numbers)
toFill := uint64(in + (in / 127))
// We need to fill the sector with pieces that are powers of 2. Conveniently
// computers store numbers in binary, which means we can look at 1s to get
// all the piece sizes we need to fill the sector. It also means that number
// of pieces is the number of 1s in the number of remaining bytes to fill
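// e.g. for in = 1016 (the unpadded size of a 1 KiB sector), toFill = 1024 =
// 1<<10, which has a single bit set, so a single 1016-byte filler piece is
// produced.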
out := make([]abi.UnpaddedPieceSize, bits.OnesCount64(toFill))
for i := range out {
// Extract the next lowest non-zero bit
next := bits.TrailingZeros64(toFill)
psize := uint64(1) << next
// e.g.: if the number is 0b010100, psize will be 0b000100
// set that bit to 0 by XORing it, so the next iteration looks at the
// next bit
toFill ^= psize
// Add the piece size to the list of pieces we need to create
out[i] = abi.PaddedPieceSize(psize).Unpadded()
}
return out, nil
}
func (m *Sealing) ListSectors() ([]SectorInfo, error) {
var sectors []SectorInfo
if err := m.sectors.List(&sectors); err != nil {
return nil, err
}
return sectors, nil
}
func (m *Sealing) GetSectorInfo(sid abi.SectorNumber) (SectorInfo, error) {
var out SectorInfo
err := m.sectors.Get(uint64(sid)).Get(&out)
return out, err
}

45
utils_test.go Normal file

@@ -0,0 +1,45 @@
package sealing
import (
"testing"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/stretchr/testify/assert"
)
func testFill(t *testing.T, n abi.UnpaddedPieceSize, exp []abi.UnpaddedPieceSize) {
f, err := fillersFromRem(n)
assert.NoError(t, err)
assert.Equal(t, exp, f)
var sum abi.UnpaddedPieceSize
for _, u := range f {
sum += u
}
assert.Equal(t, n, sum)
}
func TestFillersFromRem(t *testing.T) {
for i := 8; i < 32; i++ {
// single
ub := abi.PaddedPieceSize(uint64(1) << i).Unpadded()
testFill(t, ub, []abi.UnpaddedPieceSize{ub})
// 2
ub = abi.PaddedPieceSize(uint64(5) << i).Unpadded()
ub1 := abi.PaddedPieceSize(uint64(1) << i).Unpadded()
ub3 := abi.PaddedPieceSize(uint64(4) << i).Unpadded()
testFill(t, ub, []abi.UnpaddedPieceSize{ub1, ub3})
// 4
ub = abi.PaddedPieceSize(uint64(15) << i).Unpadded()
ub2 := abi.PaddedPieceSize(uint64(2) << i).Unpadded()
ub4 := abi.PaddedPieceSize(uint64(8) << i).Unpadded()
testFill(t, ub, []abi.UnpaddedPieceSize{ub1, ub2, ub3, ub4})
// different 2
ub = abi.PaddedPieceSize(uint64(9) << i).Unpadded()
testFill(t, ub, []abi.UnpaddedPieceSize{ub1, ub4})
}
}