Merge pull request #3 from filecoin-project/feat/pre-commit-expiration-policy

initialize FSM with pre-commit expiry policy
Erin Swenson-Healey 2020-04-08 08:39:57 -07:00 committed by GitHub
commit 1c35692235
12 changed files with 580 additions and 120 deletions
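
The headline change is that the sealing FSM constructor now takes a PreCommitPolicy, and handlePreCommitting uses it to compute the pre-commit expiration instead of a hard-coded value. A minimal wiring sketch, assuming nodeAPI satisfies both the SealingAPI and Chain interfaces and that the datastore, sector manager, and the other constructor dependencies are already in scope (all identifiers outside the sealing package are placeholders, and the default duration is arbitrary):

	pcp := sealing.NewBasicPreCommitPolicy(nodeAPI, abi.ChainEpoch(1000000))
	fsm := sealing.New(nodeAPI, events, maddr, worker, ds, sealer, sc, verif, tktFn, &pcp)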


@ -18,64 +18,41 @@ func (t *Piece) MarshalCBOR(w io.Writer) error {
_, err := w.Write(cbg.CborNull)
return err
}
if _, err := w.Write([]byte{163}); err != nil {
if _, err := w.Write([]byte{162}); err != nil {
return err
}
// t.DealID (abi.DealID) (uint64)
if len("DealID") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealID\" was too long")
// t.Piece (abi.PieceInfo) (struct)
if len("Piece") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Piece\" was too long")
}
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("DealID")))); err != nil {
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("Piece")))); err != nil {
return err
}
if _, err := w.Write([]byte("DealID")); err != nil {
if _, err := w.Write([]byte("Piece")); err != nil {
return err
}
if t.DealID == nil {
if _, err := w.Write(cbg.CborNull); err != nil {
return err
}
} else {
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(*t.DealID))); err != nil {
return err
}
}
// t.Size (abi.UnpaddedPieceSize) (uint64)
if len("Size") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Size\" was too long")
}
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("Size")))); err != nil {
return err
}
if _, err := w.Write([]byte("Size")); err != nil {
if err := t.Piece.MarshalCBOR(w); err != nil {
return err
}
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Size))); err != nil {
// t.DealInfo (sealing.DealInfo) (struct)
if len("DealInfo") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealInfo\" was too long")
}
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("DealInfo")))); err != nil {
return err
}
if _, err := w.Write([]byte("DealInfo")); err != nil {
return err
}
// t.CommP (cid.Cid) (struct)
if len("CommP") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"CommP\" was too long")
}
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("CommP")))); err != nil {
if err := t.DealInfo.MarshalCBOR(w); err != nil {
return err
}
if _, err := w.Write([]byte("CommP")); err != nil {
return err
}
if err := cbg.WriteCid(w, t.CommP); err != nil {
return xerrors.Errorf("failed to write cid field t.CommP: %w", err)
}
return nil
}
@ -109,8 +86,18 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error {
}
switch name {
// t.DealID (abi.DealID) (uint64)
case "DealID":
// t.Piece (abi.PieceInfo) (struct)
case "Piece":
{
if err := t.Piece.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.Piece: %w", err)
}
}
// t.DealInfo (sealing.DealInfo) (struct)
case "DealInfo":
{
@ -124,20 +111,96 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error {
return err
}
} else {
maj, extra, err = cbg.CborReadHeader(br)
if err != nil {
return err
t.DealInfo = new(DealInfo)
if err := t.DealInfo.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.DealInfo pointer: %w", err)
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
typed := abi.DealID(extra)
t.DealID = &typed
}
}
// t.Size (abi.UnpaddedPieceSize) (uint64)
case "Size":
default:
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
}
}
return nil
}
func (t *DealInfo) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
if _, err := w.Write([]byte{162}); err != nil {
return err
}
// t.DealID (abi.DealID) (uint64)
if len("DealID") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealID\" was too long")
}
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("DealID")))); err != nil {
return err
}
if _, err := w.Write([]byte("DealID")); err != nil {
return err
}
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.DealID))); err != nil {
return err
}
// t.DealSchedule (sealing.DealSchedule) (struct)
if len("DealSchedule") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealSchedule\" was too long")
}
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("DealSchedule")))); err != nil {
return err
}
if _, err := w.Write([]byte("DealSchedule")); err != nil {
return err
}
if err := t.DealSchedule.MarshalCBOR(w); err != nil {
return err
}
return nil
}
func (t *DealInfo) UnmarshalCBOR(r io.Reader) error {
br := cbg.GetPeeker(r)
maj, extra, err := cbg.CborReadHeader(br)
if err != nil {
return err
}
if maj != cbg.MajMap {
return fmt.Errorf("cbor input should be of type map")
}
if extra > cbg.MaxLength {
return fmt.Errorf("DealInfo: map struct too large (%d)", extra)
}
var name string
n := extra
for i := uint64(0); i < n; i++ {
{
sval, err := cbg.ReadString(br)
if err != nil {
return err
}
name = string(sval)
}
switch name {
// t.DealID (abi.DealID) (uint64)
case "DealID":
{
@ -148,21 +211,163 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error {
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.Size = abi.UnpaddedPieceSize(extra)
t.DealID = abi.DealID(extra)
}
// t.CommP (cid.Cid) (struct)
case "CommP":
// t.DealSchedule (sealing.DealSchedule) (struct)
case "DealSchedule":
{
c, err := cbg.ReadCid(br)
if err != nil {
return xerrors.Errorf("failed to read cid field t.CommP: %w", err)
if err := t.DealSchedule.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err)
}
t.CommP = c
}
default:
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
}
}
return nil
}
func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
if _, err := w.Write([]byte{162}); err != nil {
return err
}
// t.StartEpoch (abi.ChainEpoch) (int64)
if len("StartEpoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"StartEpoch\" was too long")
}
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("StartEpoch")))); err != nil {
return err
}
if _, err := w.Write([]byte("StartEpoch")); err != nil {
return err
}
if t.StartEpoch >= 0 {
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.StartEpoch))); err != nil {
return err
}
} else {
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajNegativeInt, uint64(-t.StartEpoch)-1)); err != nil {
return err
}
}
// t.EndEpoch (abi.ChainEpoch) (int64)
if len("EndEpoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"EndEpoch\" was too long")
}
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("EndEpoch")))); err != nil {
return err
}
if _, err := w.Write([]byte("EndEpoch")); err != nil {
return err
}
if t.EndEpoch >= 0 {
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.EndEpoch))); err != nil {
return err
}
} else {
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajNegativeInt, uint64(-t.EndEpoch)-1)); err != nil {
return err
}
}
return nil
}
func (t *DealSchedule) UnmarshalCBOR(r io.Reader) error {
br := cbg.GetPeeker(r)
maj, extra, err := cbg.CborReadHeader(br)
if err != nil {
return err
}
if maj != cbg.MajMap {
return fmt.Errorf("cbor input should be of type map")
}
if extra > cbg.MaxLength {
return fmt.Errorf("DealSchedule: map struct too large (%d)", extra)
}
var name string
n := extra
for i := uint64(0); i < n; i++ {
{
sval, err := cbg.ReadString(br)
if err != nil {
return err
}
name = string(sval)
}
switch name {
// t.StartEpoch (abi.ChainEpoch) (int64)
case "StartEpoch":
{
maj, extra, err := cbg.CborReadHeader(br)
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.StartEpoch = abi.ChainEpoch(extraI)
}
// t.EndEpoch (abi.ChainEpoch) (int64)
case "EndEpoch":
{
maj, extra, err := cbg.CborReadHeader(br)
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.EndEpoch = abi.ChainEpoch(extraI)
}
default:


@ -27,40 +27,38 @@ type ErrExpiredTicket struct{ error }
type ErrBadSeed struct{ error }
type ErrInvalidProof struct{ error }
// checkPieces validates that:
// - Each piece has a corresponding on-chain deal
// - Piece commitments match with on chain deals
// - Piece sizes match
// - Deals aren't expired
func checkPieces(ctx context.Context, si SectorInfo, api SealingAPI) error {
tok, height, err := api.ChainHead(ctx)
if err != nil {
return &ErrApi{xerrors.Errorf("getting chain head: %w", err)}
}
for i, piece := range si.Pieces {
if piece.DealID == nil {
exp := zerocomm.ZeroPieceCommitment(piece.Size)
if piece.CommP != exp {
return &ErrInvalidPiece{xerrors.Errorf("deal %d piece %d had non-zero CommP %+v", piece.DealID, i, piece.CommP)}
for i, p := range si.Pieces {
// if no deal is associated with the piece, ensure that we added it as
// filler (i.e. ensure that it has a zero PieceCID)
if p.DealInfo == nil {
exp := zerocomm.ZeroPieceCommitment(p.Piece.Size.Unpadded())
if !p.Piece.PieceCID.Equals(exp) {
return &ErrInvalidPiece{xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", si.SectorNumber, i, p.Piece.PieceCID)}
}
continue
}
proposal, _, err := api.StateMarketStorageDeal(ctx, *piece.DealID, tok)
proposal, _, err := api.StateMarketStorageDeal(ctx, p.DealInfo.DealID, tok)
if err != nil {
return &ErrApi{xerrors.Errorf("getting deal %d for piece %d: %w", piece.DealID, i, err)}
return &ErrApi{xerrors.Errorf("getting deal %d for piece %d: %w", p.DealInfo.DealID, i, err)}
}
if proposal.PieceCID != piece.CommP {
return &ErrInvalidDeals{xerrors.Errorf("piece %d (or %d) of sector %d refers deal %d with wrong CommP: %x != %x", i, len(si.Pieces), si.SectorNumber, piece.DealID, piece.CommP, proposal.PieceCID)}
if proposal.PieceCID != p.Piece.PieceCID {
return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %x != %x", i, len(si.Pieces), si.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID)}
}
if piece.Size != proposal.PieceSize.Unpadded() {
return &ErrInvalidDeals{xerrors.Errorf("piece %d (or %d) of sector %d refers deal %d with different size: %d != %d", i, len(si.Pieces), si.SectorNumber, piece.DealID, piece.Size, proposal.PieceSize)}
if p.Piece.Size != proposal.PieceSize {
return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(si.Pieces), si.SectorNumber, p.DealInfo.DealID, p.Piece.Size, proposal.PieceSize)}
}
if height >= proposal.StartEpoch {
return &ErrExpiredDeals{xerrors.Errorf("piece %d (or %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(si.Pieces), si.SectorNumber, piece.DealID, proposal.StartEpoch, height)}
return &ErrExpiredDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(si.Pieces), si.SectorNumber, p.DealInfo.DealID, proposal.StartEpoch, height)}
}
}
@ -75,7 +73,7 @@ func checkPrecommit(ctx context.Context, maddr address.Address, si SectorInfo, a
return &ErrApi{xerrors.Errorf("getting chain head: %w", err)}
}
commD, err := api.StateComputeDataCommitment(ctx, maddr, si.SectorType, si.deals(), tok)
commD, err := api.StateComputeDataCommitment(ctx, maddr, si.SectorType, si.dealIDs(), tok)
if err != nil {
return &ErrApi{xerrors.Errorf("calling StateComputeDataCommitment: %w", err)}
}


@ -59,10 +59,15 @@ func (evt SectorStart) apply(state *SectorInfo) {
state.SectorType = evt.SectorType
}
type SectorPacked struct{ Pieces []Piece }
type SectorPacked struct{ FillerPieces []abi.PieceInfo }
func (evt SectorPacked) apply(state *SectorInfo) {
state.Pieces = append(state.Pieces, evt.Pieces...)
for idx := range evt.FillerPieces {
state.Pieces = append(state.Pieces, Piece{
Piece: evt.FillerPieces[idx],
DealInfo: nil, // filler pieces don't have deals associated with them
})
}
}
type SectorPackingFailed struct{ error }


@ -16,14 +16,14 @@ func (m *Sealing) pledgeReader(size abi.UnpaddedPieceSize) io.Reader {
return io.LimitReader(&nr.Reader{}, int64(size))
}
func (m *Sealing) pledgeSector(ctx context.Context, sectorID abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, sizes ...abi.UnpaddedPieceSize) ([]Piece, error) {
func (m *Sealing) pledgeSector(ctx context.Context, sectorID abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, sizes ...abi.UnpaddedPieceSize) ([]abi.PieceInfo, error) {
if len(sizes) == 0 {
return nil, nil
}
log.Infof("Pledge %d, contains %+v", sectorID, existingPieceSizes)
out := make([]Piece, len(sizes))
out := make([]abi.PieceInfo, len(sizes))
for i, size := range sizes {
ppi, err := m.sealer.AddPiece(ctx, sectorID, existingPieceSizes, size, m.pledgeReader(size))
if err != nil {
@ -32,10 +32,7 @@ func (m *Sealing) pledgeSector(ctx context.Context, sectorID abi.SectorID, exist
existingPieceSizes = append(existingPieceSizes, size)
out[i] = Piece{
Size: ppi.Size.Unpadded(),
CommP: ppi.PieceCID,
}
out[i] = ppi
}
return out, nil
@ -72,7 +69,15 @@ func (m *Sealing) PledgeSector() error {
return
}
if err := m.newSector(sid, rt, pieces); err != nil {
ps := make([]Piece, len(pieces))
for idx := range ps {
ps[idx] = Piece{
Piece: pieces[idx],
DealInfo: nil,
}
}
if err := m.newSector(sid, rt, ps); err != nil {
log.Errorf("%+v", err)
return
}


@ -12,6 +12,8 @@ import (
func main() {
err := gen.WriteMapEncodersToFile("./cbor_gen.go", "sealing",
sealing.Piece{},
sealing.DealInfo{},
sealing.DealSchedule{},
sealing.SectorInfo{},
sealing.Log{},
)

go.mod

@ -5,6 +5,7 @@ go 1.13
require (
github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2
github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5
github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663 // indirect
github.com/filecoin-project/go-statemachine v0.0.0-20200226041606-2074af6d51d9

precommit_policy.go (new file)

@ -0,0 +1,76 @@
package sealing
import (
"context"
"github.com/filecoin-project/specs-actors/actors/abi"
)
type PreCommitPolicy interface {
Expiration(ctx context.Context, ps ...Piece) (abi.ChainEpoch, error)
}
type Chain interface {
ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error)
}
// BasicPreCommitPolicy satisfies PreCommitPolicy. It has two modes:
//
// Mode 1: The sector contains a non-zero quantity of pieces with deal info
// Mode 2: The sector contains no pieces with deal info
//
// The BasicPreCommitPolicy#Expiration method is given a slice of the pieces
// which the miner has encoded into the sector, and from that slice picks either
// the first or second mode.
//
// If we're in Mode 1: The pre-commit expiration epoch will be the maximum
// deal end epoch of a piece in the sector.
//
// If we're in Mode 2: The pre-commit expiration epoch will be set to the
// current epoch + the provided default duration.
type BasicPreCommitPolicy struct {
api Chain
duration abi.ChainEpoch
}
// NewBasicPreCommitPolicy produces a BasicPreCommitPolicy
func NewBasicPreCommitPolicy(api Chain, duration abi.ChainEpoch) BasicPreCommitPolicy {
return BasicPreCommitPolicy{
api: api,
duration: duration,
}
}
// Expiration produces the pre-commit sector expiration epoch for an encoded
// replica containing the provided enumeration of pieces and deals.
func (p *BasicPreCommitPolicy) Expiration(ctx context.Context, ps ...Piece) (abi.ChainEpoch, error) {
_, epoch, err := p.api.ChainHead(ctx)
if err != nil {
return 0, err
}
var end *abi.ChainEpoch
for _, p := range ps {
if p.DealInfo == nil {
continue
}
if p.DealInfo.DealSchedule.EndEpoch < epoch {
log.Warnf("piece schedule %+v ended before current epoch %d", p, epoch)
continue
}
if end == nil || *end < p.DealInfo.DealSchedule.EndEpoch {
end = &p.DealInfo.DealSchedule.EndEpoch
}
}
if end == nil {
tmp := epoch + p.duration
end = &tmp
}
return *end, nil
}
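
In short, with a chain head at epoch 55 and a default duration of 100, the policy behaves as follows (a sketch mirroring the tests below; chain, ctx, and dealPieces are assumed to be in scope):

	policy := sealing.NewBasicPreCommitPolicy(chain, 100)

	// Mode 2: no deal pieces, so the expiration falls back to head + duration.
	exp, _ := policy.Expiration(ctx) // 55 + 100 = 155

	// Mode 1: deal pieces present, so the expiration is the latest EndEpoch
	// among their schedules (already-expired schedules are skipped).
	exp, _ = policy.Expiration(ctx, dealPieces...) // e.g. 100 for end epochs {75, 100}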

precommit_policy_test.go (new file)

@ -0,0 +1,134 @@
package sealing_test
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/specs-actors/actors/abi"
sealing "github.com/filecoin-project/storage-fsm"
)
type fakeChain struct {
h abi.ChainEpoch
}
func (f *fakeChain) ChainHead(ctx context.Context) (sealing.TipSetToken, abi.ChainEpoch, error) {
return []byte{1, 2, 3}, f.h, nil
}
func TestBasicPolicyEmptySector(t *testing.T) {
policy := sealing.NewBasicPreCommitPolicy(&fakeChain{
h: abi.ChainEpoch(55),
}, 10)
exp, err := policy.Expiration(context.Background())
require.NoError(t, err)
assert.Equal(t, 65, int(exp))
}
func TestBasicPolicyMostConstrictiveSchedule(t *testing.T) {
policy := sealing.NewBasicPreCommitPolicy(&fakeChain{
h: abi.ChainEpoch(55),
}, 100)
pieces := []sealing.Piece{
{
Piece: abi.PieceInfo{
Size: abi.PaddedPieceSize(1024),
PieceCID: commcid.ReplicaCommitmentV1ToCID([]byte{1, 2, 3}),
},
DealInfo: &sealing.DealInfo{
DealID: abi.DealID(42),
DealSchedule: sealing.DealSchedule{
StartEpoch: abi.ChainEpoch(70),
EndEpoch: abi.ChainEpoch(75),
},
},
},
{
Piece: abi.PieceInfo{
Size: abi.PaddedPieceSize(1024),
PieceCID: commcid.ReplicaCommitmentV1ToCID([]byte{1, 2, 3}),
},
DealInfo: &sealing.DealInfo{
DealID: abi.DealID(43),
DealSchedule: sealing.DealSchedule{
StartEpoch: abi.ChainEpoch(80),
EndEpoch: abi.ChainEpoch(100),
},
},
},
}
exp, err := policy.Expiration(context.Background(), pieces...)
require.NoError(t, err)
assert.Equal(t, 100, int(exp))
}
func TestBasicPolicyIgnoresExistingScheduleIfExpired(t *testing.T) {
policy := sealing.NewBasicPreCommitPolicy(&fakeChain{
h: abi.ChainEpoch(55),
}, 100)
pieces := []sealing.Piece{
{
Piece: abi.PieceInfo{
Size: abi.PaddedPieceSize(1024),
PieceCID: commcid.ReplicaCommitmentV1ToCID([]byte{1, 2, 3}),
},
DealInfo: &sealing.DealInfo{
DealID: abi.DealID(44),
DealSchedule: sealing.DealSchedule{
StartEpoch: abi.ChainEpoch(1),
EndEpoch: abi.ChainEpoch(10),
},
},
},
}
exp, err := policy.Expiration(context.Background(), pieces...)
require.NoError(t, err)
assert.Equal(t, 155, int(exp))
}
func TestMissingDealIsIgnored(t *testing.T) {
policy := sealing.NewBasicPreCommitPolicy(&fakeChain{
h: abi.ChainEpoch(55),
}, 100)
pieces := []sealing.Piece{
{
Piece: abi.PieceInfo{
Size: abi.PaddedPieceSize(1024),
PieceCID: commcid.ReplicaCommitmentV1ToCID([]byte{1, 2, 3}),
},
DealInfo: &sealing.DealInfo{
DealID: abi.DealID(44),
DealSchedule: sealing.DealSchedule{
StartEpoch: abi.ChainEpoch(1),
EndEpoch: abi.ChainEpoch(10),
},
},
},
{
Piece: abi.PieceInfo{
Size: abi.PaddedPieceSize(1024),
PieceCID: commcid.ReplicaCommitmentV1ToCID([]byte{1, 2, 3}),
},
DealInfo: nil,
},
}
exp, err := policy.Expiration(context.Background(), pieces...)
require.NoError(t, err)
assert.Equal(t, 155, int(exp))
}


@ -49,9 +49,11 @@ type Sealing struct {
sc SectorIDCounter
verif ffiwrapper.Verifier
tktFn TicketFn
pcp PreCommitPolicy
}
func New(api SealingAPI, events Events, maddr address.Address, worker address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, tktFn TicketFn) *Sealing {
func New(api SealingAPI, events Events, maddr address.Address, worker address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, tktFn TicketFn, pcp PreCommitPolicy) *Sealing {
s := &Sealing{
api: api,
events: events,
@ -62,6 +64,7 @@ func New(api SealingAPI, events Events, maddr address.Address, worker address.Ad
sc: sc,
verif: verif,
tktFn: tktFn,
pcp: pcp,
}
s.sectors = statemachine.New(namespace.Wrap(ds, datastore.NewKey(SectorStorePrefix)), s, SectorInfo{})
@ -101,8 +104,8 @@ func (m *Sealing) AllocatePiece(size abi.UnpaddedPieceSize) (sectorID abi.Sector
return sid, 0, nil
}
func (m *Sealing) SealPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, sectorID abi.SectorNumber, dealID abi.DealID) error {
log.Infof("Seal piece for deal %d", dealID)
func (m *Sealing) SealPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, sectorID abi.SectorNumber, d DealInfo) error {
log.Infof("Seal piece for deal %d", d.DealID)
ppi, err := m.sealer.AddPiece(ctx, m.minerSector(sectorID), []abi.UnpaddedPieceSize{}, size, r)
if err != nil {
@ -116,14 +119,15 @@ func (m *Sealing) SealPiece(ctx context.Context, size abi.UnpaddedPieceSize, r i
return m.newSector(sectorID, rt, []Piece{
{
DealID: &dealID,
Size: ppi.Size.Unpadded(),
CommP: ppi.PieceCID,
Piece: ppi,
DealInfo: &d,
},
})
}
// newSector accepts a slice of pieces which will have a deal associated with
// them (in the event of a storage deal) or no deal (in the event of sealing
// garbage data)
func (m *Sealing) newSector(sid abi.SectorNumber, rt abi.RegisteredProof, pieces []Piece) error {
log.Infof("Start sealing %d", sid)
return m.sectors.Send(uint64(sid), SectorStart{


@ -20,7 +20,7 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err
var allocated abi.UnpaddedPieceSize
for _, piece := range sector.Pieces {
allocated += piece.Size
allocated += piece.Piece.Size.Unpadded()
}
ubytes := abi.PaddedPieceSize(m.sealer.SectorSize()).Unpadded()
@ -38,12 +38,12 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err
log.Warnf("Creating %d filler pieces for sector %d", len(fillerSizes), sector.SectorNumber)
}
pieces, err := m.pledgeSector(ctx.Context(), m.minerSector(sector.SectorNumber), sector.existingPieces(), fillerSizes...)
fillerPieces, err := m.pledgeSector(ctx.Context(), m.minerSector(sector.SectorNumber), sector.existingPieceSizes(), fillerSizes...)
if err != nil {
return xerrors.Errorf("filling up the sector (%v): %w", fillerSizes, err)
}
return ctx.Send(SectorPacked{Pieces: pieces})
return ctx.Send(SectorPacked{FillerPieces: fillerPieces})
}
func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) error {
@ -59,9 +59,9 @@ func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo)
log.Errorf("handlePreCommit1: api error, not proceeding: %+v", err)
return nil
case *ErrInvalidDeals:
return ctx.Send(SectorPackingFailed{xerrors.Errorf("invalid deals in sector: %w", err)})
return ctx.Send(SectorPackingFailed{xerrors.Errorf("invalid dealIDs in sector: %w", err)})
case *ErrExpiredDeals: // Probably not much we can do here, maybe re-pack the sector?
return ctx.Send(SectorPackingFailed{xerrors.Errorf("expired deals in sector: %w", err)})
return ctx.Send(SectorPackingFailed{xerrors.Errorf("expired dealIDs in sector: %w", err)})
default:
return xerrors.Errorf("checkPieces sanity check error: %w", err)
}
@ -112,14 +112,19 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf
}
}
expiration, err := m.pcp.Expiration(ctx.Context(), sector.Pieces...)
if err != nil {
return ctx.Send(SectorSealPreCommitFailed{xerrors.Errorf("handlePreCommitting: failed to compute pre-commit expiry: %w", err)})
}
params := &miner.SectorPreCommitInfo{
Expiration: 10000000, // TODO: implement
Expiration: expiration,
SectorNumber: sector.SectorNumber,
RegisteredProof: sector.SectorType,
SealedCID: *sector.CommR,
SealRandEpoch: sector.TicketEpoch,
DealIDs: sector.deals(),
DealIDs: sector.dealIDs(),
}
enc := new(bytes.Buffer)


@ -11,11 +11,30 @@ import (
"github.com/filecoin-project/specs-storage/storage"
)
type Piece struct {
DealID *abi.DealID
// PieceWithDealInfo is a tuple of piece info and deal info
type PieceWithDealInfo struct {
Piece abi.PieceInfo
DealInfo DealInfo
}
Size abi.UnpaddedPieceSize
CommP cid.Cid
// Piece is a tuple of piece info and optional deal
type Piece struct {
Piece abi.PieceInfo
DealInfo *DealInfo // nil for pieces which do not appear in deals (e.g. filler pieces)
}
// DealInfo is a tuple of deal identity and its schedule
type DealInfo struct {
DealID abi.DealID
DealSchedule DealSchedule
}
// DealSchedule communicates the time interval of a storage deal. The deal must
// appear in a sealed (proven) sector no later than StartEpoch, otherwise it
// is invalid.
type DealSchedule struct {
StartEpoch abi.ChainEpoch
EndEpoch abi.ChainEpoch
}
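
To make the relationship between these three types concrete, here is an illustrative construction (ppi stands for an abi.PieceInfo returned by AddPiece; the deal ID and epochs are made up):

	dealPiece := Piece{
		Piece: ppi,
		DealInfo: &DealInfo{
			DealID: abi.DealID(42),
			DealSchedule: DealSchedule{
				StartEpoch: abi.ChainEpoch(70),
				EndEpoch:   abi.ChainEpoch(75),
			},
		},
	}

	fillerPiece := Piece{Piece: ppi, DealInfo: nil} // filler pieces have no associated deal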
type Log struct {
@ -36,7 +55,6 @@ type SectorInfo struct {
SectorType abi.RegisteredProof
// Packing
Pieces []Piece
// PreCommit1
@ -70,30 +88,27 @@ type SectorInfo struct {
func (t *SectorInfo) pieceInfos() []abi.PieceInfo {
out := make([]abi.PieceInfo, len(t.Pieces))
for i, piece := range t.Pieces {
out[i] = abi.PieceInfo{
Size: piece.Size.Padded(),
PieceCID: piece.CommP,
}
for i, p := range t.Pieces {
out[i] = p.Piece
}
return out
}
func (t *SectorInfo) deals() []abi.DealID {
func (t *SectorInfo) dealIDs() []abi.DealID {
out := make([]abi.DealID, 0, len(t.Pieces))
for _, piece := range t.Pieces {
if piece.DealID == nil {
for _, p := range t.Pieces {
if p.DealInfo == nil {
continue
}
out = append(out, *piece.DealID)
out = append(out, p.DealInfo.DealID)
}
return out
}
func (t *SectorInfo) existingPieces() []abi.UnpaddedPieceSize {
func (t *SectorInfo) existingPieceSizes() []abi.UnpaddedPieceSize {
out := make([]abi.UnpaddedPieceSize, len(t.Pieces))
for i, piece := range t.Pieces {
out[i] = piece.Size
for i, p := range t.Pieces {
out[i] = p.Piece.Size.Unpadded()
}
return out
}


@ -14,6 +14,14 @@ import (
func TestSectorInfoSelialization(t *testing.T) {
d := abi.DealID(1234)
dealInfo := DealInfo{
DealID: d,
DealSchedule: DealSchedule{
StartEpoch: 0,
EndEpoch: 100,
},
}
dummyCid := builtin.AccountActorCodeID
si := &SectorInfo{
@ -21,9 +29,11 @@ func TestSectorInfoSelialization(t *testing.T) {
SectorNumber: 234,
Nonce: 345,
Pieces: []Piece{{
DealID: &d,
Size: 5,
CommP: dummyCid,
Piece: abi.PieceInfo{
Size: 5,
PieceCID: dummyCid,
},
DealInfo: &dealInfo,
}},
CommD: &dummyCid,
CommR: nil,