diff --git a/.circleci/config.yml b/.circleci/config.yml index 70e435d3b..c8187fd87 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -663,6 +663,12 @@ workflows: - build suite: itest-decode_params target: "./itests/decode_params_test.go" + - test: + name: test-itest-direct_data_onboard + requires: + - build + suite: itest-direct_data_onboard + target: "./itests/direct_data_onboard_test.go" - test: name: test-itest-dup_mpool_messages requires: diff --git a/api/api_full.go b/api/api_full.go index 4ae2ea531..dc219718d 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -20,7 +20,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin/v8/paych" - "github.com/filecoin-project/go-state-types/builtin/v9/market" verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" @@ -28,8 +27,10 @@ import ( apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -552,6 +553,8 @@ type FullNode interface { // StateGetAllocationForPendingDeal returns the allocation for a given deal ID of a pending deal. Returns nil if // pending allocation is not found. StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read + // StateGetAllocationIdForPendingDeal is like StateGetAllocationForPendingDeal except it returns the allocation ID + StateGetAllocationIdForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (verifreg.AllocationId, error) //perm:read // StateGetAllocation returns the allocation for a given address and allocation ID. StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read // StateGetAllocations returns the all the allocations for a given client. 
@@ -1117,9 +1120,47 @@ type MarketBalance struct { Locked big.Int } +type MarketDealState struct { + SectorStartEpoch abi.ChainEpoch // -1 if not yet included in proven sector + LastUpdatedEpoch abi.ChainEpoch // -1 if deal state never updated + SlashEpoch abi.ChainEpoch // -1 if deal never slashed +} + +func MakeDealState(mds market.DealState) MarketDealState { + return MarketDealState{ + SectorStartEpoch: mds.SectorStartEpoch(), + LastUpdatedEpoch: mds.LastUpdatedEpoch(), + SlashEpoch: mds.SlashEpoch(), + } +} + +type mstate struct { + s MarketDealState +} + +func (m mstate) SectorStartEpoch() abi.ChainEpoch { + return m.s.SectorStartEpoch +} + +func (m mstate) LastUpdatedEpoch() abi.ChainEpoch { + return m.s.LastUpdatedEpoch +} + +func (m mstate) SlashEpoch() abi.ChainEpoch { + return m.s.SlashEpoch +} + +func (m mstate) Equals(o market.DealState) bool { + return market.DealStatesEqual(m, o) +} + +func (m MarketDealState) Iface() market.DealState { + return mstate{m} +} + type MarketDeal struct { Proposal market.DealProposal - State market.DealState + State MarketDealState } type RetrievalOrder struct { diff --git a/api/api_storage.go b/api/api_storage.go index d5b3d5c1d..b24ee2af3 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -24,6 +24,7 @@ import ( builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/sealer/fsutil" "github.com/filecoin-project/lotus/storage/sealer/storiface" @@ -75,7 +76,7 @@ type StorageMiner interface { // Add piece to an open sector. If no sectors with enough space are open, // either a new sector will be created, or this call will block until more // sectors can be created. - SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d PieceDealInfo) (SectorOffset, error) //perm:admin + SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d piece.PieceDealInfo) (SectorOffset, error) //perm:admin SectorsUnsealPiece(ctx context.Context, sector storiface.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error //perm:admin @@ -353,10 +354,21 @@ type SectorLog struct { } type SectorPiece struct { - Piece abi.PieceInfo - DealInfo *PieceDealInfo // nil for pieces which do not appear in deals (e.g. filler pieces) + Piece abi.PieceInfo + + // DealInfo is nil for pieces which do not appear in deals (e.g. filler pieces) + // NOTE: DDO pieces which aren't associated with a market deal and have no + // verified allocation will still have a non-nil DealInfo. + // nil DealInfo indicates that the piece is a filler, and has zero piece commitment. 
+ DealInfo *piece.PieceDealInfo } +// DEPRECATED: Use piece.PieceDealInfo instead +type PieceDealInfo = piece.PieceDealInfo + +// DEPRECATED: Use piece.DealSchedule instead +type DealSchedule = piece.DealSchedule + type SectorInfo struct { SectorID abi.SectorNumber State SectorState @@ -459,28 +471,6 @@ type SectorOffset struct { Offset abi.PaddedPieceSize } -// DealInfo is a tuple of deal identity and its schedule -type PieceDealInfo struct { - // "Old" builtin-market deal info - PublishCid *cid.Cid - DealID abi.DealID - DealProposal *market.DealProposal - - // Common deal info - DealSchedule DealSchedule - - // Best-effort deal asks - KeepUnsealed bool -} - -// DealSchedule communicates the time interval of a storage deal. The deal must -// appear in a sealed (proven) sector no later than StartEpoch, otherwise it -// is invalid. -type DealSchedule struct { - StartEpoch abi.ChainEpoch - EndEpoch abi.ChainEpoch -} - // DagstoreShardInfo is the serialized form of dagstore.DagstoreShardInfo that // we expose through JSON-RPC to avoid clients having to depend on the // dagstore lib. diff --git a/api/cbor_gen.go b/api/cbor_gen.go index fd2cb30b4..ec5b09caf 100644 --- a/api/cbor_gen.go +++ b/api/cbor_gen.go @@ -14,7 +14,8 @@ import ( abi "github.com/filecoin-project/go-state-types/abi" paych "github.com/filecoin-project/go-state-types/builtin/v8/paych" - market "github.com/filecoin-project/go-state-types/builtin/v9/market" + + piece "github.com/filecoin-project/lotus/storage/pipeline/piece" ) var _ = xerrors.Errorf @@ -73,6 +74,7 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.WaitSentinel (cid.Cid) (struct) @@ -188,9 +190,9 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.WaitSentinel (cid.Cid) (struct) case "WaitSentinel": @@ -403,6 +405,7 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -481,6 +484,7 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) (err error) { } } + } } @@ -546,9 +550,10 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Value[:]); err != nil { + if _, err := cw.Write(t.Value); err != nil { return err } + return nil } @@ -635,7 +640,7 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) { t.Value = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Value[:]); err != nil { + if _, err := io.ReadFull(cr, t.Value); err != nil { return err } @@ -701,9 +706,10 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Value[:]); err != nil { + if _, err := cw.Write(t.Value); err != nil { return err } + return nil } @@ -790,7 +796,7 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) { t.Value = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Value[:]); err != nil { + if _, err := io.ReadFull(cr, t.Value); err != nil { return err } @@ -802,239 +808,6 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) { return nil } -func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - - cw := cbg.NewCborWriter(w) - - if _, err := cw.Write([]byte{165}); err != nil { - return err - } - - // t.DealID (abi.DealID) (uint64) - if len("DealID") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"DealID\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil 
{ - return err - } - if _, err := cw.WriteString(string("DealID")); err != nil { - return err - } - - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { - return err - } - - // t.PublishCid (cid.Cid) (struct) - if len("PublishCid") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"PublishCid\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishCid"))); err != nil { - return err - } - if _, err := cw.WriteString(string("PublishCid")); err != nil { - return err - } - - if t.PublishCid == nil { - if _, err := cw.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCid(cw, *t.PublishCid); err != nil { - return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) - } - } - - // t.DealProposal (market.DealProposal) (struct) - if len("DealProposal") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"DealProposal\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { - return err - } - if _, err := cw.WriteString(string("DealProposal")); err != nil { - return err - } - - if err := t.DealProposal.MarshalCBOR(cw); err != nil { - return err - } - - // t.DealSchedule (api.DealSchedule) (struct) - if len("DealSchedule") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"DealSchedule\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealSchedule"))); err != nil { - return err - } - if _, err := cw.WriteString(string("DealSchedule")); err != nil { - return err - } - - if err := t.DealSchedule.MarshalCBOR(cw); err != nil { - return err - } - - // t.KeepUnsealed (bool) (bool) - if len("KeepUnsealed") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil { - return err - } - if _, err := cw.WriteString(string("KeepUnsealed")); err != nil { - return err - } - - if err := cbg.WriteBool(w, t.KeepUnsealed); err != nil { - return err - } - return nil -} - -func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) (err error) { - *t = PieceDealInfo{} - - cr := cbg.NewCborReader(r) - - maj, extra, err := cr.ReadHeader() - if err != nil { - return err - } - defer func() { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - if maj != cbg.MajMap { - return fmt.Errorf("cbor input should be of type map") - } - - if extra > cbg.MaxLength { - return fmt.Errorf("PieceDealInfo: map struct too large (%d)", extra) - } - - var name string - n := extra - - for i := uint64(0); i < n; i++ { - - { - sval, err := cbg.ReadString(cr) - if err != nil { - return err - } - - name = string(sval) - } - - switch name { - // t.DealID (abi.DealID) (uint64) - case "DealID": - - { - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.DealID = abi.DealID(extra) - - } - // t.PublishCid (cid.Cid) (struct) - case "PublishCid": - - { - - b, err := cr.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := cr.UnreadByte(); err != nil { - return err - } - - c, err := cbg.ReadCid(cr) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) - } - - t.PublishCid = &c - } - - } - // t.DealProposal (market.DealProposal) (struct) - case "DealProposal": - - { - - b, err := cr.ReadByte() - if 
err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := cr.UnreadByte(); err != nil { - return err - } - t.DealProposal = new(market.DealProposal) - if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { - return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err) - } - } - - } - // t.DealSchedule (api.DealSchedule) (struct) - case "DealSchedule": - - { - - if err := t.DealSchedule.UnmarshalCBOR(cr); err != nil { - return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err) - } - - } - // t.KeepUnsealed (bool) (bool) - case "KeepUnsealed": - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.KeepUnsealed = false - case 21: - t.KeepUnsealed = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - - default: - // Field doesn't exist on this type, so ignore it - cbg.ScanForLinks(r, func(cid.Cid) {}) - } - } - - return nil -} func (t *SectorPiece) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) @@ -1063,7 +836,7 @@ func (t *SectorPiece) MarshalCBOR(w io.Writer) error { return err } - // t.DealInfo (api.PieceDealInfo) (struct) + // t.DealInfo (piece.PieceDealInfo) (struct) if len("DealInfo") > cbg.MaxLength { return xerrors.Errorf("Value in field \"DealInfo\" was too long") } @@ -1129,7 +902,7 @@ func (t *SectorPiece) UnmarshalCBOR(r io.Reader) (err error) { } } - // t.DealInfo (api.PieceDealInfo) (struct) + // t.DealInfo (piece.PieceDealInfo) (struct) case "DealInfo": { @@ -1142,7 +915,7 @@ func (t *SectorPiece) UnmarshalCBOR(r io.Reader) (err error) { if err := cr.UnreadByte(); err != nil { return err } - t.DealInfo = new(PieceDealInfo) + t.DealInfo = new(piece.PieceDealInfo) if err := t.DealInfo.UnmarshalCBOR(cr); err != nil { return xerrors.Errorf("unmarshaling t.DealInfo pointer: %w", err) } @@ -1158,160 +931,3 @@ func (t *SectorPiece) UnmarshalCBOR(r io.Reader) (err error) { return nil } -func (t *DealSchedule) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - - cw := cbg.NewCborWriter(w) - - if _, err := cw.Write([]byte{162}); err != nil { - return err - } - - // t.EndEpoch (abi.ChainEpoch) (int64) - if len("EndEpoch") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"EndEpoch\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("EndEpoch"))); err != nil { - return err - } - if _, err := cw.WriteString(string("EndEpoch")); err != nil { - return err - } - - if t.EndEpoch >= 0 { - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil { - return err - } - } else { - if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil { - return err - } - } - - // t.StartEpoch (abi.ChainEpoch) (int64) - if len("StartEpoch") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"StartEpoch\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartEpoch"))); err != nil { - return err - } - if _, err := cw.WriteString(string("StartEpoch")); err != nil { - return err - } - - if t.StartEpoch >= 0 { - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil { - return err - } - } else { - if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil { - return err - } - } - return nil -} - -func (t 
*DealSchedule) UnmarshalCBOR(r io.Reader) (err error) { - *t = DealSchedule{} - - cr := cbg.NewCborReader(r) - - maj, extra, err := cr.ReadHeader() - if err != nil { - return err - } - defer func() { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - if maj != cbg.MajMap { - return fmt.Errorf("cbor input should be of type map") - } - - if extra > cbg.MaxLength { - return fmt.Errorf("DealSchedule: map struct too large (%d)", extra) - } - - var name string - n := extra - - for i := uint64(0); i < n; i++ { - - { - sval, err := cbg.ReadString(cr) - if err != nil { - return err - } - - name = string(sval) - } - - switch name { - // t.EndEpoch (abi.ChainEpoch) (int64) - case "EndEpoch": - { - maj, extra, err := cr.ReadHeader() - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative overflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.EndEpoch = abi.ChainEpoch(extraI) - } - // t.StartEpoch (abi.ChainEpoch) (int64) - case "StartEpoch": - { - maj, extra, err := cr.ReadHeader() - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative overflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.StartEpoch = abi.ChainEpoch(extraI) - } - - default: - // Field doesn't exist on this type, so ignore it - cbg.ScanForLinks(r, func(cid.Cid) {}) - } - } - - return nil -} diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index 5a05c8d0e..e4574d157 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -40,6 +40,7 @@ import ( apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -406,6 +407,13 @@ func init() { percent := types.Percent(123) addExample(percent) addExample(&percent) + + addExample(&miner.PieceActivationManifest{ + CID: c, + Size: 2032, + VerifiedAllocationKey: nil, + Notify: nil, + }) } func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) { diff --git a/api/mocks/mock_full.go b/api/mocks/mock_full.go index 92b719550..d141123e0 100644 --- a/api/mocks/mock_full.go +++ b/api/mocks/mock_full.go @@ -3218,6 +3218,21 @@ func (mr *MockFullNodeMockRecorder) StateGetAllocationForPendingDeal(arg0, arg1, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationForPendingDeal", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocationForPendingDeal), arg0, arg1, arg2) } +// StateGetAllocationIdForPendingDeal mocks base method. 
+func (m *MockFullNode) StateGetAllocationIdForPendingDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (verifreg.AllocationId, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllocationIdForPendingDeal", arg0, arg1, arg2) + ret0, _ := ret[0].(verifreg.AllocationId) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllocationIdForPendingDeal indicates an expected call of StateGetAllocationIdForPendingDeal. +func (mr *MockFullNodeMockRecorder) StateGetAllocationIdForPendingDeal(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationIdForPendingDeal", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocationIdForPendingDeal), arg0, arg1, arg2) +} + // StateGetAllocations mocks base method. func (m *MockFullNode) StateGetAllocations(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (map[verifreg.AllocationId]verifreg.Allocation, error) { m.ctrl.T.Helper() diff --git a/api/proxy_gen.go b/api/proxy_gen.go index 6627a5afe..d89082b62 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -35,11 +35,13 @@ import ( apitypes "github.com/filecoin-project/lotus/api/types" builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/journal/alerting" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo/imports" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/sealer/fsutil" "github.com/filecoin-project/lotus/storage/sealer/sealtasks" @@ -487,6 +489,8 @@ type FullNodeMethods struct { StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"` + StateGetAllocationIdForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (verifreg.AllocationId, error) `perm:"read"` + StateGetAllocations func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) `perm:"read"` StateGetBeaconEntry func(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"` @@ -1085,7 +1089,7 @@ type StorageMinerMethods struct { SectorAbortUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"` - SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 PieceDealInfo) (SectorOffset, error) `perm:"admin"` + SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 piece.PieceDealInfo) (SectorOffset, error) `perm:"admin"` SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"` @@ -3423,6 +3427,17 @@ func (s *FullNodeStub) StateGetAllocationForPendingDeal(p0 context.Context, p1 a return nil, ErrNotSupported } +func (s *FullNodeStruct) StateGetAllocationIdForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (verifreg.AllocationId, error) { + if s.Internal.StateGetAllocationIdForPendingDeal == nil { + return *new(verifreg.AllocationId), ErrNotSupported + } + return s.Internal.StateGetAllocationIdForPendingDeal(p0, p1, p2) +} + 
+func (s *FullNodeStub) StateGetAllocationIdForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (verifreg.AllocationId, error) { + return *new(verifreg.AllocationId), ErrNotSupported +} + func (s *FullNodeStruct) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) { if s.Internal.StateGetAllocations == nil { return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported @@ -6426,14 +6441,14 @@ func (s *StorageMinerStub) SectorAbortUpgrade(p0 context.Context, p1 abi.SectorN return ErrNotSupported } -func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 PieceDealInfo) (SectorOffset, error) { +func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 piece.PieceDealInfo) (SectorOffset, error) { if s.Internal.SectorAddPieceToAny == nil { return *new(SectorOffset), ErrNotSupported } return s.Internal.SectorAddPieceToAny(p0, p1, p2, p3) } -func (s *StorageMinerStub) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 PieceDealInfo) (SectorOffset, error) { +func (s *StorageMinerStub) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 piece.PieceDealInfo) (SectorOffset, error) { return *new(SectorOffset), ErrNotSupported } diff --git a/blockstore/cbor_gen.go b/blockstore/cbor_gen.go index 221f13676..063aeccae 100644 --- a/blockstore/cbor_gen.go +++ b/blockstore/cbor_gen.go @@ -76,9 +76,10 @@ func (t *NetRpcReq) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(v[:]); err != nil { + if _, err := cw.Write(v); err != nil { return err } + } return nil } @@ -171,9 +172,9 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) { t.Cid[i] = c } + } } - // t.Data ([][]uint8) (slice) maj, extra, err = cr.ReadHeader() @@ -218,12 +219,12 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) { t.Data[i] = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Data[i][:]); err != nil { + if _, err := io.ReadFull(cr, t.Data[i]); err != nil { return err } + } } - return nil } @@ -261,9 +262,10 @@ func (t *NetRpcResp) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Data[:]); err != nil { + if _, err := cw.Write(t.Data); err != nil { return err } + return nil } @@ -335,9 +337,10 @@ func (t *NetRpcResp) UnmarshalCBOR(r io.Reader) (err error) { t.Data = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Data[:]); err != nil { + if _, err := io.ReadFull(cr, t.Data); err != nil { return err } + return nil } diff --git a/build/actors/v13.tar.zst b/build/actors/v13.tar.zst index 1e3220f6b..16cad76b7 100644 Binary files a/build/actors/v13.tar.zst and b/build/actors/v13.tar.zst differ diff --git a/build/builtin_actors_gen.go b/build/builtin_actors_gen.go index 64e622c96..aa2f8965e 100644 --- a/build/builtin_actors_gen.go +++ b/build/builtin_actors_gen.go @@ -118,27 +118,27 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzacedv2irkql7nil3w5v3ohqq3e54w62pxeoppjmaktzokolaaoh5ksu"), }, }, { - Network: "butterflynet", - Version: 13, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzacectxvbk77ntedhztd6sszp2btrtvsmy7lp2ypnrk6yl74zb34t2cq"), + Network: "butterflynet", + Version: 13, + + ManifestCid: 
MustParseCid("bafy2bzacea35zd7c7wh5d722l7chd233qqrulrtvm7kedbaqydn4tyvlxjf64"), Actors: map[string]cid.Cid{ - "account": MustParseCid("bafk2bzacebp7anjdtg2sohyt6lromx4xs7nujtwdfcsffnptphaayabx7ysxs"), - "cron": MustParseCid("bafk2bzacecu2y3awtemmglpkroiglulc2fj3gpdn6eazdqr6avcautiaighrg"), - "datacap": MustParseCid("bafk2bzacebbh5aynu3v3fluqqrcdsphleodoig42xkid2ccwdnff3avhbdop4"), - "eam": MustParseCid("bafk2bzacebzwt4v4hqoltiblhliwrnttxpr2dggbu3wsrvq4pwzisp7idu5w4"), - "ethaccount": MustParseCid("bafk2bzaceb5f6vgjkl7ic6ry5sjspqm2iij6qlcdovwi3haodb7wn37pgebii"), - "evm": MustParseCid("bafk2bzacebygt6zh6p52rkg2ugehm4k5yuu6f56i2pu6ywrmjez4n4zsje4p4"), - "init": MustParseCid("bafk2bzaceagyf3pwsthod7klfi25ow2zf2i5isfrrgr5ua3lvkgfojalrdbhw"), - "multisig": MustParseCid("bafk2bzacedgfo5mw2zqjwi37lah27sfxj4cw2abylgtxf3ucep4dyhgnppmqe"), - "paymentchannel": MustParseCid("bafk2bzacebm37tgu52cgzmiln6iip6etfmq73fd3qqz2j5gxlhtvachs7kw4c"), + "account": MustParseCid("bafk2bzacea4b7nlublru4zcbc4uaasukdyyrkpyvekgqg6fnkomai6imptryi"), + "cron": MustParseCid("bafk2bzacecffhtatvxepmd6bkrrx7zv6lnp6for56rh6aczop7hmnty35yyxi"), + "datacap": MustParseCid("bafk2bzaced47kr7ev7ykoo77d5a43vup7f575wnty6g5fwst57az4ynpbd5ga"), + "eam": MustParseCid("bafk2bzaceb6crz2c7vlsxwsejgm5dw7urzp3b657ubdfrf2ewbkit7um3oorc"), + "ethaccount": MustParseCid("bafk2bzaceaz2o5uyfqauablcwz7nbsskbjemijyve6qygdbc7vs22c2evtho2"), + "evm": MustParseCid("bafk2bzaceat5ykihn7fchzojn7abgvuciuh5z4qzywtv22ryokryfxxidd2ee"), + "init": MustParseCid("bafk2bzacecmr3pqvtb7uqhbkxt47fb2kouriu4xy56pd5bewkfxh4cghhvboy"), + "multisig": MustParseCid("bafk2bzacebyulvxhhzf7rpqfekizm755gsny5d65kfoyrqev37nbvneyj5bua"), + "paymentchannel": MustParseCid("bafk2bzacedyqv2skdlodckiysomi32po2jyqnrbmobliaqwlqfqgf6cmj7vhe"), "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), - "reward": MustParseCid("bafk2bzacedebvitdsztwebi44t5es4ls3p3hor252igzawr3s6uznmbvzh2ou"), - "storagemarket": MustParseCid("bafk2bzaceb2tdeqtt2eqpzeb3gezuchb7g7uzbd52bgvcdt6bg3ckq7oisb74"), - "storageminer": MustParseCid("bafk2bzaceb62clldtod2jimnri5k2koxttf6vqtlsvkjhnwduzs7sgsoakglw"), - "storagepower": MustParseCid("bafk2bzacedxvlj5xmhytdjrjqyonz37duvxb2ioyzk75c27yypkqalxuh3xh6"), - "system": MustParseCid("bafk2bzacec3vwj2chzaram3iqupkbfiein5h2l5qiltlrngbju2vg5umelclm"), - "verifiedregistry": MustParseCid("bafk2bzacedv2irkql7nil3w5v3ohqq3e54w62pxeoppjmaktzokolaaoh5ksu"), + "reward": MustParseCid("bafk2bzacebd5c5iwr4sxt74dv366tgbimp56grryxhz4aq6hr34evc43skjla"), + "storagemarket": MustParseCid("bafk2bzacebszj4mzar4esczavxhfu4exjyem7rnitqhahd7olwcnuynhzymno"), + "storageminer": MustParseCid("bafk2bzaceaxzedzy7ldeuqjznsdtvxfs7vzszrw5zaqh6riig53xsylow46z4"), + "storagepower": MustParseCid("bafk2bzaceazwjajfjm74gjpqz43hyshrnwzc456zvt7k45e6n2l4hkm45blps"), + "system": MustParseCid("bafk2bzaced4jahpma5nkeu7dvfznn5s4l24opwvxt3773shxoiqezpp3aode6"), + "verifiedregistry": MustParseCid("bafk2bzaced7ocpmvjrvdcxbr7elxswfsgkvajgcjhu7a3eion5u64t3gtjrc6"), }, }, { Network: "calibrationnet", @@ -247,27 +247,27 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzaceavldupmf7bimeeacs67z5xdfdlfca6p7sn6bev3mt5ggepfqvhqo"), }, }, { - Network: "calibrationnet", - Version: 13, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzacednzb3pkrfnbfhmoqtb3bc6dgvxszpqklf3qcc7qzcage4ewzxsca"), + Network: "calibrationnet", + Version: 13, + + ManifestCid: 
MustParseCid("bafy2bzaceatm7ux4qfbfq4pyuybvoid27d4yrwvxmmz5fv2ojla3tj3esbdhq"), Actors: map[string]cid.Cid{ - "account": MustParseCid("bafk2bzacechwwxdqvggkdylm37zldjsra2ivkdzwp7fee56bzxbzs544wv6u6"), - "cron": MustParseCid("bafk2bzacec4gdxxkqwxqqodsv6ug5dmdbqdfqwyqfek3yhxc2wweh5psxaeq6"), - "datacap": MustParseCid("bafk2bzacecq5ppfskxgv3iea3jarsix6jdduuhwsn4fbvngtbmzelzmlygorm"), - "eam": MustParseCid("bafk2bzacecb6cnwftvavpph4p34zs4psuy5xvbrhf7vszkva4npw6mw3c42xe"), - "ethaccount": MustParseCid("bafk2bzaceajmc3y3sedsqymfla3dzzqzmbu5kmr2iskm26ga2u34ll5fpztfw"), - "evm": MustParseCid("bafk2bzaced4sozr7m6rzcgpobzeiupghthfw6afumysu3oz6bxxirv74uo3vw"), - "init": MustParseCid("bafk2bzaceaewh7b6zl2egclm7fqzx2lsqr57i75lb6cj43ndoa4mal3k5ld3m"), - "multisig": MustParseCid("bafk2bzacednkwcpw5yzxjceoaliajgupzj6iqxe7ks2ll3unspbprbo5f2now"), - "paymentchannel": MustParseCid("bafk2bzacebaxhk4itfiuvbftg7kz5zxugqnvdgerobitjq4vl6q4orcwk6wqg"), + "account": MustParseCid("bafk2bzacedx64yz4qxxvm6jmv27mza3srwvpipnx6s5okyihfstrycg6ptceu"), + "cron": MustParseCid("bafk2bzacecx5r7cvmch4c5pxfep7i3sxqsp6xeo7ndwvm7liclrttx6tfe7u2"), + "datacap": MustParseCid("bafk2bzacebfscmvgjpdna6wasyxcb4e2y2vudas6uqwjhclkcurt3ncwyrd7e"), + "eam": MustParseCid("bafk2bzacedulgctm6ewvh3u3nefbla6vws5cqcmsgqj6y6vu5qdzlgtb7n6lo"), + "ethaccount": MustParseCid("bafk2bzaceby4olugmdq4kferims77tb2blw43gwco55rmf5gg4g4vrz4iwmne"), + "evm": MustParseCid("bafk2bzaceab2zp3tlpoxymg63wgmso3qavbcobli422xrg5fw3vev673t6kcg"), + "init": MustParseCid("bafk2bzaceb3btlbmoacsfxtyrwwraqbsgywnbcyexc4o3htxwiksehwyc3jbm"), + "multisig": MustParseCid("bafk2bzaceayekkjqkdhnx4ytkomdxtgmv7h5xdfdkj5srryleciihguzbfsj2"), + "paymentchannel": MustParseCid("bafk2bzacecyq6fajtnujwsanv4mzg35vfxorvyu54mh2jhdc6zjxmpce57bh6"), "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), - "reward": MustParseCid("bafk2bzacedra77pcglf7vdca2itcaa4vd6xrxynxmgfgdjdxqxfwqyhtoxehy"), - "storagemarket": MustParseCid("bafk2bzacea7g46y7xxu2zjq2h75x6mmx3utz2uxnlvnwi6tzpsvulna3bmiva"), - "storageminer": MustParseCid("bafk2bzaceb7qzqsi5uyxe4o5iuasi47l2hnznvmqr2eu4pl3qscvarjqlnuxo"), - "storagepower": MustParseCid("bafk2bzacedd3ka44k7d46ckbinjhv3diyuu2epgbyvhqqyjkc64qlrg3wlgzi"), - "system": MustParseCid("bafk2bzacecioupndtcnyw6iq2hbrxag3aufvczlv5nobnfbkbywqzcyfaa376"), - "verifiedregistry": MustParseCid("bafk2bzaceavldupmf7bimeeacs67z5xdfdlfca6p7sn6bev3mt5ggepfqvhqo"), + "reward": MustParseCid("bafk2bzaceazjfvff2rrw3eztgoqz6bjf2qd4343bxlnar56xnr5ermdhwpn4w"), + "storagemarket": MustParseCid("bafk2bzacedhuaydx363gmzlipccgzf5cudvvnrvs7i2epyjd6wk7qrsaxpvr2"), + "storageminer": MustParseCid("bafk2bzacedj2ojiavonctzlzhqju5vmwjd4cr22kwp4l6d2lolru5tbtfm5q4"), + "storagepower": MustParseCid("bafk2bzacedtwgbxjvcrmhopxkqllt7g6b4v7rzih3otqe2ebjso4t5zqn46oc"), + "system": MustParseCid("bafk2bzaceaaun6ou6fuen4br5ctadl4hkhmojy6cuyzocndu43moh74gtom24"), + "verifiedregistry": MustParseCid("bafk2bzaceclvfei3lvzi4gf2uymluiunla656uaeeaicgd54hsr7yznd26ddw"), }, }, { Network: "caterpillarnet", @@ -385,27 +385,27 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzacecduww5pirr7dvaijjijw4gf6ygf7vipgxh4scvv6vseo46gueb46"), }, }, { - Network: "caterpillarnet", - Version: 13, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzacebxiub6qsy67asvl5cx33x5vjbuqinalmf3xtnbmokxmmklzdkvei"), + Network: "caterpillarnet", + Version: 13, + + ManifestCid: 
MustParseCid("bafy2bzaceanynawqdwb5bjrlmjpk7lb5cvrxfw3gafzsw4h4yc5257pzckwzy"), Actors: map[string]cid.Cid{ - "account": MustParseCid("bafk2bzacecereuhejfvodut5357cai4lmhsyr7uenhcxvmw6jpmhe6auuly32"), - "cron": MustParseCid("bafk2bzacebo2whgy6jla4jsf5j4ovlqm2e4eepedlpw5wadas33yxmunis4b4"), - "datacap": MustParseCid("bafk2bzacecjjncl7ftgj4mrzxxfxld74pt3pyfrxmcru7a5auab25b3aoixm6"), - "eam": MustParseCid("bafk2bzacebyvawfzoxy7k4yxrj5nd3amg4rcopmnslxdwpzumfhsz5ezk4sws"), - "ethaccount": MustParseCid("bafk2bzaceaccs76uc6osvb2iy6w2pumqei3wdjtxq7rgtsotobncmqoi7kzcg"), - "evm": MustParseCid("bafk2bzaceawxgjzjkhbqwj36wzxeqbtngdh6y2tp4wsi27k7tbg2ujhw5rsjg"), - "init": MustParseCid("bafk2bzacedws5od7o6ktqyo2hudmipxuubsv2lwxz45xxjn2zguze72t6zoik"), - "multisig": MustParseCid("bafk2bzacecb4wk6n4lrmml3tssn6cszd4dc7ttux3kzjatrawhg4o6ovrng6w"), - "paymentchannel": MustParseCid("bafk2bzacea3eb556mkjvosfbqfbyfg6dgu52rfnuctwzjy3b2bh2azredxzbo"), + "account": MustParseCid("bafk2bzacecp4dlsadg5fv46xxif2oulrtzrfhjuly7q4vy3dpmgigbpqlg2z6"), + "cron": MustParseCid("bafk2bzaceazze5uouwmkxzxgsj3e736ylwmxovfucpce3xxhy4gbwksih57sk"), + "datacap": MustParseCid("bafk2bzacec3bfgl37yzz26vhlr42z7imq5k2vitu2tqmzycuvk5m2ghlwgb2c"), + "eam": MustParseCid("bafk2bzacedut6dy4cprz6dd4eema3cezinv3mwhhjp22d3tx27atfglmo2pk2"), + "ethaccount": MustParseCid("bafk2bzacea7gfdcbhiu2etdsksb4bjeo3rlf3fw6ezriiy2lbkbhou6cpyd74"), + "evm": MustParseCid("bafk2bzaceaniw7ampazv2b25f547k57zkbv2kdvmh3qev4huynu5l4gini4ie"), + "init": MustParseCid("bafk2bzacechtqqpu5g47xjkauwe7oa4vdz4oyuuqrevxssdsdcr2r6chopni4"), + "multisig": MustParseCid("bafk2bzacea56jz25vxku77uvkaohtenfqpp3k4eay7ztkzvjusgpg3icevkv2"), + "paymentchannel": MustParseCid("bafk2bzacebsk5kddkpzi5u3pjohldtmimplwtvz32bymc5tlzeqickvbrw46s"), "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), - "reward": MustParseCid("bafk2bzaceb2x5zgkrho373l3ippp6krs7brugssg6hj2tib22xmqjzdm2o25a"), - "storagemarket": MustParseCid("bafk2bzaced5j6drzmsebpxbf2dtptrc5tyidlbftdljqxavxqb57s2qpbvdek"), - "storageminer": MustParseCid("bafk2bzaceckgusfenkczxolfczvnygnuhxbou5to2skwwngbkihla7hgdv4yy"), - "storagepower": MustParseCid("bafk2bzaceagp6ilkltsltwii66nz6a4zen4qtfk7rdkvdv3gzq7fbv4ivox3u"), - "system": MustParseCid("bafk2bzacedye5j5uxox7knb6zlnhseaadztyav76mjbyk5qslhhbpiy5cdtt2"), - "verifiedregistry": MustParseCid("bafk2bzacecduww5pirr7dvaijjijw4gf6ygf7vipgxh4scvv6vseo46gueb46"), + "reward": MustParseCid("bafk2bzacec4s6jfppt4yw5eiecdwnmdylwaxnss26rnaiw2s5iswbcshhjjok"), + "storagemarket": MustParseCid("bafk2bzacearczb3o3qmbb47f6qkppx663gilnugz7yzchepm7mmlq4o7z4xhc"), + "storageminer": MustParseCid("bafk2bzaceacc2sh2pphzzjfdpvtafybx6mq5zods2av6bx62jly74fjja7ric"), + "storagepower": MustParseCid("bafk2bzaceden4chi3uhz4dw3yb4zljccdjhhh6pmrhm6wsk3xicjdytd4egjy"), + "system": MustParseCid("bafk2bzacecbr3eqyzyjaw7vrsg4i6i4okdn4go3q53twcm7hhsnezur43326u"), + "verifiedregistry": MustParseCid("bafk2bzacece6flukbmykezzig6xqrsnnmkra6qre3ybfh2tukgltsqyh6scz6"), }, }, { Network: "devnet", @@ -514,27 +514,27 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzacec37mddea65nvh4htsagtryfa3sq6i67utcupslyhzbhjhoy6hopa"), }, }, { - Network: "devnet", - Version: 13, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzaceasjdukhhyjbegpli247vbf5h64f7uvxhhebdihuqsj2mwisdwa6o"), + Network: "devnet", + Version: 13, + + ManifestCid: 
MustParseCid("bafy2bzacebjxqdpb2b5vdbn2tqgixzftljyxzchdiazscxfhjf36padliufmu"), Actors: map[string]cid.Cid{ - "account": MustParseCid("bafk2bzacedki4apynvdxxuoigmqkgaktgy2erjftoxqxqaklnelgveyaqknfu"), - "cron": MustParseCid("bafk2bzacebjpczf7qtcisy3zdp3sqoohxe75tgupmdo5dr26vh7orzrsjn3b2"), - "datacap": MustParseCid("bafk2bzacecz4esatk7gizdc7yvl6soigkelhix7izbc75q6eqtb7gjzavpcqc"), - "eam": MustParseCid("bafk2bzacebhtpd5mxfyovi7fgsfj62nhtmh4t5guob4sgq73ymgsk7473ltig"), - "ethaccount": MustParseCid("bafk2bzacebvdbbw5ag4qnxd7cif5mtakrw4wzv63diwl7awta5plaidfay4vg"), - "evm": MustParseCid("bafk2bzacebb7vrhprnshn52bzfmypjdpcrcfecapk232a6gapk3kghu2mp67q"), - "init": MustParseCid("bafk2bzaceaw4iouukgqxmwukfpt3sakdvsu75ftjvw47swnwtdftz5oszbt4w"), - "multisig": MustParseCid("bafk2bzaceahyjwf6re4mnuwhopglo3qzh6aboluboncpijm7vuiz3u4bkazho"), - "paymentchannel": MustParseCid("bafk2bzaceaupjw3djghaqw3g3hd4tw7uuas3njkszgzx2fhmgqh5eh4e6q2by"), + "account": MustParseCid("bafk2bzacedt7que2jrzjuxdwq62cnynvmtaatthxa7puucnb26o5h4q3k5aqi"), + "cron": MustParseCid("bafk2bzacedgu3nnjns2k5rsd3bab3nswbzyzsmtdvnbphqh5limxywjewt2l4"), + "datacap": MustParseCid("bafk2bzacebo727zrlatdyavsmcoo7wtplgtfohjtay4p2uz6b2pr547rbdlu2"), + "eam": MustParseCid("bafk2bzacebbuqb4l7nnjvon5o44pej6psfre4gxoiw4rn2fetpiyrtwbrnfuo"), + "ethaccount": MustParseCid("bafk2bzacearbfnsonhgevd4qzzezyx7sips5hxtbhpiugc3nogrw2uvwurgou"), + "evm": MustParseCid("bafk2bzacec3dcc3hj6u6bhayc7ndlufqabev7wzyl6oejt76tsqtedqhbjn4m"), + "init": MustParseCid("bafk2bzaceakfxom4oy4yreznnshxj4mhazjicisqbw5k6owsmz2pm7uozxg46"), + "multisig": MustParseCid("bafk2bzacebg45zhuk3litlqhmuitka7qznplhv4q5xpfxxhzyqkkwj7erbyyc"), + "paymentchannel": MustParseCid("bafk2bzaceaxidnvofdrvn4aan22jmqs4jg2h4cs5zxvr4lmmzfkgnxupqg5jw"), "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), - "reward": MustParseCid("bafk2bzacebzso6xkjxdscbpncw7el2d4hap6lfkgwqzrbc76lzp33vkwk6obc"), - "storagemarket": MustParseCid("bafk2bzacebzg74vyk3gzbhnz4zviwvxblyar574mtd6ayognmsvlkriejmunu"), - "storageminer": MustParseCid("bafk2bzacecs262232b3awcrilyzpdketeayyqzzwgoavtxilgjvayrz55ovk4"), - "storagepower": MustParseCid("bafk2bzacebbtj2m2ajawfuzxqz5nmdep7xevjo2qfjqa5tx3vr5m6qojolya4"), - "system": MustParseCid("bafk2bzacecnau5wddulbsvwn75tc3w75jrlvkybgrlxs4ngonqab6xq3eowvg"), - "verifiedregistry": MustParseCid("bafk2bzacec37mddea65nvh4htsagtryfa3sq6i67utcupslyhzbhjhoy6hopa"), + "reward": MustParseCid("bafk2bzacec66nz7fyiomevcw4c2zestvukrzyzbc7ue2xkipovtq5caxp7nke"), + "storagemarket": MustParseCid("bafk2bzacecspsjkpg4wvewee2mbrra7jd53r5mwrj2z6kg32juxj3por5zmd6"), + "storageminer": MustParseCid("bafk2bzacebux2uea2l4xkl4g3m7zjvy3pofxukzgejdqxnfs2tzatruqdw22k"), + "storagepower": MustParseCid("bafk2bzacebu4aq6eakjxinajnqxlsi3z7cuwky2aknpt45bbvwgz6skf2ufdi"), + "system": MustParseCid("bafk2bzacedgxsdxsgs5wpc4kjmbu3of6detontey7mvqprkxeiibh3lnkr2t2"), + "verifiedregistry": MustParseCid("bafk2bzacedrwcsij2o3qrxxw4rxia47u6bbz3ulojh6czh5stnzqea5hitk4w"), }, }, { Network: "hyperspace", @@ -666,27 +666,27 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzacedudgflxc75c77c6zkmfyq4u2xuk7k6xw6dfdccarjrvxx453b77q"), }, }, { - Network: "mainnet", - Version: 13, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzaceapkgfggvxyllnmuogtwasmsv5qi2qzhc2aybockd6kag2g5lzaio"), + Network: "mainnet", + Version: 13, + + ManifestCid: 
MustParseCid("bafy2bzacecpuup2nsfmmrrkm4fb4bvm4gcmhvoslhzmnz36w5teecs2mrvhjg"), Actors: map[string]cid.Cid{ - "account": MustParseCid("bafk2bzaceboftg75mdiba7xbo2i3uvgtca4brhnr3u5ptihonixgpnrvhpxoa"), - "cron": MustParseCid("bafk2bzacechxjkfe2cehx4s7skj3wzfpzf7zolds64khrrrs66bhazsemktls"), - "datacap": MustParseCid("bafk2bzacebpiwb2ml4qbnnaayxumtk43ryhc63exdgnhivy3hwgmzemawsmpq"), - "eam": MustParseCid("bafk2bzaceb3elj4hfbbjp7g5bptc7su7mptszl4nlqfedilxvstjo5ungm6oe"), - "ethaccount": MustParseCid("bafk2bzaceb4gkau2vgsijcxpfuq33bd7w3efr2rrhxrwiacjmns2ntdiamswq"), - "evm": MustParseCid("bafk2bzacecmnyfiwb52tkbwmm2dsd7ysi3nvuxl3lmspy7pl26wxj4zj7w4wi"), - "init": MustParseCid("bafk2bzacebllyegx5r6lggf6ymyetbp7amacwpuxakhtjvjtvoy2bfkzk3vms"), - "multisig": MustParseCid("bafk2bzacecw5lyp3n3t67xdwrmo36h4z7afc3lobmmr6wg55w6yjzg5jhmh42"), - "paymentchannel": MustParseCid("bafk2bzacectv4cm47bnhga5febf3lo3fq47g72kmmp2xd5s6tcxz7hiqdywa4"), + "account": MustParseCid("bafk2bzacebjlvd6lhdoed6osyzpbof7nogfahudipqlmdfvm54hzjeqqzbjry"), + "cron": MustParseCid("bafk2bzacedjx64kfo4hhqgjb74insrvqiwuyrm7dhsoamytit2uv4mlchzflg"), + "datacap": MustParseCid("bafk2bzacedvticyjylvsz7uz4wnm4s4alvfpy4tshhavgy3ajf52tp4cnz3qk"), + "eam": MustParseCid("bafk2bzaced7zx4brdu5unp4em57x67byv44u6k2zd4cyli3chna5ap4f7aeay"), + "ethaccount": MustParseCid("bafk2bzacedpnqhsgtcn7iucuhsiymuovomf3lgxwakq5pyps2psby74f2h7fk"), + "evm": MustParseCid("bafk2bzaceahhnneu4u4e4oy6wqsj6edwdamf3r35wscl3mo6rmqdimprj7hso"), + "init": MustParseCid("bafk2bzacebve7ce3pv553c4wu4bzxwq5d4ddh3mbe2vlfru7cmvg4g4umgrxo"), + "multisig": MustParseCid("bafk2bzacebvlflha3vrkx4rkogkvhjgkxdxknalmlicatakm2dmp2ev6wfym4"), + "paymentchannel": MustParseCid("bafk2bzacebnkmtwvaqmdnf65jhbcvre3dzqkgdsal36xi4vclwkqoauuv5d5o"), "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), - "reward": MustParseCid("bafk2bzacealqnxn5lwzwexd6reav4dppypquklx2ujlnvaxiqk2tzstyvkp5u"), - "storagemarket": MustParseCid("bafk2bzacedylkg5am446lcuih4voyzdn4yjeqfsxfzh5b6mcuhx4mok5ph5c4"), - "storageminer": MustParseCid("bafk2bzacedo75pabe4i2l3hvhtsjmijrcytd2y76xwe573uku25fi7sugqld6"), - "storagepower": MustParseCid("bafk2bzacecsij5tpfzjpfuckxvccv2p3bdqjklkrfyyoei6lx5dyj5j4fvjm6"), - "system": MustParseCid("bafk2bzacebfqrja2hip7esf4eafxjmu6xcogoqu5xxtgdg7xa5szgvvdguchu"), - "verifiedregistry": MustParseCid("bafk2bzacedudgflxc75c77c6zkmfyq4u2xuk7k6xw6dfdccarjrvxx453b77q"), + "reward": MustParseCid("bafk2bzacebwuj5uyu4efnpcp57hdxygps7dqoz42f22bxl6mcd25colierht4"), + "storagemarket": MustParseCid("bafk2bzacebgtlb2l4w6p2hlizh2f4gmu3mbc7zxvmn54fz4wyysafpiu42i3q"), + "storageminer": MustParseCid("bafk2bzaceaira6fenanp3r37ymowidzugpikl27uaf3azcb3q5gdmcbggqhu6"), + "storagepower": MustParseCid("bafk2bzacecer7i5uosgo2mlbczr7t7xnn7bfel7ry3rn5hs3ewqkgzy2ooj5w"), + "system": MustParseCid("bafk2bzacebz4q7shemeggjiyssjgnpwlptrnom2imclsokslwrxm2xof3evgg"), + "verifiedregistry": MustParseCid("bafk2bzacebckfv6swkac6r7jsgvvpameuxd6gu57slwatl6og3rddfbjgfdb6"), }, }, { Network: "testing", @@ -795,27 +795,27 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"), }, }, { - Network: "testing", - Version: 13, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzaceaaxd6ytavsek5bi5soqo7qamezuqfyfjy42es2clpbzu3pwzcmye"), + Network: "testing", + Version: 13, + + ManifestCid: 
MustParseCid("bafy2bzaceczgecr6o35gvyq7vkehtzxvdx3le2p6phfknx5asvqvjblkxcvk2"), Actors: map[string]cid.Cid{ - "account": MustParseCid("bafk2bzacea74qqkfvacykmq5emzqblh4f4nmxdkiyixxpzs7kkcfnbfa7cb6m"), - "cron": MustParseCid("bafk2bzacecotbu7k6awdzfzakf7g5iaas6gswtunjnnb2xm2klqoshjgb4imy"), - "datacap": MustParseCid("bafk2bzaceduhmqcyailiwdupt2ottfzh5hcrjoyeyiaipf3idk3mu7y3uz2mc"), - "eam": MustParseCid("bafk2bzaceb2yzzw6dcmcmhnt3mqnm4kah66f23pc4escnto3vwa552t6ctr7i"), - "ethaccount": MustParseCid("bafk2bzacebwkvvbmttkcjjlicp4ineozc52i5sc6d46pcoq6lzzs2p5i2youa"), - "evm": MustParseCid("bafk2bzacedetwacs6wmoksxwjlbpp4442uav7fd3pagadejm2cph7ucym7eck"), - "init": MustParseCid("bafk2bzacedhpoycn4sz7dragmbo5yqjspqriydxhplqdeguaqck2hmq5hgwqg"), - "multisig": MustParseCid("bafk2bzaceacc3m23yvnpzoeekstqtr2acutfv4zvsgncorjdrsucymjohzxs4"), - "paymentchannel": MustParseCid("bafk2bzaceac6i76vfexefqf6qgebkhkf2cb4g664d5nmfh2dric5spgykevd2"), + "account": MustParseCid("bafk2bzacebr4e3gbvs7baqqo7jhdeucgjr4iuv3hei3535qbnyzonju3hbmgm"), + "cron": MustParseCid("bafk2bzacebvak2gdlg7wyhtepptenxduz76o23lfmydmosqfhatsye6es47hy"), + "datacap": MustParseCid("bafk2bzacedtv462jnotkphl5nobytqcuuojldvp47lic6kovrl5r7pygxehsa"), + "eam": MustParseCid("bafk2bzacecam3wdb2johbcziwl476fjg57cf4fbhj5atf3jivrhflfrtffanc"), + "ethaccount": MustParseCid("bafk2bzacedhipermy6nzqj7qfg77xv5zmmyz7hxfe2phhi6qhr7kxn52aaitk"), + "evm": MustParseCid("bafk2bzacedhyremrizglbzoemwtgunmege4nkbus4nbfhncyzl5cbbzvlcx36"), + "init": MustParseCid("bafk2bzaced2tmopdpbxscbimmhzzuyjx2jjrpz43m76f6tjrgklrc75lnmfee"), + "multisig": MustParseCid("bafk2bzacedm7t5lufrqxq7bkahkbzkismgappjck7ojzffwaec3tfvslq6kog"), + "paymentchannel": MustParseCid("bafk2bzaceavfuvy65lmjt5iqctuqoyekgpcetjwx4nhyymegonpqqrz533nxk"), "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), - "reward": MustParseCid("bafk2bzaceaajyncjxcrl7wbb6ukzkueyqz4uyekxpmtn4mpndkf7ksmggopzq"), - "storagemarket": MustParseCid("bafk2bzaced6cexdcinwjhtfvuxgkxukiejp3goylaxtvhqfd24rs5z7g2z7dm"), - "storageminer": MustParseCid("bafk2bzacecvkbsjhufq2zr2dojohukdnql3gkqzdkmtp2hxvn5kczxp3tu6ko"), - "storagepower": MustParseCid("bafk2bzacedexrf5qplrrl5xzijfrthjdqwodfs5e6zj5kpztc7qnywbqdyiii"), - "system": MustParseCid("bafk2bzacecp4roanbxq3bflftlkipsoqqxio5etjjnzxus5pcu7lq43fnxb34"), - "verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"), + "reward": MustParseCid("bafk2bzaceadkjfbj24mt5uqlsmsiw5c6cazkrz74rex5kzsmrzls2wrvsvqnk"), + "storagemarket": MustParseCid("bafk2bzacebeche2v3yd6xukgieypshyd64k2xilrcg4pamkfpvvzpdux6idsk"), + "storageminer": MustParseCid("bafk2bzacedo3fbjl3ez57hgeyppf7oyr6frze4ykqkbvuuab7cf6szzzpumeo"), + "storagepower": MustParseCid("bafk2bzacecv2eon42ikpzg4q7c4oc3ka3fus6aksmigxoiet7wguex723jolo"), + "system": MustParseCid("bafk2bzacecxmieemzxuma4lwc6ckufdirdvuvq4p2j5i6jrhbffgmvykkfnni"), + "verifiedregistry": MustParseCid("bafk2bzaceazq3c73cdvozvi7t247cjgylithdy7znwjkin7duasxsqjy5btxk"), }, }, { Network: "testing-fake-proofs", @@ -924,26 +924,26 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"), }, }, { - Network: "testing-fake-proofs", - Version: 13, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzacecver4l5d6jiuzubhrtcxjjfdx6jnxbmyp4bselol2atgkhz3e3um"), + Network: "testing-fake-proofs", + Version: 13, + + ManifestCid: 
MustParseCid("bafy2bzacec2zeunv5b4fiyah4uoawb5spkprwvku2otdoiw6pcnz5dkyekcdw"), Actors: map[string]cid.Cid{ - "account": MustParseCid("bafk2bzacea74qqkfvacykmq5emzqblh4f4nmxdkiyixxpzs7kkcfnbfa7cb6m"), - "cron": MustParseCid("bafk2bzacecotbu7k6awdzfzakf7g5iaas6gswtunjnnb2xm2klqoshjgb4imy"), - "datacap": MustParseCid("bafk2bzaceduhmqcyailiwdupt2ottfzh5hcrjoyeyiaipf3idk3mu7y3uz2mc"), - "eam": MustParseCid("bafk2bzaceb2yzzw6dcmcmhnt3mqnm4kah66f23pc4escnto3vwa552t6ctr7i"), - "ethaccount": MustParseCid("bafk2bzacebwkvvbmttkcjjlicp4ineozc52i5sc6d46pcoq6lzzs2p5i2youa"), - "evm": MustParseCid("bafk2bzacedetwacs6wmoksxwjlbpp4442uav7fd3pagadejm2cph7ucym7eck"), - "init": MustParseCid("bafk2bzacedhpoycn4sz7dragmbo5yqjspqriydxhplqdeguaqck2hmq5hgwqg"), - "multisig": MustParseCid("bafk2bzaceacc3m23yvnpzoeekstqtr2acutfv4zvsgncorjdrsucymjohzxs4"), - "paymentchannel": MustParseCid("bafk2bzaceac6i76vfexefqf6qgebkhkf2cb4g664d5nmfh2dric5spgykevd2"), + "account": MustParseCid("bafk2bzacebr4e3gbvs7baqqo7jhdeucgjr4iuv3hei3535qbnyzonju3hbmgm"), + "cron": MustParseCid("bafk2bzacebvak2gdlg7wyhtepptenxduz76o23lfmydmosqfhatsye6es47hy"), + "datacap": MustParseCid("bafk2bzacedtv462jnotkphl5nobytqcuuojldvp47lic6kovrl5r7pygxehsa"), + "eam": MustParseCid("bafk2bzacecam3wdb2johbcziwl476fjg57cf4fbhj5atf3jivrhflfrtffanc"), + "ethaccount": MustParseCid("bafk2bzacedhipermy6nzqj7qfg77xv5zmmyz7hxfe2phhi6qhr7kxn52aaitk"), + "evm": MustParseCid("bafk2bzacedhyremrizglbzoemwtgunmege4nkbus4nbfhncyzl5cbbzvlcx36"), + "init": MustParseCid("bafk2bzaced2tmopdpbxscbimmhzzuyjx2jjrpz43m76f6tjrgklrc75lnmfee"), + "multisig": MustParseCid("bafk2bzacebmmawfoxi2eqgq5vbqzljk4jnvtepkkpmw56kl7ocuhghez5iebk"), + "paymentchannel": MustParseCid("bafk2bzaceavfuvy65lmjt5iqctuqoyekgpcetjwx4nhyymegonpqqrz533nxk"), "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), - "reward": MustParseCid("bafk2bzaceaajyncjxcrl7wbb6ukzkueyqz4uyekxpmtn4mpndkf7ksmggopzq"), - "storagemarket": MustParseCid("bafk2bzaced6cexdcinwjhtfvuxgkxukiejp3goylaxtvhqfd24rs5z7g2z7dm"), - "storageminer": MustParseCid("bafk2bzacedapzgrbc2rsmuqex76ftt2b62q6opi56gh2dr2oyyzuwin62rweg"), - "storagepower": MustParseCid("bafk2bzacecdwijcbbryinjtm27pdinqqkyzoskri24pwsvsadwcq2alkkjpnc"), - "system": MustParseCid("bafk2bzacecp4roanbxq3bflftlkipsoqqxio5etjjnzxus5pcu7lq43fnxb34"), - "verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"), + "reward": MustParseCid("bafk2bzaceadkjfbj24mt5uqlsmsiw5c6cazkrz74rex5kzsmrzls2wrvsvqnk"), + "storagemarket": MustParseCid("bafk2bzacebeche2v3yd6xukgieypshyd64k2xilrcg4pamkfpvvzpdux6idsk"), + "storageminer": MustParseCid("bafk2bzaceac36jmpyqhggwihp6fgfyq3j2lsmcla7tz7cig6iqs5wk5hqjqpu"), + "storagepower": MustParseCid("bafk2bzaceb3iijknmwrbh2lhp5a4g2yy6xk6dz7vpueulw2f6qel2kwfmjixy"), + "system": MustParseCid("bafk2bzacecxmieemzxuma4lwc6ckufdirdvuvq4p2j5i6jrhbffgmvykkfnni"), + "verifiedregistry": MustParseCid("bafk2bzaceazq3c73cdvozvi7t247cjgylithdy7znwjkin7duasxsqjy5btxk"), }, }} diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz index 24ab403a3..0f5a23788 100644 Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ diff --git a/build/openrpc/gateway.json.gz b/build/openrpc/gateway.json.gz index d3287b108..04cbbbe5d 100644 Binary files a/build/openrpc/gateway.json.gz and b/build/openrpc/gateway.json.gz differ diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz index 8527396a1..445340836 100644 Binary files 
a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz index 63b778855..a392751cd 100644 Binary files a/build/openrpc/worker.json.gz and b/build/openrpc/worker.json.gz differ diff --git a/build/version.go b/build/version.go index ebd744f8b..6ec1ecd7a 100644 --- a/build/version.go +++ b/build/version.go @@ -37,7 +37,7 @@ func BuildTypeString() string { } // BuildVersion is the local build version -const BuildVersion = "1.25.2" +const BuildVersion = "1.25.3-dev" func UserVersion() string { if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { diff --git a/chain/actors/builtin/market/actor.go.template b/chain/actors/builtin/market/actor.go.template index a84c04ab9..0604737b3 100644 --- a/chain/actors/builtin/market/actor.go.template +++ b/chain/actors/builtin/market/actor.go.template @@ -103,10 +103,10 @@ type BalanceTable interface { type DealStates interface { ForEach(cb func(id abi.DealID, ds DealState) error) error - Get(id abi.DealID) (*DealState, bool, error) + Get(id abi.DealID) (DealState, bool, error) array() adt.Array - decode(*cbg.Deferred) (*DealState, error) + decode(*cbg.Deferred) (DealState, error) } type DealProposals interface { @@ -142,7 +142,17 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora type DealProposal = markettypes.DealProposal type DealLabel = markettypes.DealLabel -type DealState = markettypes.DealState +type DealState interface { + SectorStartEpoch() abi.ChainEpoch // -1 if not yet included in proven sector + LastUpdatedEpoch() abi.ChainEpoch // -1 if deal state never updated + SlashEpoch() abi.ChainEpoch // -1 if deal never slashed + + Equals(other DealState) bool +} + +func DealStatesEqual(a, b DealState) bool { + return a.SectorStartEpoch() == b.SectorStartEpoch() && a.LastUpdatedEpoch() == b.LastUpdatedEpoch() && a.SlashEpoch() == b.SlashEpoch() +} type DealStateChanges struct { Added []DealIDState @@ -158,8 +168,8 @@ type DealIDState struct { // DealStateChange is a change in deal state from -> to type DealStateChange struct { ID abi.DealID - From *DealState - To *DealState + From DealState + To DealState } type DealProposalChanges struct { @@ -172,12 +182,36 @@ type ProposalIDState struct { Proposal markettypes.DealProposal } -func EmptyDealState() *DealState { - return &DealState{ - SectorStartEpoch: -1, - SlashEpoch: -1, - LastUpdatedEpoch: -1, + +type emptyDealState struct{} + +func (e *emptyDealState) SectorStartEpoch() abi.ChainEpoch { + return -1 +} + +func (e *emptyDealState) LastUpdatedEpoch() abi.ChainEpoch { + return -1 +} + +func (e *emptyDealState) SlashEpoch() abi.ChainEpoch { + return -1 +} + +func (e *emptyDealState) Equals(other DealState) bool { + if e.SectorStartEpoch() != other.SectorStartEpoch() { + return false } + if e.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if e.SlashEpoch() != other.SlashEpoch() { + return false + } + return true +} + +func EmptyDealState() DealState { + return &emptyDealState{} } // returns the earned fees and pending fees for a given deal @@ -196,8 +230,8 @@ func GetDealFees(deal markettypes.DealProposal, height abi.ChainEpoch) (abi.Toke return ef, big.Sub(tf, ef) } -func IsDealActive(state markettypes.DealState) bool { - return state.SectorStartEpoch > -1 && state.SlashEpoch == -1 +func IsDealActive(state DealState) bool { + return state.SectorStartEpoch() > -1 && state.SlashEpoch() == -1 } func labelFromGoString(s string) (markettypes.DealLabel, error) { diff --git a/chain/actors/builtin/market/diff.go b/chain/actors/builtin/market/diff.go index ef3c2c28d..292299790
100644 --- a/chain/actors/builtin/market/diff.go +++ b/chain/actors/builtin/market/diff.go @@ -64,7 +64,7 @@ func (d *marketStatesDiffer) Add(key uint64, val *cbg.Deferred) error { if err != nil { return err } - d.Results.Added = append(d.Results.Added, DealIDState{abi.DealID(key), *ds}) + d.Results.Added = append(d.Results.Added, DealIDState{abi.DealID(key), ds}) return nil } @@ -77,7 +77,7 @@ func (d *marketStatesDiffer) Modify(key uint64, from, to *cbg.Deferred) error { if err != nil { return err } - if *dsFrom != *dsTo { + if !dsFrom.Equals(dsTo) { d.Results.Modified = append(d.Results.Modified, DealStateChange{abi.DealID(key), dsFrom, dsTo}) } return nil @@ -88,6 +88,6 @@ func (d *marketStatesDiffer) Remove(key uint64, val *cbg.Deferred) error { if err != nil { return err } - d.Results.Removed = append(d.Results.Removed, DealIDState{abi.DealID(key), *ds}) + d.Results.Removed = append(d.Results.Removed, DealIDState{abi.DealID(key), ds}) return nil } diff --git a/chain/actors/builtin/market/market.go b/chain/actors/builtin/market/market.go index 95b4f5f19..13c09f91b 100644 --- a/chain/actors/builtin/market/market.go +++ b/chain/actors/builtin/market/market.go @@ -168,10 +168,10 @@ type BalanceTable interface { type DealStates interface { ForEach(cb func(id abi.DealID, ds DealState) error) error - Get(id abi.DealID) (*DealState, bool, error) + Get(id abi.DealID) (DealState, bool, error) array() adt.Array - decode(*cbg.Deferred) (*DealState, error) + decode(*cbg.Deferred) (DealState, error) } type DealProposals interface { @@ -242,7 +242,17 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora type DealProposal = markettypes.DealProposal type DealLabel = markettypes.DealLabel -type DealState = markettypes.DealState +type DealState interface { + SectorStartEpoch() abi.ChainEpoch // -1 if not yet included in proven sector + LastUpdatedEpoch() abi.ChainEpoch // -1 if deal state never updated + SlashEpoch() abi.ChainEpoch // -1 if deal never slashed + + Equals(other DealState) bool +} + +func DealStatesEqual(a, b DealState) bool { + return a.SectorStartEpoch() == b.SectorStartEpoch() && a.LastUpdatedEpoch() == b.LastUpdatedEpoch() && a.SlashEpoch() == b.SlashEpoch() +} type DealStateChanges struct { Added []DealIDState @@ -258,8 +268,8 @@ type DealIDState struct { // DealStateChange is a change in deal state from -> to type DealStateChange struct { ID abi.DealID - From *DealState - To *DealState + From DealState + To DealState } type DealProposalChanges struct { @@ -272,12 +282,35 @@ type ProposalIDState struct { Proposal markettypes.DealProposal } -func EmptyDealState() *DealState { - return &DealState{ - SectorStartEpoch: -1, - SlashEpoch: -1, - LastUpdatedEpoch: -1, +type emptyDealState struct{} + +func (e *emptyDealState) SectorStartEpoch() abi.ChainEpoch { + return -1 +} + +func (e *emptyDealState) LastUpdatedEpoch() abi.ChainEpoch { + return -1 +} + +func (e *emptyDealState) SlashEpoch() abi.ChainEpoch { + return -1 +} + +func (e *emptyDealState) Equals(other DealState) bool { + if e.SectorStartEpoch() != other.SectorStartEpoch() { + return false } + if e.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if e.SlashEpoch() != other.SlashEpoch() { + return false + } + return true +} + +func EmptyDealState() DealState { + return &emptyDealState{} } // returns the earned fees and pending fees for a given deal @@ -296,8 +329,8 @@ func GetDealFees(deal markettypes.DealProposal, height abi.ChainEpoch) (abi.Toke return ef, big.Sub(tf, ef) } -func IsDealActive(state markettypes.DealState) bool { - return state.SectorStartEpoch > -1 && state.SlashEpoch
== -1 +func IsDealActive(state DealState) bool { + return state.SectorStartEpoch() > -1 && state.SlashEpoch() == -1 } func labelFromGoString(s string) (markettypes.DealLabel, error) { diff --git a/chain/actors/builtin/market/state.go.template b/chain/actors/builtin/market/state.go.template index 1eab9d743..467057660 100644 --- a/chain/actors/builtin/market/state.go.template +++ b/chain/actors/builtin/market/state.go.template @@ -175,7 +175,7 @@ type dealStates{{.v}} struct { adt.Array } -func (s *dealStates{{.v}}) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates{{.v}}) Get(dealID abi.DealID) (DealState, bool, error) { var deal{{.v}} market{{.v}}.DealState found, err := s.Array.Get(uint64(dealID), &deal{{.v}}) if err != nil { @@ -185,7 +185,7 @@ func (s *dealStates{{.v}}) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV{{.v}}DealState(deal{{.v}}) - return &deal, true, nil + return deal, true, nil } func (s *dealStates{{.v}}) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -195,31 +195,57 @@ func (s *dealStates{{.v}}) ForEach(cb func(dealID abi.DealID, ds DealState) erro }) } -func (s *dealStates{{.v}}) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates{{.v}}) decode(val *cbg.Deferred) (DealState, error) { var ds{{.v}} market{{.v}}.DealState if err := ds{{.v}}.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV{{.v}}DealState(ds{{.v}}) - return &ds, nil + return ds, nil } func (s *dealStates{{.v}}) array() adt.Array { return s.Array } -func fromV{{.v}}DealState(v{{.v}} market{{.v}}.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v{{.v}}.SectorStartEpoch, - LastUpdatedEpoch: v{{.v}}.LastUpdatedEpoch, - SlashEpoch: v{{.v}}.SlashEpoch, - VerifiedClaim: 0, - } - {{if (ge .v 9)}} - ret.VerifiedClaim = verifregtypes.AllocationId(v{{.v}}.VerifiedClaim) - {{end}} +type dealStateV{{.v}} struct { + ds{{.v}} market{{.v}}.DealState +} - return ret +func (d dealStateV{{.v}}) SectorStartEpoch() abi.ChainEpoch { + return d.ds{{.v}}.SectorStartEpoch +} + +func (d dealStateV{{.v}}) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds{{.v}}.LastUpdatedEpoch +} + +func (d dealStateV{{.v}}) SlashEpoch() abi.ChainEpoch { + return d.ds{{.v}}.SlashEpoch +} + +func (d dealStateV{{.v}}) Equals(other DealState) bool { + if ov{{.v}}, ok := other.(dealStateV{{.v}}); ok { + return d.ds{{.v}} == ov{{.v}}.ds{{.v}} + } + + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV{{.v}})(nil) + +func fromV{{.v}}DealState(v{{.v}} market{{.v}}.DealState) DealState { + return dealStateV{{.v}}{v{{.v}}} } type dealProposals{{.v}} struct { diff --git a/chain/actors/builtin/market/v0.go b/chain/actors/builtin/market/v0.go index ca6970dfa..d797d53f8 100644 --- a/chain/actors/builtin/market/v0.go +++ b/chain/actors/builtin/market/v0.go @@ -154,7 +154,7 @@ type dealStates0 struct { adt.Array } -func (s *dealStates0) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates0) Get(dealID abi.DealID) (DealState, bool, error) { var deal0 market0.DealState found, err := s.Array.Get(uint64(dealID), &deal0) if err != nil { @@ -164,7 +164,7 @@ func (s *dealStates0) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV0DealState(deal0) - return 
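
EmptyDealState (introduced in market.go above) now hands back the DealState interface rather than a pointer to a struct, and every epoch getter of the empty state reports -1. A short, self-contained usage sketch:

package example // usage sketch only, not part of this diff

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
)

func emptyDealStateExample() {
	empty := market.EmptyDealState() // returns the interface, nothing to dereference
	// all three epoch getters of the empty state report -1
	fmt.Println(empty.SectorStartEpoch(), empty.LastUpdatedEpoch(), empty.SlashEpoch())
}
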
&deal, true, nil + return deal, true, nil } func (s *dealStates0) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -174,28 +174,57 @@ func (s *dealStates0) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates0) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates0) decode(val *cbg.Deferred) (DealState, error) { var ds0 market0.DealState if err := ds0.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV0DealState(ds0) - return &ds, nil + return ds, nil } func (s *dealStates0) array() adt.Array { return s.Array } -func fromV0DealState(v0 market0.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v0.SectorStartEpoch, - LastUpdatedEpoch: v0.LastUpdatedEpoch, - SlashEpoch: v0.SlashEpoch, - VerifiedClaim: 0, +type dealStateV0 struct { + ds0 market0.DealState +} + +func (d dealStateV0) SectorStartEpoch() abi.ChainEpoch { + return d.ds0.SectorStartEpoch +} + +func (d dealStateV0) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds0.LastUpdatedEpoch +} + +func (d dealStateV0) SlashEpoch() abi.ChainEpoch { + return d.ds0.SlashEpoch +} + +func (d dealStateV0) Equals(other DealState) bool { + if ov0, ok := other.(dealStateV0); ok { + return d.ds0 == ov0.ds0 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV0)(nil) + +func fromV0DealState(v0 market0.DealState) DealState { + return dealStateV0{v0} } type dealProposals0 struct { diff --git a/chain/actors/builtin/market/v10.go b/chain/actors/builtin/market/v10.go index 878f0d465..290c17d09 100644 --- a/chain/actors/builtin/market/v10.go +++ b/chain/actors/builtin/market/v10.go @@ -153,7 +153,7 @@ type dealStates10 struct { adt.Array } -func (s *dealStates10) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates10) Get(dealID abi.DealID) (DealState, bool, error) { var deal10 market10.DealState found, err := s.Array.Get(uint64(dealID), &deal10) if err != nil { @@ -163,7 +163,7 @@ func (s *dealStates10) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV10DealState(deal10) - return &deal, true, nil + return deal, true, nil } func (s *dealStates10) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -173,30 +173,57 @@ func (s *dealStates10) ForEach(cb func(dealID abi.DealID, ds DealState) error) e }) } -func (s *dealStates10) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates10) decode(val *cbg.Deferred) (DealState, error) { var ds10 market10.DealState if err := ds10.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV10DealState(ds10) - return &ds, nil + return ds, nil } func (s *dealStates10) array() adt.Array { return s.Array } -func fromV10DealState(v10 market10.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v10.SectorStartEpoch, - LastUpdatedEpoch: v10.LastUpdatedEpoch, - SlashEpoch: v10.SlashEpoch, - VerifiedClaim: 0, +type dealStateV10 struct { + ds10 market10.DealState +} + +func (d dealStateV10) SectorStartEpoch() abi.ChainEpoch { + return d.ds10.SectorStartEpoch +} + +func (d dealStateV10) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds10.LastUpdatedEpoch +} + +func (d dealStateV10) SlashEpoch() abi.ChainEpoch { + return d.ds10.SlashEpoch +} + +func (d dealStateV10) Equals(other 
DealState) bool { + if ov10, ok := other.(dealStateV10); ok { + return d.ds10 == ov10.ds10 } - ret.VerifiedClaim = verifregtypes.AllocationId(v10.VerifiedClaim) + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } - return ret + return true +} + +var _ DealState = (*dealStateV10)(nil) + +func fromV10DealState(v10 market10.DealState) DealState { + return dealStateV10{v10} } type dealProposals10 struct { diff --git a/chain/actors/builtin/market/v11.go b/chain/actors/builtin/market/v11.go index a64272209..56a4c6038 100644 --- a/chain/actors/builtin/market/v11.go +++ b/chain/actors/builtin/market/v11.go @@ -153,7 +153,7 @@ type dealStates11 struct { adt.Array } -func (s *dealStates11) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates11) Get(dealID abi.DealID) (DealState, bool, error) { var deal11 market11.DealState found, err := s.Array.Get(uint64(dealID), &deal11) if err != nil { @@ -163,7 +163,7 @@ func (s *dealStates11) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV11DealState(deal11) - return &deal, true, nil + return deal, true, nil } func (s *dealStates11) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -173,30 +173,57 @@ func (s *dealStates11) ForEach(cb func(dealID abi.DealID, ds DealState) error) e }) } -func (s *dealStates11) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates11) decode(val *cbg.Deferred) (DealState, error) { var ds11 market11.DealState if err := ds11.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV11DealState(ds11) - return &ds, nil + return ds, nil } func (s *dealStates11) array() adt.Array { return s.Array } -func fromV11DealState(v11 market11.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v11.SectorStartEpoch, - LastUpdatedEpoch: v11.LastUpdatedEpoch, - SlashEpoch: v11.SlashEpoch, - VerifiedClaim: 0, +type dealStateV11 struct { + ds11 market11.DealState +} + +func (d dealStateV11) SectorStartEpoch() abi.ChainEpoch { + return d.ds11.SectorStartEpoch +} + +func (d dealStateV11) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds11.LastUpdatedEpoch +} + +func (d dealStateV11) SlashEpoch() abi.ChainEpoch { + return d.ds11.SlashEpoch +} + +func (d dealStateV11) Equals(other DealState) bool { + if ov11, ok := other.(dealStateV11); ok { + return d.ds11 == ov11.ds11 } - ret.VerifiedClaim = verifregtypes.AllocationId(v11.VerifiedClaim) + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } - return ret + return true +} + +var _ DealState = (*dealStateV11)(nil) + +func fromV11DealState(v11 market11.DealState) DealState { + return dealStateV11{v11} } type dealProposals11 struct { diff --git a/chain/actors/builtin/market/v12.go b/chain/actors/builtin/market/v12.go index 56e651a9b..cf7687203 100644 --- a/chain/actors/builtin/market/v12.go +++ b/chain/actors/builtin/market/v12.go @@ -153,7 +153,7 @@ type dealStates12 struct { adt.Array } -func (s *dealStates12) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates12) Get(dealID abi.DealID) (DealState, bool, error) { var deal12 market12.DealState found, err := s.Array.Get(uint64(dealID), &deal12) if err != nil { @@ -163,7 +163,7 @@ func (s 
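
Since DealStateChange.From/To (market.go above) now carry the DealState interface instead of *DealState, code that consumes a market state diff reads epochs through the getters. An illustrative consumer (the function name is made up):

package example // illustrative only, not part of this diff

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
)

// logModifiedDeals walks the Modified entries of a market state diff and
// prints each deal's LastUpdatedEpoch transition via the getter methods.
func logModifiedDeals(changes *market.DealStateChanges) {
	for _, ch := range changes.Modified {
		fmt.Printf("deal %d: LastUpdatedEpoch %d -> %d\n",
			ch.ID, ch.From.LastUpdatedEpoch(), ch.To.LastUpdatedEpoch())
	}
}
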
*dealStates12) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV12DealState(deal12) - return &deal, true, nil + return deal, true, nil } func (s *dealStates12) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -173,30 +173,57 @@ func (s *dealStates12) ForEach(cb func(dealID abi.DealID, ds DealState) error) e }) } -func (s *dealStates12) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates12) decode(val *cbg.Deferred) (DealState, error) { var ds12 market12.DealState if err := ds12.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV12DealState(ds12) - return &ds, nil + return ds, nil } func (s *dealStates12) array() adt.Array { return s.Array } -func fromV12DealState(v12 market12.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v12.SectorStartEpoch, - LastUpdatedEpoch: v12.LastUpdatedEpoch, - SlashEpoch: v12.SlashEpoch, - VerifiedClaim: 0, +type dealStateV12 struct { + ds12 market12.DealState +} + +func (d dealStateV12) SectorStartEpoch() abi.ChainEpoch { + return d.ds12.SectorStartEpoch +} + +func (d dealStateV12) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds12.LastUpdatedEpoch +} + +func (d dealStateV12) SlashEpoch() abi.ChainEpoch { + return d.ds12.SlashEpoch +} + +func (d dealStateV12) Equals(other DealState) bool { + if ov12, ok := other.(dealStateV12); ok { + return d.ds12 == ov12.ds12 } - ret.VerifiedClaim = verifregtypes.AllocationId(v12.VerifiedClaim) + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } - return ret + return true +} + +var _ DealState = (*dealStateV12)(nil) + +func fromV12DealState(v12 market12.DealState) DealState { + return dealStateV12{v12} } type dealProposals12 struct { diff --git a/chain/actors/builtin/market/v13.go b/chain/actors/builtin/market/v13.go index 23b58cc09..d270319ce 100644 --- a/chain/actors/builtin/market/v13.go +++ b/chain/actors/builtin/market/v13.go @@ -153,7 +153,7 @@ type dealStates13 struct { adt.Array } -func (s *dealStates13) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates13) Get(dealID abi.DealID) (DealState, bool, error) { var deal13 market13.DealState found, err := s.Array.Get(uint64(dealID), &deal13) if err != nil { @@ -163,7 +163,7 @@ func (s *dealStates13) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV13DealState(deal13) - return &deal, true, nil + return deal, true, nil } func (s *dealStates13) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -173,30 +173,57 @@ func (s *dealStates13) ForEach(cb func(dealID abi.DealID, ds DealState) error) e }) } -func (s *dealStates13) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates13) decode(val *cbg.Deferred) (DealState, error) { var ds13 market13.DealState if err := ds13.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV13DealState(ds13) - return &ds, nil + return ds, nil } func (s *dealStates13) array() adt.Array { return s.Array } -func fromV13DealState(v13 market13.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v13.SectorStartEpoch, - LastUpdatedEpoch: v13.LastUpdatedEpoch, - SlashEpoch: v13.SlashEpoch, - VerifiedClaim: 0, +type dealStateV13 struct { + ds13 market13.DealState +} + +func (d dealStateV13) SectorStartEpoch() abi.ChainEpoch { + return 
d.ds13.SectorStartEpoch +} + +func (d dealStateV13) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds13.LastUpdatedEpoch +} + +func (d dealStateV13) SlashEpoch() abi.ChainEpoch { + return d.ds13.SlashEpoch +} + +func (d dealStateV13) Equals(other DealState) bool { + if ov13, ok := other.(dealStateV13); ok { + return d.ds13 == ov13.ds13 } - ret.VerifiedClaim = verifregtypes.AllocationId(v13.VerifiedClaim) + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } - return ret + return true +} + +var _ DealState = (*dealStateV13)(nil) + +func fromV13DealState(v13 market13.DealState) DealState { + return dealStateV13{v13} } type dealProposals13 struct { diff --git a/chain/actors/builtin/market/v2.go b/chain/actors/builtin/market/v2.go index ba84e3b03..5ced3c8a3 100644 --- a/chain/actors/builtin/market/v2.go +++ b/chain/actors/builtin/market/v2.go @@ -154,7 +154,7 @@ type dealStates2 struct { adt.Array } -func (s *dealStates2) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates2) Get(dealID abi.DealID) (DealState, bool, error) { var deal2 market2.DealState found, err := s.Array.Get(uint64(dealID), &deal2) if err != nil { @@ -164,7 +164,7 @@ func (s *dealStates2) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV2DealState(deal2) - return &deal, true, nil + return deal, true, nil } func (s *dealStates2) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -174,28 +174,57 @@ func (s *dealStates2) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates2) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates2) decode(val *cbg.Deferred) (DealState, error) { var ds2 market2.DealState if err := ds2.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV2DealState(ds2) - return &ds, nil + return ds, nil } func (s *dealStates2) array() adt.Array { return s.Array } -func fromV2DealState(v2 market2.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v2.SectorStartEpoch, - LastUpdatedEpoch: v2.LastUpdatedEpoch, - SlashEpoch: v2.SlashEpoch, - VerifiedClaim: 0, +type dealStateV2 struct { + ds2 market2.DealState +} + +func (d dealStateV2) SectorStartEpoch() abi.ChainEpoch { + return d.ds2.SectorStartEpoch +} + +func (d dealStateV2) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds2.LastUpdatedEpoch +} + +func (d dealStateV2) SlashEpoch() abi.ChainEpoch { + return d.ds2.SlashEpoch +} + +func (d dealStateV2) Equals(other DealState) bool { + if ov2, ok := other.(dealStateV2); ok { + return d.ds2 == ov2.ds2 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV2)(nil) + +func fromV2DealState(v2 market2.DealState) DealState { + return dealStateV2{v2} } type dealProposals2 struct { diff --git a/chain/actors/builtin/market/v3.go b/chain/actors/builtin/market/v3.go index f6a0891e7..35dd9c29a 100644 --- a/chain/actors/builtin/market/v3.go +++ b/chain/actors/builtin/market/v3.go @@ -149,7 +149,7 @@ type dealStates3 struct { adt.Array } -func (s *dealStates3) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates3) Get(dealID abi.DealID) (DealState, bool, error) { var deal3 
market3.DealState found, err := s.Array.Get(uint64(dealID), &deal3) if err != nil { @@ -159,7 +159,7 @@ func (s *dealStates3) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV3DealState(deal3) - return &deal, true, nil + return deal, true, nil } func (s *dealStates3) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -169,28 +169,57 @@ func (s *dealStates3) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates3) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates3) decode(val *cbg.Deferred) (DealState, error) { var ds3 market3.DealState if err := ds3.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV3DealState(ds3) - return &ds, nil + return ds, nil } func (s *dealStates3) array() adt.Array { return s.Array } -func fromV3DealState(v3 market3.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v3.SectorStartEpoch, - LastUpdatedEpoch: v3.LastUpdatedEpoch, - SlashEpoch: v3.SlashEpoch, - VerifiedClaim: 0, +type dealStateV3 struct { + ds3 market3.DealState +} + +func (d dealStateV3) SectorStartEpoch() abi.ChainEpoch { + return d.ds3.SectorStartEpoch +} + +func (d dealStateV3) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds3.LastUpdatedEpoch +} + +func (d dealStateV3) SlashEpoch() abi.ChainEpoch { + return d.ds3.SlashEpoch +} + +func (d dealStateV3) Equals(other DealState) bool { + if ov3, ok := other.(dealStateV3); ok { + return d.ds3 == ov3.ds3 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV3)(nil) + +func fromV3DealState(v3 market3.DealState) DealState { + return dealStateV3{v3} } type dealProposals3 struct { diff --git a/chain/actors/builtin/market/v4.go b/chain/actors/builtin/market/v4.go index 629e833b6..bc9e61c88 100644 --- a/chain/actors/builtin/market/v4.go +++ b/chain/actors/builtin/market/v4.go @@ -149,7 +149,7 @@ type dealStates4 struct { adt.Array } -func (s *dealStates4) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates4) Get(dealID abi.DealID) (DealState, bool, error) { var deal4 market4.DealState found, err := s.Array.Get(uint64(dealID), &deal4) if err != nil { @@ -159,7 +159,7 @@ func (s *dealStates4) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV4DealState(deal4) - return &deal, true, nil + return deal, true, nil } func (s *dealStates4) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -169,28 +169,57 @@ func (s *dealStates4) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates4) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates4) decode(val *cbg.Deferred) (DealState, error) { var ds4 market4.DealState if err := ds4.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV4DealState(ds4) - return &ds, nil + return ds, nil } func (s *dealStates4) array() adt.Array { return s.Array } -func fromV4DealState(v4 market4.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v4.SectorStartEpoch, - LastUpdatedEpoch: v4.LastUpdatedEpoch, - SlashEpoch: v4.SlashEpoch, - VerifiedClaim: 0, +type dealStateV4 struct { + ds4 market4.DealState +} + +func (d dealStateV4) SectorStartEpoch() abi.ChainEpoch { + return d.ds4.SectorStartEpoch +} + +func (d 
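
Every per-version dealStatesN.Get now returns the DealState interface, so callers can stay version-agnostic. A sketch of a caller applying the same liveness rule as IsDealActive above (helper name is illustrative):

package example // sketch only, not part of this diff

import (
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
)

// dealIsActive loads a deal through the version-agnostic DealStates accessor
// and checks that it started in a proven sector and was never slashed.
func dealIsActive(states market.DealStates, id abi.DealID) (bool, error) {
	ds, found, err := states.Get(id)
	if err != nil || !found {
		return false, err
	}
	return ds.SectorStartEpoch() > -1 && ds.SlashEpoch() == -1, nil
}
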
dealStateV4) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds4.LastUpdatedEpoch +} + +func (d dealStateV4) SlashEpoch() abi.ChainEpoch { + return d.ds4.SlashEpoch +} + +func (d dealStateV4) Equals(other DealState) bool { + if ov4, ok := other.(dealStateV4); ok { + return d.ds4 == ov4.ds4 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV4)(nil) + +func fromV4DealState(v4 market4.DealState) DealState { + return dealStateV4{v4} } type dealProposals4 struct { diff --git a/chain/actors/builtin/market/v5.go b/chain/actors/builtin/market/v5.go index 892588979..63743ba8d 100644 --- a/chain/actors/builtin/market/v5.go +++ b/chain/actors/builtin/market/v5.go @@ -149,7 +149,7 @@ type dealStates5 struct { adt.Array } -func (s *dealStates5) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates5) Get(dealID abi.DealID) (DealState, bool, error) { var deal5 market5.DealState found, err := s.Array.Get(uint64(dealID), &deal5) if err != nil { @@ -159,7 +159,7 @@ func (s *dealStates5) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV5DealState(deal5) - return &deal, true, nil + return deal, true, nil } func (s *dealStates5) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -169,28 +169,57 @@ func (s *dealStates5) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates5) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates5) decode(val *cbg.Deferred) (DealState, error) { var ds5 market5.DealState if err := ds5.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV5DealState(ds5) - return &ds, nil + return ds, nil } func (s *dealStates5) array() adt.Array { return s.Array } -func fromV5DealState(v5 market5.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v5.SectorStartEpoch, - LastUpdatedEpoch: v5.LastUpdatedEpoch, - SlashEpoch: v5.SlashEpoch, - VerifiedClaim: 0, +type dealStateV5 struct { + ds5 market5.DealState +} + +func (d dealStateV5) SectorStartEpoch() abi.ChainEpoch { + return d.ds5.SectorStartEpoch +} + +func (d dealStateV5) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds5.LastUpdatedEpoch +} + +func (d dealStateV5) SlashEpoch() abi.ChainEpoch { + return d.ds5.SlashEpoch +} + +func (d dealStateV5) Equals(other DealState) bool { + if ov5, ok := other.(dealStateV5); ok { + return d.ds5 == ov5.ds5 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV5)(nil) + +func fromV5DealState(v5 market5.DealState) DealState { + return dealStateV5{v5} } type dealProposals5 struct { diff --git a/chain/actors/builtin/market/v6.go b/chain/actors/builtin/market/v6.go index b57d49f91..5900eace9 100644 --- a/chain/actors/builtin/market/v6.go +++ b/chain/actors/builtin/market/v6.go @@ -151,7 +151,7 @@ type dealStates6 struct { adt.Array } -func (s *dealStates6) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates6) Get(dealID abi.DealID) (DealState, bool, error) { var deal6 market6.DealState found, err := s.Array.Get(uint64(dealID), &deal6) if err != nil { @@ -161,7 +161,7 @@ func (s 
*dealStates6) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV6DealState(deal6) - return &deal, true, nil + return deal, true, nil } func (s *dealStates6) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -171,28 +171,57 @@ func (s *dealStates6) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates6) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates6) decode(val *cbg.Deferred) (DealState, error) { var ds6 market6.DealState if err := ds6.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV6DealState(ds6) - return &ds, nil + return ds, nil } func (s *dealStates6) array() adt.Array { return s.Array } -func fromV6DealState(v6 market6.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v6.SectorStartEpoch, - LastUpdatedEpoch: v6.LastUpdatedEpoch, - SlashEpoch: v6.SlashEpoch, - VerifiedClaim: 0, +type dealStateV6 struct { + ds6 market6.DealState +} + +func (d dealStateV6) SectorStartEpoch() abi.ChainEpoch { + return d.ds6.SectorStartEpoch +} + +func (d dealStateV6) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds6.LastUpdatedEpoch +} + +func (d dealStateV6) SlashEpoch() abi.ChainEpoch { + return d.ds6.SlashEpoch +} + +func (d dealStateV6) Equals(other DealState) bool { + if ov6, ok := other.(dealStateV6); ok { + return d.ds6 == ov6.ds6 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV6)(nil) + +func fromV6DealState(v6 market6.DealState) DealState { + return dealStateV6{v6} } type dealProposals6 struct { diff --git a/chain/actors/builtin/market/v7.go b/chain/actors/builtin/market/v7.go index 56a1db328..f51f070c7 100644 --- a/chain/actors/builtin/market/v7.go +++ b/chain/actors/builtin/market/v7.go @@ -151,7 +151,7 @@ type dealStates7 struct { adt.Array } -func (s *dealStates7) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates7) Get(dealID abi.DealID) (DealState, bool, error) { var deal7 market7.DealState found, err := s.Array.Get(uint64(dealID), &deal7) if err != nil { @@ -161,7 +161,7 @@ func (s *dealStates7) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV7DealState(deal7) - return &deal, true, nil + return deal, true, nil } func (s *dealStates7) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -171,28 +171,57 @@ func (s *dealStates7) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates7) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates7) decode(val *cbg.Deferred) (DealState, error) { var ds7 market7.DealState if err := ds7.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV7DealState(ds7) - return &ds, nil + return ds, nil } func (s *dealStates7) array() adt.Array { return s.Array } -func fromV7DealState(v7 market7.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v7.SectorStartEpoch, - LastUpdatedEpoch: v7.LastUpdatedEpoch, - SlashEpoch: v7.SlashEpoch, - VerifiedClaim: 0, +type dealStateV7 struct { + ds7 market7.DealState +} + +func (d dealStateV7) SectorStartEpoch() abi.ChainEpoch { + return d.ds7.SectorStartEpoch +} + +func (d dealStateV7) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds7.LastUpdatedEpoch +} + +func (d dealStateV7) 
SlashEpoch() abi.ChainEpoch { + return d.ds7.SlashEpoch +} + +func (d dealStateV7) Equals(other DealState) bool { + if ov7, ok := other.(dealStateV7); ok { + return d.ds7 == ov7.ds7 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV7)(nil) + +func fromV7DealState(v7 market7.DealState) DealState { + return dealStateV7{v7} } type dealProposals7 struct { diff --git a/chain/actors/builtin/market/v8.go b/chain/actors/builtin/market/v8.go index 9c68ee1fd..f9bf25f9c 100644 --- a/chain/actors/builtin/market/v8.go +++ b/chain/actors/builtin/market/v8.go @@ -152,7 +152,7 @@ type dealStates8 struct { adt.Array } -func (s *dealStates8) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates8) Get(dealID abi.DealID) (DealState, bool, error) { var deal8 market8.DealState found, err := s.Array.Get(uint64(dealID), &deal8) if err != nil { @@ -162,7 +162,7 @@ func (s *dealStates8) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV8DealState(deal8) - return &deal, true, nil + return deal, true, nil } func (s *dealStates8) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -172,28 +172,57 @@ func (s *dealStates8) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates8) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates8) decode(val *cbg.Deferred) (DealState, error) { var ds8 market8.DealState if err := ds8.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV8DealState(ds8) - return &ds, nil + return ds, nil } func (s *dealStates8) array() adt.Array { return s.Array } -func fromV8DealState(v8 market8.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v8.SectorStartEpoch, - LastUpdatedEpoch: v8.LastUpdatedEpoch, - SlashEpoch: v8.SlashEpoch, - VerifiedClaim: 0, +type dealStateV8 struct { + ds8 market8.DealState +} + +func (d dealStateV8) SectorStartEpoch() abi.ChainEpoch { + return d.ds8.SectorStartEpoch +} + +func (d dealStateV8) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds8.LastUpdatedEpoch +} + +func (d dealStateV8) SlashEpoch() abi.ChainEpoch { + return d.ds8.SlashEpoch +} + +func (d dealStateV8) Equals(other DealState) bool { + if ov8, ok := other.(dealStateV8); ok { + return d.ds8 == ov8.ds8 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV8)(nil) + +func fromV8DealState(v8 market8.DealState) DealState { + return dealStateV8{v8} } type dealProposals8 struct { diff --git a/chain/actors/builtin/market/v9.go b/chain/actors/builtin/market/v9.go index d692c15cc..3b5be4dfa 100644 --- a/chain/actors/builtin/market/v9.go +++ b/chain/actors/builtin/market/v9.go @@ -153,7 +153,7 @@ type dealStates9 struct { adt.Array } -func (s *dealStates9) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates9) Get(dealID abi.DealID) (DealState, bool, error) { var deal9 market9.DealState found, err := s.Array.Get(uint64(dealID), &deal9) if err != nil { @@ -163,7 +163,7 @@ func (s *dealStates9) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := 
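
One reading of the repeated Equals implementations above: when both sides wrap the same actors version, comparing the underlying builtin structs also covers fields the interface does not expose (such as VerifiedClaim from v9 onward), while the cross-version fallback can only compare the three epochs. A self-contained mock of that pattern (the types here are stand-ins, not the real unexported wrappers):

package example // mock of the per-version Equals pattern; types are stand-ins

import "github.com/filecoin-project/go-state-types/abi"

type dealState interface {
	SectorStartEpoch() abi.ChainEpoch
	LastUpdatedEpoch() abi.ChainEpoch
	SlashEpoch() abi.ChainEpoch
	Equals(other dealState) bool
}

// raw stands in for a builtin-actors DealState; hidden plays the role of a
// field (like VerifiedClaim) that the interface does not expose.
type raw struct {
	start, updated, slash abi.ChainEpoch
	hidden                uint64
}

type wrapped struct{ s raw }

func (w wrapped) SectorStartEpoch() abi.ChainEpoch { return w.s.start }
func (w wrapped) LastUpdatedEpoch() abi.ChainEpoch { return w.s.updated }
func (w wrapped) SlashEpoch() abi.ChainEpoch       { return w.s.slash }

func (w wrapped) Equals(other dealState) bool {
	if o, ok := other.(wrapped); ok {
		return w.s == o.s // same version: whole-struct equality, hidden field included
	}
	// different version: only the exposed epochs can be compared
	return w.SectorStartEpoch() == other.SectorStartEpoch() &&
		w.LastUpdatedEpoch() == other.LastUpdatedEpoch() &&
		w.SlashEpoch() == other.SlashEpoch()
}
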
fromV9DealState(deal9) - return &deal, true, nil + return deal, true, nil } func (s *dealStates9) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -173,30 +173,57 @@ func (s *dealStates9) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates9) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates9) decode(val *cbg.Deferred) (DealState, error) { var ds9 market9.DealState if err := ds9.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV9DealState(ds9) - return &ds, nil + return ds, nil } func (s *dealStates9) array() adt.Array { return s.Array } -func fromV9DealState(v9 market9.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v9.SectorStartEpoch, - LastUpdatedEpoch: v9.LastUpdatedEpoch, - SlashEpoch: v9.SlashEpoch, - VerifiedClaim: 0, +type dealStateV9 struct { + ds9 market9.DealState +} + +func (d dealStateV9) SectorStartEpoch() abi.ChainEpoch { + return d.ds9.SectorStartEpoch +} + +func (d dealStateV9) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds9.LastUpdatedEpoch +} + +func (d dealStateV9) SlashEpoch() abi.ChainEpoch { + return d.ds9.SlashEpoch +} + +func (d dealStateV9) Equals(other DealState) bool { + if ov9, ok := other.(dealStateV9); ok { + return d.ds9 == ov9.ds9 } - ret.VerifiedClaim = verifregtypes.AllocationId(v9.VerifiedClaim) + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } - return ret + return true +} + +var _ DealState = (*dealStateV9)(nil) + +func fromV9DealState(v9 market9.DealState) DealState { + return dealStateV9{v9} } type dealProposals9 struct { diff --git a/chain/actors/builtin/miner/actor.go.template b/chain/actors/builtin/miner/actor.go.template index b4ad3a4b5..089e3dc68 100644 --- a/chain/actors/builtin/miner/actor.go.template +++ b/chain/actors/builtin/miner/actor.go.template @@ -17,6 +17,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/types" + minertypes13 "github.com/filecoin-project/go-state-types/builtin/v13/miner" minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/manifest" @@ -239,7 +240,9 @@ type DeclareFaultsParams = minertypes.DeclareFaultsParams type ProveCommitAggregateParams = minertypes.ProveCommitAggregateParams type ProveCommitSectorParams = minertypes.ProveCommitSectorParams type ProveReplicaUpdatesParams = minertypes.ProveReplicaUpdatesParams +type ProveReplicaUpdatesParams2 = minertypes.ProveReplicaUpdatesParams2 type ReplicaUpdate = minertypes.ReplicaUpdate +type ReplicaUpdate2 = minertypes.ReplicaUpdate2 type PreCommitSectorBatchParams = minertypes.PreCommitSectorBatchParams type PreCommitSectorBatchParams2 = minertypes.PreCommitSectorBatchParams2 type ExtendSectorExpiration2Params = minertypes.ExtendSectorExpiration2Params @@ -248,6 +251,12 @@ type ExpirationExtension2 = minertypes.ExpirationExtension2 type CompactPartitionsParams = minertypes.CompactPartitionsParams type WithdrawBalanceParams = minertypes.WithdrawBalanceParams +type PieceActivationManifest = minertypes13.PieceActivationManifest +type ProveCommitSectors3Params = minertypes13.ProveCommitSectors3Params +type SectorActivationManifest = minertypes13.SectorActivationManifest +type ProveReplicaUpdates3Params = minertypes13.ProveReplicaUpdates3Params +type SectorUpdateManifest = 
minertypes13.SectorUpdateManifest + var QAPowerMax = minertypes.QAPowerMax type WindowPostVerifyInfo = proof.WindowPoStVerifyInfo diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go index 0ec287f30..6bb1028f3 100644 --- a/chain/actors/builtin/miner/miner.go +++ b/chain/actors/builtin/miner/miner.go @@ -9,6 +9,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/big" + minertypes13 "github.com/filecoin-project/go-state-types/builtin/v13/miner" minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/dline" @@ -305,7 +306,9 @@ type DeclareFaultsParams = minertypes.DeclareFaultsParams type ProveCommitAggregateParams = minertypes.ProveCommitAggregateParams type ProveCommitSectorParams = minertypes.ProveCommitSectorParams type ProveReplicaUpdatesParams = minertypes.ProveReplicaUpdatesParams +type ProveReplicaUpdatesParams2 = minertypes.ProveReplicaUpdatesParams2 type ReplicaUpdate = minertypes.ReplicaUpdate +type ReplicaUpdate2 = minertypes.ReplicaUpdate2 type PreCommitSectorBatchParams = minertypes.PreCommitSectorBatchParams type PreCommitSectorBatchParams2 = minertypes.PreCommitSectorBatchParams2 type ExtendSectorExpiration2Params = minertypes.ExtendSectorExpiration2Params @@ -314,6 +317,12 @@ type ExpirationExtension2 = minertypes.ExpirationExtension2 type CompactPartitionsParams = minertypes.CompactPartitionsParams type WithdrawBalanceParams = minertypes.WithdrawBalanceParams +type PieceActivationManifest = minertypes13.PieceActivationManifest +type ProveCommitSectors3Params = minertypes13.ProveCommitSectors3Params +type SectorActivationManifest = minertypes13.SectorActivationManifest +type ProveReplicaUpdates3Params = minertypes13.ProveReplicaUpdates3Params +type SectorUpdateManifest = minertypes13.SectorUpdateManifest + var QAPowerMax = minertypes.QAPowerMax type WindowPostVerifyInfo = proof.WindowPoStVerifyInfo diff --git a/chain/events/state/predicates.go b/chain/events/state/predicates.go index ff05156a6..e4e8b8f7e 100644 --- a/chain/events/state/predicates.go +++ b/chain/events/state/predicates.go @@ -242,7 +242,7 @@ func (sp *StatePredicates) DealStateChangedForIDs(dealIds []abi.DealID) DiffDeal } existenceChanged := oldFound != newFound - valueChanged := (oldFound && newFound) && *oldDeal != *newDeal + valueChanged := (oldFound && newFound) && !oldDeal.Equals(newDeal) if existenceChanged || valueChanged { changedDeals[dealID] = market.DealStateChange{ID: dealID, From: oldDeal, To: newDeal} } diff --git a/chain/events/state/predicates_test.go b/chain/events/state/predicates_test.go index 52fc2668a..79c1d2e0e 100644 --- a/chain/events/state/predicates_test.go +++ b/chain/events/state/predicates_test.go @@ -177,11 +177,11 @@ func TestMarketPredicates(t *testing.T) { require.Contains(t, changedDealIDs, abi.DealID(1)) require.Contains(t, changedDealIDs, abi.DealID(2)) deal1 := changedDealIDs[abi.DealID(1)] - if deal1.From.LastUpdatedEpoch != 2 || deal1.To.LastUpdatedEpoch != 3 { + if deal1.From.LastUpdatedEpoch() != 2 || deal1.To.LastUpdatedEpoch() != 3 { t.Fatal("Unexpected change to LastUpdatedEpoch") } deal2 := changedDealIDs[abi.DealID(2)] - if deal2.From.LastUpdatedEpoch != 5 || deal2.To != nil { + if deal2.From.LastUpdatedEpoch() != 5 || deal2.To != nil { t.Fatal("Expected To to be nil") } @@ -243,8 +243,8 @@ func 
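
DealStateChangedForIDs (predicates.go above) now detects modification with the interface's Equals instead of dereferencing and comparing structs. Condensed, the check looks like this (the helper name is illustrative):

package example // condensed sketch of the check used in predicates.go above

import "github.com/filecoin-project/lotus/chain/actors/builtin/market"

// dealChanged reports whether a tracked deal changed between two states:
// either its presence flipped, or it exists on both sides with differing state.
func dealChanged(oldFound, newFound bool, oldDeal, newDeal market.DealState) bool {
	if oldFound != newFound {
		return true
	}
	return oldFound && newFound && !oldDeal.Equals(newDeal)
}
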
TestMarketPredicates(t *testing.T) { require.Len(t, changedDeals.Modified, 1) require.Equal(t, abi.DealID(1), changedDeals.Modified[0].ID) - require.True(t, dealEquality(*newDeal1, *changedDeals.Modified[0].To)) - require.True(t, dealEquality(*oldDeal1, *changedDeals.Modified[0].From)) + require.True(t, dealEquality(*newDeal1, changedDeals.Modified[0].To)) + require.True(t, dealEquality(*oldDeal1, changedDeals.Modified[0].From)) require.Equal(t, abi.DealID(2), changedDeals.Removed[0].ID) }) @@ -579,7 +579,7 @@ func newSectorPreCommitInfo(sectorNo abi.SectorNumber, sealed cid.Cid, expiratio } func dealEquality(expected market2.DealState, actual market.DealState) bool { - return expected.LastUpdatedEpoch == actual.LastUpdatedEpoch && - expected.SectorStartEpoch == actual.SectorStartEpoch && - expected.SlashEpoch == actual.SlashEpoch + return expected.LastUpdatedEpoch == actual.LastUpdatedEpoch() && + expected.SectorStartEpoch == actual.SectorStartEpoch() && + expected.SlashEpoch == actual.SlashEpoch() } diff --git a/chain/exchange/cbor_gen.go b/chain/exchange/cbor_gen.go index 71c75869d..7d13fd336 100644 --- a/chain/exchange/cbor_gen.go +++ b/chain/exchange/cbor_gen.go @@ -126,9 +126,9 @@ func (t *Request) UnmarshalCBOR(r io.Reader) (err error) { t.Head[i] = c } + } } - // t.Length (uint64) (uint64) { @@ -204,6 +204,7 @@ func (t *Response) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -300,9 +301,9 @@ func (t *Response) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - return nil } @@ -332,6 +333,7 @@ func (t *CompactedMessagesCBOR) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.BlsIncludes ([]exchange.messageIndices) (slice) @@ -346,6 +348,7 @@ func (t *CompactedMessagesCBOR) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Secpk ([]*types.SignedMessage) (slice) @@ -360,6 +363,7 @@ func (t *CompactedMessagesCBOR) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.SecpkIncludes ([]exchange.messageIndices) (slice) @@ -374,6 +378,7 @@ func (t *CompactedMessagesCBOR) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -446,9 +451,9 @@ func (t *CompactedMessagesCBOR) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.BlsIncludes ([]exchange.messageIndices) (slice) maj, extra, err = cr.ReadHeader() @@ -484,9 +489,9 @@ func (t *CompactedMessagesCBOR) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Secpk ([]*types.SignedMessage) (slice) maj, extra, err = cr.ReadHeader() @@ -532,9 +537,9 @@ func (t *CompactedMessagesCBOR) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.SecpkIncludes ([]exchange.messageIndices) (slice) maj, extra, err = cr.ReadHeader() @@ -570,9 +575,9 @@ func (t *CompactedMessagesCBOR) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - return nil } @@ -602,6 +607,7 @@ func (t *BSTipSet) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Messages (exchange.CompactedMessages) (struct) @@ -679,9 +685,9 @@ func (t *BSTipSet) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Messages (exchange.CompactedMessages) (struct) { diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go index df8900cab..9ae39cf35 100644 --- a/chain/gen/genesis/miners.go +++ b/chain/gen/genesis/miners.go @@ -374,13 +374,33 @@ func SetupStorageMiners(ctx 
context.Context, cs *store.ChainStore, sys vm.Syscal // Commit sectors { for pi, preseal := range m.Sectors { - params := &minertypes.SectorPreCommitInfo{ - SealProof: preseal.ProofType, - SectorNumber: preseal.SectorID, - SealedCID: preseal.CommR, - SealRandEpoch: -1, - DealIDs: []abi.DealID{minerInfos[i].dealIDs[pi]}, - Expiration: minerInfos[i].presealExp, // TODO: Allow setting externally! + var paramEnc []byte + var preCommitMethodNum abi.MethodNum + if nv >= network.Version22 { + paramEnc = mustEnc(&miner.PreCommitSectorBatchParams2{ + Sectors: []miner.SectorPreCommitInfo{ + { + SealProof: preseal.ProofType, + SectorNumber: preseal.SectorID, + SealedCID: preseal.CommR, + SealRandEpoch: -1, + DealIDs: []abi.DealID{minerInfos[i].dealIDs[pi]}, + Expiration: minerInfos[i].presealExp, // TODO: Allow setting externally! + UnsealedCid: &preseal.CommD, + }, + }, + }) + preCommitMethodNum = builtintypes.MethodsMiner.PreCommitSectorBatch2 + } else { + paramEnc = mustEnc(&minertypes.SectorPreCommitInfo{ + SealProof: preseal.ProofType, + SectorNumber: preseal.SectorID, + SealedCID: preseal.CommR, + SealRandEpoch: -1, + DealIDs: []abi.DealID{minerInfos[i].dealIDs[pi]}, + Expiration: minerInfos[i].presealExp, // TODO: Allow setting externally! + }) + preCommitMethodNum = builtintypes.MethodsMiner.PreCommitSector } sectorWeight := minerInfos[i].sectorWeight[pi] @@ -463,7 +483,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal pledge = big.Add(pcd, pledge) - _, err = doExecValue(ctx, genesisVm, minerInfos[i].maddr, m.Worker, pledge, builtintypes.MethodsMiner.PreCommitSector, mustEnc(params)) + _, err = doExecValue(ctx, genesisVm, minerInfos[i].maddr, m.Worker, pledge, preCommitMethodNum, paramEnc) if err != nil { return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err) } diff --git a/chain/stmgr/actors.go b/chain/stmgr/actors.go index 56744fa74..f1d615e8d 100644 --- a/chain/stmgr/actors.go +++ b/chain/stmgr/actors.go @@ -284,7 +284,7 @@ func GetStorageDeal(ctx context.Context, sm *StateManager, dealID abi.DealID, ts return &api.MarketDeal{ Proposal: *proposal, - State: *st, + State: api.MakeDealState(st), }, nil } diff --git a/chain/types/cbor_gen.go b/chain/types/cbor_gen.go index fe8e7e3fe..a097c72de 100644 --- a/chain/types/cbor_gen.go +++ b/chain/types/cbor_gen.go @@ -66,6 +66,7 @@ func (t *BlockHeader) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.WinPoStProof ([]proof.PoStProof) (slice) @@ -80,6 +81,7 @@ func (t *BlockHeader) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Parents ([]cid.Cid) (slice) @@ -266,9 +268,9 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.WinPoStProof ([]proof.PoStProof) (slice) maj, extra, err = cr.ReadHeader() @@ -304,9 +306,9 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Parents ([]cid.Cid) (slice) maj, extra, err = cr.ReadHeader() @@ -345,9 +347,9 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { t.Parents[i] = c } + } } - // t.ParentWeight (big.Int) (struct) { @@ -519,9 +521,10 @@ func (t *Ticket) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.VRFProof[:]); err != nil { + if _, err := cw.Write(t.VRFProof); err != nil { return err } + return nil } @@ -566,9 +569,10 @@ func (t *Ticket) UnmarshalCBOR(r io.Reader) (err error) { t.VRFProof = make([]uint8, extra) } - if _, err := io.ReadFull(cr, 
t.VRFProof[:]); err != nil { + if _, err := io.ReadFull(cr, t.VRFProof); err != nil { return err } + return nil } @@ -606,9 +610,10 @@ func (t *ElectionProof) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.VRFProof[:]); err != nil { + if _, err := cw.Write(t.VRFProof); err != nil { return err } + return nil } @@ -678,9 +683,10 @@ func (t *ElectionProof) UnmarshalCBOR(r io.Reader) (err error) { t.VRFProof = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.VRFProof[:]); err != nil { + if _, err := io.ReadFull(cr, t.VRFProof); err != nil { return err } + return nil } @@ -761,9 +767,10 @@ func (t *Message) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Params[:]); err != nil { + if _, err := cw.Write(t.Params); err != nil { return err } + return nil } @@ -920,9 +927,10 @@ func (t *Message) UnmarshalCBOR(r io.Reader) (err error) { t.Params = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Params[:]); err != nil { + if _, err := io.ReadFull(cr, t.Params); err != nil { return err } + return nil } @@ -1456,9 +1464,9 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) (err error) { t.BlsMessages[i] = c } + } } - // t.SecpkMessages ([]cid.Cid) (slice) maj, extra, err = cr.ReadHeader() @@ -1497,9 +1505,9 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) (err error) { t.SecpkMessages[i] = c } + } } - return nil } @@ -1545,6 +1553,7 @@ func (t *ExpTipSet) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Height (abi.ChainEpoch) (int64) @@ -1621,9 +1630,9 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) { t.Cids[i] = c } + } } - // t.Blocks ([]*types.BlockHeader) (slice) maj, extra, err = cr.ReadHeader() @@ -1669,9 +1678,9 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Height (abi.ChainEpoch) (int64) { maj, extra, err := cr.ReadHeader() @@ -1729,9 +1738,10 @@ func (t *BeaconEntry) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Data[:]); err != nil { + if _, err := cw.Write(t.Data); err != nil { return err } + return nil } @@ -1790,9 +1800,10 @@ func (t *BeaconEntry) UnmarshalCBOR(r io.Reader) (err error) { t.Data = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Data[:]); err != nil { + if _, err := io.ReadFull(cr, t.Data); err != nil { return err } + return nil } @@ -1908,6 +1919,7 @@ func (t *StateInfo0) MarshalCBOR(w io.Writer) error { if _, err := cw.Write(lengthBufStateInfo0); err != nil { return err } + return nil } @@ -1969,6 +1981,7 @@ func (t *Event) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -2045,9 +2058,9 @@ func (t *Event) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - return nil } @@ -2097,9 +2110,10 @@ func (t *EventEntry) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Value[:]); err != nil { + if _, err := cw.Write(t.Value); err != nil { return err } + return nil } @@ -2181,9 +2195,10 @@ func (t *EventEntry) UnmarshalCBOR(r io.Reader) (err error) { t.Value = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Value[:]); err != nil { + if _, err := io.ReadFull(cr, t.Value); err != nil { return err } + return nil } @@ -2439,7 +2454,7 @@ func (t *MessageTrace) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Params[:]); err != nil { + if _, err := cw.Write(t.Params); err != nil { return err } @@ -2551,9 +2566,10 @@ func (t *MessageTrace) UnmarshalCBOR(r io.Reader) (err error) { 
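
The chain/exchange and chain/types hunks above appear to be regenerated cbor-gen output; in particular, writing t.VRFProof instead of t.VRFProof[:] is a no-op for a byte slice, as this stand-alone check illustrates:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	b := []byte("vrf-proof bytes")
	// For a slice, b[:] re-slices the same backing array with the same length,
	// so writing b or b[:] to a CBOR stream produces identical output.
	fmt.Println(bytes.Equal(b, b[:])) // true
}
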
t.Params = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Params[:]); err != nil { + if _, err := io.ReadFull(cr, t.Params); err != nil { return err } + // t.ParamsCodec (uint64) (uint64) { @@ -2648,7 +2664,7 @@ func (t *ReturnTrace) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Return[:]); err != nil { + if _, err := cw.Write(t.Return); err != nil { return err } @@ -2727,9 +2743,10 @@ func (t *ReturnTrace) UnmarshalCBOR(r io.Reader) (err error) { t.Return = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Return[:]); err != nil { + if _, err := io.ReadFull(cr, t.Return); err != nil { return err } + // t.ReturnCodec (uint64) (uint64) { @@ -2783,6 +2800,7 @@ func (t *ExecutionTrace) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Subcalls ([]types.ExecutionTrace) (slice) @@ -2797,6 +2815,7 @@ func (t *ExecutionTrace) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -2887,9 +2906,9 @@ func (t *ExecutionTrace) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Subcalls ([]types.ExecutionTrace) (slice) maj, extra, err = cr.ReadHeader() @@ -2925,8 +2944,8 @@ func (t *ExecutionTrace) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - return nil } diff --git a/cli/client.go b/cli/client.go index 88f7ed208..81299b8fb 100644 --- a/cli/client.go +++ b/cli/client.go @@ -1770,7 +1770,7 @@ func dealFromDealInfo(ctx context.Context, full v0api.FullNode, head *types.TipS if v.DealID == 0 { return deal{ LocalDeal: v, - OnChainDealState: *market.EmptyDealState(), + OnChainDealState: market.EmptyDealState(), } } @@ -1781,7 +1781,7 @@ func dealFromDealInfo(ctx context.Context, full v0api.FullNode, head *types.TipS return deal{ LocalDeal: v, - OnChainDealState: onChain.State, + OnChainDealState: onChain.State.Iface(), } } @@ -1807,13 +1807,13 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, fmt.Fprintf(w, "Created\tDealCid\tDealId\tProvider\tState\tOn Chain?\tSlashed?\tPieceCID\tSize\tPrice\tDuration\tTransferChannelID\tTransferStatus\tVerified\tMessage\n") for _, d := range deals { onChain := "N" - if d.OnChainDealState.SectorStartEpoch != -1 { - onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch) + if d.OnChainDealState.SectorStartEpoch() != -1 { + onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch()) } slashed := "N" - if d.OnChainDealState.SlashEpoch != -1 { - slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch) + if d.OnChainDealState.SlashEpoch() != -1 { + slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch()) } price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration))) @@ -1869,13 +1869,13 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, propcid := ellipsis(d.LocalDeal.ProposalCid.String(), 8) onChain := "N" - if d.OnChainDealState.SectorStartEpoch != -1 { - onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch) + if d.OnChainDealState.SectorStartEpoch() != -1 { + onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch()) } slashed := "N" - if d.OnChainDealState.SlashEpoch != -1 { - slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch) + if d.OnChainDealState.SlashEpoch() != -1 { + slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch()) } piece := ellipsis(d.LocalDeal.PieceCID.String(), 8) diff 
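
In the CLI, the on-chain state kept alongside a local deal is now the getter-based interface (taken from onChain.State.Iface()), so the table renderers above format epochs through methods. A condensed helper equivalent to that formatting (names are illustrative):

package example // condensed form of the CLI rendering above

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
)

// yesAtEpoch renders the "On Chain?" / "Slashed?" columns: -1 means not yet,
// anything else is shown with the epoch at which it happened.
func yesAtEpoch(e abi.ChainEpoch) string {
	if e == -1 {
		return "N"
	}
	return fmt.Sprintf("Y (epoch %d)", e)
}

func onChainAndSlashed(ds market.DealState) (string, string) {
	return yesAtEpoch(ds.SectorStartEpoch()), yesAtEpoch(ds.SlashEpoch())
}
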
--git a/cmd/lotus-bench/amt_internal.go b/cmd/lotus-bench/amt_internal.go deleted file mode 100644 index f0e3035b7..000000000 --- a/cmd/lotus-bench/amt_internal.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copied from go-amt-ipld https://github.com/filecoin-project/go-amt-ipld/tree/master/internal -// which for some reason is a go internal package and therefore cannot be imported - -package main - -import ( - "fmt" - "io" - "math" - "sort" - - cid "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -type AMTRoot struct { - BitWidth uint64 - Height uint64 - Count uint64 - AMTNode AMTNode -} - -type AMTNode struct { - Bmap []byte - Links []cid.Cid - Values []*cbg.Deferred -} - -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. - -var _ = xerrors.Errorf -var _ = cid.Undef -var _ = math.E -var _ = sort.Sort - -var lengthBufAMTRoot = []byte{132} - -func (t *AMTRoot) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - - cw := cbg.NewCborWriter(w) - - if _, err := cw.Write(lengthBufAMTRoot); err != nil { - return err - } - - // t.BitWidth (uint64) (uint64) - - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.BitWidth); err != nil { - return err - } - - // t.Height (uint64) (uint64) - - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.Height); err != nil { - return err - } - - // t.Count (uint64) (uint64) - - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.Count); err != nil { - return err - } - - // t.AMTNode (internal.AMTNode) (struct) - if err := t.AMTNode.MarshalCBOR(cw); err != nil { - return err - } - return nil -} - -func (t *AMTRoot) UnmarshalCBOR(r io.Reader) (err error) { - *t = AMTRoot{} - - cr := cbg.NewCborReader(r) - - maj, extra, err := cr.ReadHeader() - if err != nil { - return err - } - defer func() { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.BitWidth (uint64) (uint64) - - { - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.BitWidth = extra - - } - // t.Height (uint64) (uint64) - - { - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Height = extra - - } - // t.Count (uint64) (uint64) - - { - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Count = extra - - } - // t.AMTNode (internal.AMTNode) (struct) - - { - - if err := t.AMTNode.UnmarshalCBOR(cr); err != nil { - return xerrors.Errorf("unmarshaling t.AMTNode: %w", err) - } - - } - return nil -} - -var lengthBufAMTNode = []byte{131} - -func (t *AMTNode) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - - cw := cbg.NewCborWriter(w) - - if _, err := cw.Write(lengthBufAMTNode); err != nil { - return err - } - - // t.Bmap ([]uint8) (slice) - if len(t.Bmap) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Bmap was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Bmap))); err != nil { - return err - } - - if _, err := cw.Write(t.Bmap[:]); err != nil { - 
return err - } - - // t.Links ([]cid.Cid) (slice) - if len(t.Links) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Links was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Links))); err != nil { - return err - } - for _, v := range t.Links { - if err := cbg.WriteCid(w, v); err != nil { - return xerrors.Errorf("failed writing cid field t.Links: %w", err) - } - } - - // t.Values ([]*typegen.Deferred) (slice) - if len(t.Values) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Values was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Values))); err != nil { - return err - } - for _, v := range t.Values { - if err := v.MarshalCBOR(cw); err != nil { - return err - } - } - return nil -} - -func (t *AMTNode) UnmarshalCBOR(r io.Reader) (err error) { - *t = AMTNode{} - - cr := cbg.NewCborReader(r) - - maj, extra, err := cr.ReadHeader() - if err != nil { - return err - } - defer func() { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Bmap ([]uint8) (slice) - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Bmap: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Bmap = make([]uint8, extra) - } - - if _, err := io.ReadFull(cr, t.Bmap[:]); err != nil { - return err - } - // t.Links ([]cid.Cid) (slice) - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Links: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Links = make([]cid.Cid, extra) - } - - for i := 0; i < int(extra); i++ { - - c, err := cbg.ReadCid(cr) - if err != nil { - return xerrors.Errorf("reading cid field t.Links failed: %w", err) - } - t.Links[i] = c - } - - // t.Values ([]*typegen.Deferred) (slice) - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Values: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Values = make([]*cbg.Deferred, extra) - } - - for i := 0; i < int(extra); i++ { - - var v cbg.Deferred - if err := v.UnmarshalCBOR(cr); err != nil { - return err - } - - t.Values[i] = &v - } - - return nil -} diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go index 7d3c0cde0..545ed1eb9 100644 --- a/cmd/lotus-bench/main.go +++ b/cmd/lotus-bench/main.go @@ -1,7 +1,6 @@ package main import ( - "bytes" "context" "crypto/rand" "encoding/json" @@ -9,16 +8,9 @@ import ( "math/big" "os" "path/filepath" - "sync" "time" "github.com/docker/go-units" - "github.com/ipfs/boxo/blockservice" - "github.com/ipfs/boxo/ipld/merkledag" - "github.com/ipfs/go-cid" - offline "github.com/ipfs/go-ipfs-exchange-offline" - cbor "github.com/ipfs/go-ipld-cbor" - format "github.com/ipfs/go-ipld-format" logging "github.com/ipfs/go-log/v2" "github.com/minio/blake2b-simd" "github.com/mitchellh/go-homedir" @@ -28,14 +20,10 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-paramfetch" "github.com/filecoin-project/go-state-types/abi" - 
"github.com/filecoin-project/go-state-types/builtin/v9/verifreg" prooftypes "github.com/filecoin-project/go-state-types/proof" - adt "github.com/filecoin-project/specs-actors/v6/actors/util/adt" lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" @@ -116,7 +104,6 @@ func main() { DisableSliceFlagSeparator: true, Commands: []*cli.Command{ proveCmd, - amtBenchCmd, sealBenchCmd, simpleCmd, importBenchCmd, @@ -131,211 +118,6 @@ func main() { } } -type amtStatCollector struct { - ds format.NodeGetter - walk func(format.Node) ([]*format.Link, error) - - statsLk sync.Mutex - totalAMTLinks int - totalAMTValues int - totalAMTLinkNodes int - totalAMTValueNodes int - totalAMTLinkNodeSize int - totalAMTValueNodeSize int -} - -func (asc *amtStatCollector) String() string { - asc.statsLk.Lock() - defer asc.statsLk.Unlock() - - str := "\n------------\n" - str += fmt.Sprintf("Link Count: %d\n", asc.totalAMTLinks) - str += fmt.Sprintf("Value Count: %d\n", asc.totalAMTValues) - str += fmt.Sprintf("%d link nodes %d bytes\n", asc.totalAMTLinkNodes, asc.totalAMTLinkNodeSize) - str += fmt.Sprintf("%d value nodes %d bytes\n", asc.totalAMTValueNodes, asc.totalAMTValueNodeSize) - str += fmt.Sprintf("Total bytes: %d\n------------\n", asc.totalAMTLinkNodeSize+asc.totalAMTValueNodeSize) - return str -} - -func (asc *amtStatCollector) record(ctx context.Context, nd format.Node) error { - size, err := nd.Size() - if err != nil { - return err - } - - var node AMTNode - if err := node.UnmarshalCBOR(bytes.NewReader(nd.RawData())); err != nil { - // try to deserialize root - var root AMTRoot - if err := root.UnmarshalCBOR(bytes.NewReader(nd.RawData())); err != nil { - return err - } - node = root.AMTNode - } - - asc.statsLk.Lock() - defer asc.statsLk.Unlock() - - link := len(node.Links) > 0 - value := len(node.Values) > 0 - - if link { - asc.totalAMTLinks += len(node.Links) - asc.totalAMTLinkNodes++ - asc.totalAMTLinkNodeSize += int(size) - } else if value { - asc.totalAMTValues += len(node.Values) - asc.totalAMTValueNodes++ - asc.totalAMTValueNodeSize += int(size) - } else { - return xerrors.Errorf("unexpected AMT node %x: neither link nor value", nd.RawData()) - } - - return nil -} - -func (asc *amtStatCollector) walkLinks(ctx context.Context, c cid.Cid) ([]*format.Link, error) { - nd, err := asc.ds.Get(ctx, c) - if err != nil { - return nil, err - } - - if err := asc.record(ctx, nd); err != nil { - return nil, err - } - - return asc.walk(nd) -} - -func carWalkFunc(nd format.Node) (out []*format.Link, err error) { - for _, link := range nd.Links() { - if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed { - continue - } - out = append(out, link) - } - return out, nil -} - -var amtBenchCmd = &cli.Command{ - Name: "amt", - Usage: "Benchmark AMT churn", - Flags: []cli.Flag{ - &cli.IntFlag{ - Name: "rounds", - Usage: "rounds of churn to measure", - Value: 1, - }, - &cli.IntFlag{ - Name: "interval", - Usage: "AMT idx interval for churning values", - Value: 2880, - }, - &cli.IntFlag{ - Name: "bitwidth", - Usage: "AMT bitwidth", - Value: 6, - }, - }, - Action: func(c *cli.Context) error { - bs := blockstore.NewMemory() - ctx := c.Context - store := adt.WrapStore(ctx, 
cbor.NewCborStore(bs)) - - // Setup in memory blockstore - bitwidth := c.Int("bitwidth") - array, err := adt.MakeEmptyArray(store, bitwidth) - if err != nil { - return err - } - - // Using motivating empirical example: market actor states AMT - // Create 40,000,000 states for realistic workload - fmt.Printf("Populating AMT\n") - for i := 0; i < 40000000; i++ { - if err := array.Set(uint64(i), &market.DealState{ - SectorStartEpoch: abi.ChainEpoch(2000000 + i), - LastUpdatedEpoch: abi.ChainEpoch(-1), - SlashEpoch: -1, - VerifiedClaim: verifreg.AllocationId(i), - }); err != nil { - return err - } - } - - r, err := array.Root() - if err != nil { - return err - } - - // Measure ratio of internal / leaf nodes / sizes - dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) - asc := &amtStatCollector{ - ds: dag, - walk: carWalkFunc, - } - - fmt.Printf("Measuring AMT\n") - seen := cid.NewSet() - if err := merkledag.Walk(ctx, asc.walkLinks, r, seen.Visit, merkledag.Concurrent()); err != nil { - return err - } - - fmt.Printf("%s\n", asc) - - // Overwrite ids with idx % interval: one epoch of market cron - rounds := c.Int("rounds") - interval := c.Int("interval") - - fmt.Printf("Overwrite 1 out of %d values for %d rounds\n", interval, rounds) - array, err = adt.AsArray(store, r, bitwidth) - if err != nil { - return err - } - roots := make([]cid.Cid, rounds) - for j := 0; j < rounds; j++ { - if j%10 == 0 { - fmt.Printf("round: %d\n", j) - } - for i := j; i < 40000000; i += interval { - if i%interval == j { - if err := array.Set(uint64(i), &market.DealState{ - SectorStartEpoch: abi.ChainEpoch(2000000 + i), - LastUpdatedEpoch: abi.ChainEpoch(1), - SlashEpoch: -1, - VerifiedClaim: verifreg.AllocationId(i), - }); err != nil { - return err - } - } - } - roots[j], err = array.Root() - if err != nil { - return err - } - - } - - // Measure churn - dag = merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) - asc = &amtStatCollector{ - ds: dag, - walk: carWalkFunc, - } - - fmt.Printf("Measuring %d rounds of churn\n", rounds) - - for _, r := range roots { - if err := merkledag.Walk(ctx, asc.walkLinks, r, seen.Visit, merkledag.Concurrent()); err != nil { - return err - } - } - - fmt.Printf("%s\n", asc) - return nil - }, -} - var sealBenchCmd = &cli.Command{ Name: "sealing", Usage: "Benchmark seal and winning post and window post", diff --git a/cmd/lotus-miner/init.go b/cmd/lotus-miner/init.go index 1b76960e9..e27b2d716 100644 --- a/cmd/lotus-miner/init.go +++ b/cmd/lotus-miner/init.go @@ -55,6 +55,7 @@ import ( "github.com/filecoin-project/lotus/storage" "github.com/filecoin-project/lotus/storage/paths" pipeline "github.com/filecoin-project/lotus/storage/pipeline" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" "github.com/filecoin-project/lotus/storage/sealer/storiface" @@ -320,21 +321,21 @@ func migratePreSealMeta(ctx context.Context, api v1api.FullNode, metadata string info := &pipeline.SectorInfo{ State: pipeline.Proving, SectorNumber: sector.SectorID, - Pieces: []lapi.SectorPiece{ - { + Pieces: []pipeline.SafeSectorPiece{ + pipeline.SafePiece(lapi.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(meta.SectorSize), PieceCID: commD, }, - DealInfo: &lapi.PieceDealInfo{ + DealInfo: &piece.PieceDealInfo{ DealID: dealID, DealProposal: §or.Deal, - DealSchedule: lapi.DealSchedule{ + DealSchedule: piece.DealSchedule{ StartEpoch: 
sector.Deal.StartEpoch, EndEpoch: sector.Deal.EndEpoch, }, }, - }, + }), }, CommD: &commD, CommR: &commR, diff --git a/cmd/lotus-shed/fip-0036.go b/cmd/lotus-shed/fip-0036.go deleted file mode 100644 index 4c8456c04..000000000 --- a/cmd/lotus-shed/fip-0036.go +++ /dev/null @@ -1,554 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "fmt" - "io" - "os" - "sort" - "strconv" - - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/mitchellh/go-homedir" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - - "github.com/filecoin-project/lotus/chain/actors/adt" - "github.com/filecoin-project/lotus/chain/actors/builtin" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" - "github.com/filecoin-project/lotus/chain/actors/builtin/power" - "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/repo" -) - -type Option uint64 - -const ( - Approve Option = 49 - Reject Option = 50 -) - -type Vote struct { - ID uint64 - OptionID Option - SignerAddress address.Address -} - -type msigVote struct { - Multisig msigBriefInfo - ApproveCount uint64 - RejectCount uint64 -} - -// https://filpoll.io/poll/16 -// snapshot height: 2162760 -// state root: bafy2bzacebdnzh43hw66bmvguk65wiwr5ssaejlq44fpdei2ysfh3eefpdlqs -var fip36PollCmd = &cli.Command{ - Name: "fip36poll", - Usage: "Process the FIP0036 FilPoll result", - ArgsUsage: "[state root, votes]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "repo", - Value: "~/.lotus", - }, - }, - Subcommands: []*cli.Command{ - finalResultCmd, - }, -} - -var finalResultCmd = &cli.Command{ - Name: "results", - Usage: "get poll results", - ArgsUsage: "[state root] [height] [votes json]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "repo", - Value: "~/.lotus", - }, - }, - - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 3 { - return xerrors.New("filpoll0036 results [state root] [height] [votes.json]") - } - - ctx := context.TODO() - if !cctx.Args().Present() { - return fmt.Errorf("must pass state root") - } - - sroot, err := cid.Decode(cctx.Args().First()) - if err != nil { - return fmt.Errorf("failed to parse input: %w", err) - } - - fsrepo, err := repo.NewFS(cctx.String("repo")) - if err != nil { - return err - } - - lkrepo, err := fsrepo.Lock(repo.FullNode) - if err != nil { - return err - } - - defer lkrepo.Close() //nolint:errcheck - - bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) - if err != nil { - return fmt.Errorf("failed to open blockstore: %w", err) - } - - defer func() { - if c, ok := bs.(io.Closer); ok { - if err := c.Close(); err != nil { - log.Warnf("failed to close blockstore: %s", err) - } - } - }() - - mds, err := lkrepo.Datastore(context.Background(), "/metadata") - if err != nil { - return err - } - - cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) - defer cs.Close() //nolint:errcheck - - cst := cbor.NewCborStore(bs) - store := adt.WrapStore(ctx, cst) - - st, err := state.LoadStateTree(cst, sroot) - if err != nil { - return err - } - - height, err := strconv.Atoi(cctx.Args().Get(1)) - if err != 
nil { - return err - } - - //get all the votes' signer ID address && their vote - vj, err := homedir.Expand(cctx.Args().Get(2)) - if err != nil { - return xerrors.Errorf("fail to get votes json") - } - votes, err := getVotesMap(vj) - if err != nil { - return xerrors.Errorf("failed to get voters: %w\n", err) - } - - type minerBriefInfo struct { - rawBytePower abi.StoragePower - dealPower abi.StoragePower - balance abi.TokenAmount - } - - // power actor - pa, err := st.GetActor(power.Address) - if err != nil { - return xerrors.Errorf("failed to get power actor: %w\n", err) - } - - powerState, err := power.Load(store, pa) - if err != nil { - return xerrors.Errorf("failed to get power state: %w\n", err) - } - - //market actor - ma, err := st.GetActor(market.Address) - if err != nil { - return xerrors.Errorf("fail to get market actor: %w\n", err) - } - - marketState, err := market.Load(store, ma) - if err != nil { - return xerrors.Errorf("fail to load market state: %w\n", err) - } - - lookupId := func(addr address.Address) address.Address { - ret, err := st.LookupID(addr) - if err != nil { - panic(err) - } - - return ret - } - - // we need to build several pieces of information, as we traverse the state tree: - // a map of accounts to every msig that they are a signer of - accountsToMultisigs := make(map[address.Address][]address.Address) - // a map of multisigs to some info about them for quick lookup - msigActorsInfo := make(map[address.Address]msigBriefInfo) - // a map of actors (accounts+multisigs) to every miner that they are an owner of - ownerMap := make(map[address.Address][]address.Address) - // a map of accounts to every miner that they are a worker of - workerMap := make(map[address.Address][]address.Address) - // a map of miners to some info about them for quick lookup - minerActorsInfo := make(map[address.Address]minerBriefInfo) - // a map of client addresses to deal data stored in proposals - clientToDealStorage := make(map[address.Address]abi.StoragePower) - - fmt.Println("iterating over all actors") - count := 0 - err = st.ForEach(func(addr address.Address, act *types.Actor) error { - if count%200000 == 0 { - fmt.Println("processed ", count, " actors building maps") - } - count++ - if builtin.IsMultisigActor(act.Code) { - ms, err := multisig.Load(store, act) - if err != nil { - return fmt.Errorf("load msig failed %v", err) - - } - - // TODO: Confirm that these are always ID addresses - signers, err := ms.Signers() - if err != nil { - return xerrors.Errorf("fail to get msig signers: %w", err) - } - for _, s := range signers { - signerId := lookupId(s) - accountsToMultisigs[signerId] = append(accountsToMultisigs[signerId], addr) - } - - locked, err := ms.LockedBalance(abi.ChainEpoch(height)) - if err != nil { - return xerrors.Errorf("failed to compute locked multisig balance: %w", err) - } - - threshold, _ := ms.Threshold() - info := msigBriefInfo{ - ID: addr, - Signer: signers, - Balance: big.Max(big.Zero(), types.BigSub(act.Balance, locked)), - Threshold: threshold, - } - msigActorsInfo[addr] = info - } - - if builtin.IsStorageMinerActor(act.Code) { - m, err := miner.Load(store, act) - if err != nil { - return xerrors.Errorf("fail to load miner actor: %w", err) - } - - info, err := m.Info() - if err != nil { - return xerrors.Errorf("fail to get miner info: %w\n", err) - } - - ownerId := lookupId(info.Owner) - ownerMap[ownerId] = append(ownerMap[ownerId], addr) - - workerId := lookupId(info.Worker) - workerMap[workerId] = append(workerMap[workerId], addr) - - lockedFunds, err := 
m.LockedFunds() - if err != nil { - return err - } - - bal := big.Sub(act.Balance, lockedFunds.TotalLockedFunds()) - bal = big.Max(big.Zero(), bal) - - pow, ok, err := powerState.MinerPower(addr) - if err != nil { - return err - } - - if !ok { - pow.RawBytePower = big.Zero() - } - - minerActorsInfo[addr] = minerBriefInfo{ - rawBytePower: pow.RawBytePower, - // gets added up outside this loop - dealPower: big.Zero(), - balance: bal, - } - } - - return nil - }) - - if err != nil { - return err - } - - fmt.Println("iterating over proposals") - dealProposals, err := marketState.Proposals() - if err != nil { - return err - } - - dealStates, err := marketState.States() - if err != nil { - return err - } - - if err := dealProposals.ForEach(func(dealID abi.DealID, d market.DealProposal) error { - - dealState, ok, err := dealStates.Get(dealID) - if err != nil { - return err - } - if !ok || dealState.SectorStartEpoch == -1 { - // effectively a continue - return nil - } - - clientId := lookupId(d.Client) - if cd, found := clientToDealStorage[clientId]; found { - clientToDealStorage[clientId] = big.Add(cd, big.NewInt(int64(d.PieceSize))) - } else { - clientToDealStorage[clientId] = big.NewInt(int64(d.PieceSize)) - } - - providerId := lookupId(d.Provider) - mai, found := minerActorsInfo[providerId] - - if !found { - return xerrors.Errorf("didn't find miner %s", providerId) - } - - mai.dealPower = big.Add(mai.dealPower, big.NewInt(int64(d.PieceSize))) - minerActorsInfo[providerId] = mai - return nil - }); err != nil { - return xerrors.Errorf("fail to get deals") - } - - // now tabulate votes - - approveBalance := abi.NewTokenAmount(0) - rejectionBalance := abi.NewTokenAmount(0) - clientApproveBytes := big.Zero() - clientRejectBytes := big.Zero() - msigPendingVotes := make(map[address.Address]msigVote) //map[msig ID]msigVote - msigVotes := make(map[address.Address]Option) - minerVotes := make(map[address.Address]Option) - fmt.Println("counting account and multisig votes") - for _, vote := range votes { - signerId, err := st.LookupID(vote.SignerAddress) - if err != nil { - fmt.Println("voter ", vote.SignerAddress, " not found in state tree, skipping") - continue - } - - //process votes for regular accounts - accountActor, err := st.GetActor(signerId) - if err != nil { - return xerrors.Errorf("fail to get account account for signer: %w\n", err) - } - - clientBytes, ok := clientToDealStorage[signerId] - if !ok { - clientBytes = big.Zero() - } - - if vote.OptionID == Approve { - approveBalance = types.BigAdd(approveBalance, accountActor.Balance) - clientApproveBytes = big.Add(clientApproveBytes, clientBytes) - } else { - rejectionBalance = types.BigAdd(rejectionBalance, accountActor.Balance) - clientRejectBytes = big.Add(clientRejectBytes, clientBytes) - } - - if minerInfos, found := ownerMap[signerId]; found { - for _, minerInfo := range minerInfos { - minerVotes[minerInfo] = vote.OptionID - } - } - if minerInfos, found := workerMap[signerId]; found { - for _, minerInfo := range minerInfos { - if _, ok := minerVotes[minerInfo]; !ok { - minerVotes[minerInfo] = vote.OptionID - } - } - } - - //process msigs - // There is a possibility that enough signers have voted for BOTH options in the poll to be above the threshold - // Because we are iterating over votes in order they arrived, the first option to go over the threshold will win - // This is in line with onchain behaviour (consider a case where signers are competing to withdraw all the funds - // in an msig into 2 different accounts) - if mss, found := 
accountsToMultisigs[signerId]; found { - for _, ms := range mss { //get all the msig signer has - if _, ok := msigVotes[ms]; ok { - // msig has already voted, skip - continue - } - if mpv, found := msigPendingVotes[ms]; found { //other signers of the multisig have voted, yet the threshold has not met - if vote.OptionID == Approve { - if mpv.ApproveCount+1 == mpv.Multisig.Threshold { //met threshold - approveBalance = types.BigAdd(approveBalance, mpv.Multisig.Balance) - delete(msigPendingVotes, ms) //threshold, can skip later signer votes - msigVotes[ms] = vote.OptionID - - } else { - mpv.ApproveCount++ - msigPendingVotes[ms] = mpv - } - } else { - if mpv.RejectCount+1 == mpv.Multisig.Threshold { //met threshold - rejectionBalance = types.BigAdd(rejectionBalance, mpv.Multisig.Balance) - delete(msigPendingVotes, ms) //threshold, can skip later signer votes - msigVotes[ms] = vote.OptionID - - } else { - mpv.RejectCount++ - msigPendingVotes[ms] = mpv - } - } - } else { //first vote received from one of the signers of the msig - msi, ok := msigActorsInfo[ms] - if !ok { - return xerrors.Errorf("didn't find msig %s in msig map", ms) - } - - if msi.Threshold == 1 { //met threshold with this signer's single vote - if vote.OptionID == Approve { - approveBalance = types.BigAdd(approveBalance, msi.Balance) - msigVotes[ms] = Approve - - } else { - rejectionBalance = types.BigAdd(rejectionBalance, msi.Balance) - msigVotes[ms] = Reject - } - } else { //threshold not met, add to pending vote - if vote.OptionID == Approve { - msigPendingVotes[ms] = msigVote{ - Multisig: msi, - ApproveCount: 1, - } - } else { - msigPendingVotes[ms] = msigVote{ - Multisig: msi, - RejectCount: 1, - } - } - } - } - } - } - } - - for s, v := range msigVotes { - if minerInfos, found := ownerMap[s]; found { - for _, minerInfo := range minerInfos { - minerVotes[minerInfo] = v - } - } - if minerInfos, found := workerMap[s]; found { - for _, minerInfo := range minerInfos { - if _, ok := minerVotes[minerInfo]; !ok { - minerVotes[minerInfo] = v - } - } - } - } - - approveRBP := big.Zero() - approveDealPower := big.Zero() - rejectionRBP := big.Zero() - rejectionDealPower := big.Zero() - fmt.Println("adding up miner votes") - for minerAddr, vote := range minerVotes { - mbi, ok := minerActorsInfo[minerAddr] - if !ok { - return xerrors.Errorf("failed to find miner info for %s", minerAddr) - } - - if vote == Approve { - approveBalance = big.Add(approveBalance, mbi.balance) - approveRBP = big.Add(approveRBP, mbi.rawBytePower) - approveDealPower = big.Add(approveDealPower, mbi.dealPower) - } else { - rejectionBalance = big.Add(rejectionBalance, mbi.balance) - rejectionRBP = big.Add(rejectionRBP, mbi.rawBytePower) - rejectionDealPower = big.Add(rejectionDealPower, mbi.dealPower) - } - } - - fmt.Println("Total acceptance token: ", approveBalance) - fmt.Println("Total rejection token: ", rejectionBalance) - - fmt.Println("Total acceptance SP deal power: ", approveDealPower) - fmt.Println("Total rejection SP deal power: ", rejectionDealPower) - - fmt.Println("Total acceptance SP rb power: ", approveRBP) - fmt.Println("Total rejection SP rb power: ", rejectionRBP) - - fmt.Println("Total acceptance Client rb power: ", clientApproveBytes) - fmt.Println("Total rejection Client rb power: ", clientRejectBytes) - - fmt.Println("\n\nFinal results **drumroll**") - if rejectionBalance.GreaterThanEqual(big.Mul(approveBalance, big.NewInt(3))) { - fmt.Println("token holders VETO FIP-0036!") - } else if approveBalance.LessThanEqual(rejectionBalance) { - 
fmt.Println("token holders REJECT FIP-0036") - } else { - fmt.Println("token holders ACCEPT FIP-0036") - } - - if rejectionDealPower.GreaterThanEqual(big.Mul(approveDealPower, big.NewInt(3))) { - fmt.Println("SPs by deal data stored VETO FIP-0036!") - } else if approveDealPower.LessThanEqual(rejectionDealPower) { - fmt.Println("SPs by deal data stored REJECT FIP-0036") - } else { - fmt.Println("SPs by deal data stored ACCEPT FIP-0036") - } - - if rejectionRBP.GreaterThanEqual(big.Mul(approveRBP, big.NewInt(3))) { - fmt.Println("SPs by total raw byte power VETO FIP-0036!") - } else if approveRBP.LessThanEqual(rejectionRBP) { - fmt.Println("SPs by total raw byte power REJECT FIP-0036") - } else { - fmt.Println("SPs by total raw byte power ACCEPT FIP-0036") - } - - if clientRejectBytes.GreaterThanEqual(big.Mul(clientApproveBytes, big.NewInt(3))) { - fmt.Println("Storage Clients VETO FIP-0036!") - } else if clientApproveBytes.LessThanEqual(clientRejectBytes) { - fmt.Println("Storage Clients REJECT FIP-0036") - } else { - fmt.Println("Storage Clients ACCEPT FIP-0036") - } - - return nil - }, -} - -// Returns voted sorted by votes from earliest to latest -func getVotesMap(file string) ([]Vote, error) { - var votes []Vote - vb, err := os.ReadFile(file) - if err != nil { - return nil, xerrors.Errorf("read vote: %w", err) - } - - if err := json.Unmarshal(vb, &votes); err != nil { - return nil, xerrors.Errorf("unmarshal vote: %w", err) - } - - sort.SliceStable(votes, func(i, j int) bool { - return votes[i].ID < votes[j].ID - }) - - return votes, nil -} diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index a5b66a096..e3b1333ed 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -84,7 +84,6 @@ func main() { diffCmd, itestdCmd, msigCmd, - fip36PollCmd, invariantsCmd, gasTraceCmd, replayOfflineCmd, diff --git a/cmd/lotus-shed/market.go b/cmd/lotus-shed/market.go index 4436e3c40..6fb1566b6 100644 --- a/cmd/lotus-shed/market.go +++ b/cmd/lotus-shed/market.go @@ -387,7 +387,7 @@ var marketDealsTotalStorageCmd = &cli.Command{ count := 0 for _, deal := range deals { - if market.IsDealActive(deal.State) { + if market.IsDealActive(deal.State.Iface()) { dealStorage := big.NewIntUnsigned(uint64(deal.Proposal.PieceSize)) total = big.Add(total, dealStorage) count++ diff --git a/cmd/lotus-shed/shedgen/cbor_gen.go b/cmd/lotus-shed/shedgen/cbor_gen.go index f2a79fe7d..af0a42fd9 100644 --- a/cmd/lotus-shed/shedgen/cbor_gen.go +++ b/cmd/lotus-shed/shedgen/cbor_gen.go @@ -136,6 +136,7 @@ func (t *CarbNode) UnmarshalCBOR(r io.Reader) (err error) { t.Sub[i] = c } + } } diff --git a/conformance/chaos/cbor_gen.go b/conformance/chaos/cbor_gen.go index d74ae0946..a8561163e 100644 --- a/conformance/chaos/cbor_gen.go +++ b/conformance/chaos/cbor_gen.go @@ -60,6 +60,7 @@ func (t *State) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -142,9 +143,9 @@ func (t *State) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - return nil } @@ -185,6 +186,7 @@ func (t *CallerValidationArgs) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Types ([]cid.Cid) (slice) @@ -288,9 +290,9 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Types ([]cid.Cid) (slice) maj, extra, err = cr.ReadHeader() @@ -329,9 +331,9 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) (err error) { t.Types[i] = c } + } } - return nil } @@ -570,9 +572,10 @@ func (t *SendArgs) 
MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Params[:]); err != nil { + if _, err := cw.Write(t.Params); err != nil { return err } + return nil } @@ -649,9 +652,10 @@ func (t *SendArgs) UnmarshalCBOR(r io.Reader) (err error) { t.Params = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Params[:]); err != nil { + if _, err := io.ReadFull(cr, t.Params); err != nil { return err } + return nil } @@ -678,7 +682,7 @@ func (t *SendReturn) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Return[:]); err != nil { + if _, err := cw.Write(t.Return); err != nil { return err } @@ -736,9 +740,10 @@ func (t *SendReturn) UnmarshalCBOR(r io.Reader) (err error) { t.Return = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Return[:]); err != nil { + if _, err := io.ReadFull(cr, t.Return); err != nil { return err } + // t.Code (exitcode.ExitCode) (int64) { maj, extra, err := cr.ReadHeader() diff --git a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md index 9016d5a13..b133930bc 100644 --- a/documentation/en/api-v0-methods-miner.md +++ b/documentation/en/api-v0-methods-miner.md @@ -826,8 +826,7 @@ Response: "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } ] @@ -1426,8 +1425,7 @@ Response: "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } ] @@ -2910,6 +2908,14 @@ Inputs: "StartEpoch": 10101, "EndEpoch": 10101 }, + "PieceActivationManifest": { + "CID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 2032, + "VerifiedAllocationKey": null, + "Notify": null + }, "KeepUnsealed": true } ] @@ -3212,6 +3218,14 @@ Inputs: "StartEpoch": 10101, "EndEpoch": 10101 }, + "PieceActivationManifest": { + "CID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 2032, + "VerifiedAllocationKey": null, + "Notify": null + }, "KeepUnsealed": true } } @@ -3556,6 +3570,14 @@ Response: "StartEpoch": 10101, "EndEpoch": 10101 }, + "PieceActivationManifest": { + "CID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 2032, + "VerifiedAllocationKey": null, + "Notify": null + }, "KeepUnsealed": true } } diff --git a/documentation/en/api-v0-methods.md b/documentation/en/api-v0-methods.md index 10b4f1f41..a799db39c 100644 --- a/documentation/en/api-v0-methods.md +++ b/documentation/en/api-v0-methods.md @@ -5751,8 +5751,7 @@ Response: "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } } @@ -5830,8 +5829,7 @@ Response: "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } ``` diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md index a43bcf1f0..049f54773 100644 --- a/documentation/en/api-v1-unstable-methods.md +++ b/documentation/en/api-v1-unstable-methods.md @@ -232,6 +232,7 @@ * [StateGetActor](#StateGetActor) * [StateGetAllocation](#StateGetAllocation) * [StateGetAllocationForPendingDeal](#StateGetAllocationForPendingDeal) + * [StateGetAllocationIdForPendingDeal](#StateGetAllocationIdForPendingDeal) * [StateGetAllocations](#StateGetAllocations) * [StateGetBeaconEntry](#StateGetBeaconEntry) * [StateGetClaim](#StateGetClaim) @@ -6921,6 +6922,29 @@ 
Response: } ``` +### StateGetAllocationIdForPendingDeal +StateGetAllocationIdForPendingDeal is like StateGetAllocationForPendingDeal except it returns the allocation ID + + +Perms: read + +Inputs: +```json +[ + 5432, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `0` + ### StateGetAllocations StateGetAllocations returns the all the allocations for a given client. @@ -7379,8 +7403,7 @@ Response: "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } } @@ -7458,8 +7481,7 @@ Response: "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } ``` diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md index 7f1c0a01a..dbc5a6ee2 100644 --- a/documentation/en/cli-lotus-miner.md +++ b/documentation/en/cli-lotus-miner.md @@ -7,7 +7,7 @@ USAGE: lotus-miner [global options] command [command options] [arguments...] VERSION: - 1.25.2 + 1.25.3-dev COMMANDS: init Initialize a lotus miner repo diff --git a/documentation/en/cli-lotus-provider.md b/documentation/en/cli-lotus-provider.md index 932350fe2..de77697e3 100644 --- a/documentation/en/cli-lotus-provider.md +++ b/documentation/en/cli-lotus-provider.md @@ -7,7 +7,7 @@ USAGE: lotus-provider [global options] command [command options] [arguments...] VERSION: - 1.25.2 + 1.25.3-dev COMMANDS: run Start a lotus provider process diff --git a/documentation/en/cli-lotus-worker.md b/documentation/en/cli-lotus-worker.md index 1da1165cf..0e0fee157 100644 --- a/documentation/en/cli-lotus-worker.md +++ b/documentation/en/cli-lotus-worker.md @@ -7,7 +7,7 @@ USAGE: lotus-worker [global options] command [command options] [arguments...] VERSION: - 1.25.2 + 1.25.3-dev COMMANDS: run Start lotus worker diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md index 5e451dcad..ff62980dc 100644 --- a/documentation/en/cli-lotus.md +++ b/documentation/en/cli-lotus.md @@ -7,7 +7,7 @@ USAGE: lotus [global options] command [command options] [arguments...] VERSION: - 1.25.2 + 1.25.3-dev COMMANDS: daemon Start a lotus daemon process diff --git a/documentation/en/default-lotus-miner-config.toml b/documentation/en/default-lotus-miner-config.toml index a65e82e95..17fd24fa3 100644 --- a/documentation/en/default-lotus-miner-config.toml +++ b/documentation/en/default-lotus-miner-config.toml @@ -702,6 +702,30 @@ # env var: LOTUS_SEALING_USESYNTHETICPOREP #UseSyntheticPoRep = false + # Whether to abort if any sector activation in a batch fails (newly sealed sectors, only with ProveCommitSectors3). + # + # type: bool + # env var: LOTUS_SEALING_REQUIREACTIVATIONSUCCESS + #RequireActivationSuccess = false + + # Whether to abort if any piece activation notification returns a non-zero exit code (newly sealed sectors, only with ProveCommitSectors3). + # + # type: bool + # env var: LOTUS_SEALING_REQUIREACTIVATIONSUCCESSUPDATE + #RequireActivationSuccessUpdate = false + + # Whether to abort if any sector activation in a batch fails (updating sectors, only with ProveReplicaUpdates3). + # + # type: bool + # env var: LOTUS_SEALING_REQUIRENOTIFICATIONSUCCESS + #RequireNotificationSuccess = false + + # Whether to abort if any piece activation notification returns a non-zero exit code (updating sectors, only with ProveReplicaUpdates3). 
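A short, hedged sketch of how a client might call the new `StateGetAllocationIdForPendingDeal` endpoint documented above. It assumes an already-connected `v1api.FullNode` handle (however obtained); the method signature matches the one added to `node/impl/full/state.go` in this diff, and the treatment of `verifreg.NoAllocationID` as "no pending allocation" is an assumption based on that implementation's error paths.

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api/v1api"
	"github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
	"github.com/filecoin-project/lotus/chain/types"
)

// printPendingAllocationID looks up the allocation ID attached to a published,
// not-yet-activated deal using the new RPC added in this diff.
func printPendingAllocationID(ctx context.Context, node v1api.FullNode, dealID abi.DealID) error {
	allocID, err := node.StateGetAllocationIdForPendingDeal(ctx, dealID, types.EmptyTSK)
	if err != nil {
		return err
	}
	// Assumption: a NoAllocationID result means no allocation is pending for this deal.
	if allocID == verifreg.NoAllocationID {
		fmt.Printf("deal %d has no pending allocation\n", dealID)
		return nil
	}
	fmt.Printf("deal %d -> allocation %d\n", dealID, allocID)
	return nil
}
```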
+ # + # type: bool + # env var: LOTUS_SEALING_REQUIRENOTIFICATIONSUCCESSUPDATE + #RequireNotificationSuccessUpdate = false + [Storage] # type: int diff --git a/gen/main.go b/gen/main.go index 942b3ac2c..447822893 100644 --- a/gen/main.go +++ b/gen/main.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/lotus/cmd/lotus-shed/shedgen" "github.com/filecoin-project/lotus/node/hello" "github.com/filecoin-project/lotus/paychmgr" + "github.com/filecoin-project/lotus/storage/pipeline/piece" sectorstorage "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -63,9 +64,7 @@ func main() { api.SealedRefs{}, api.SealTicket{}, api.SealSeed{}, - api.PieceDealInfo{}, api.SectorPiece{}, - api.DealSchedule{}, ) if err != nil { fmt.Println(err) @@ -110,6 +109,15 @@ func main() { os.Exit(1) } + err = gen.WriteMapEncodersToFile("./storage/pipeline/piece/cbor_gen.go", "piece", + piece.PieceDealInfo{}, + piece.DealSchedule{}, + ) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = gen.WriteMapEncodersToFile("./storage/sealer/cbor_gen.go", "sealer", sectorstorage.Call{}, sectorstorage.WorkState{}, diff --git a/go.mod b/go.mod index 7893d053b..9f6d7209b 100644 --- a/go.mod +++ b/go.mod @@ -37,6 +37,7 @@ require ( github.com/filecoin-project/go-bitfield v0.2.4 github.com/filecoin-project/go-cbor-util v0.0.1 github.com/filecoin-project/go-commp-utils v0.1.3 + github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837 github.com/filecoin-project/go-crypto v0.0.1 github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc7 github.com/filecoin-project/go-fil-commcid v0.1.0 @@ -45,7 +46,7 @@ require ( github.com/filecoin-project/go-jsonrpc v0.3.1 github.com/filecoin-project/go-padreader v0.0.1 github.com/filecoin-project/go-paramfetch v0.0.4 - github.com/filecoin-project/go-state-types v0.12.9-0.20231205164216-231a5d9537a1 + github.com/filecoin-project/go-state-types v0.12.9-0.20240125111137-dbf4b5b126e2 github.com/filecoin-project/go-statemachine v1.0.3 github.com/filecoin-project/go-statestore v0.2.0 github.com/filecoin-project/go-storedcounter v0.1.0 @@ -91,7 +92,6 @@ require ( github.com/ipfs/go-fs-lock v0.0.7 github.com/ipfs/go-graphsync v0.14.6 github.com/ipfs/go-ipfs-blocksutil v0.0.1 - github.com/ipfs/go-ipfs-exchange-offline v0.3.0 github.com/ipfs/go-ipld-cbor v0.0.6 github.com/ipfs/go-ipld-format v0.5.0 github.com/ipfs/go-log/v2 v2.5.1 @@ -145,7 +145,7 @@ require ( github.com/triplewz/poseidon v0.0.0-20220525065023-a7cdb0e183e7 github.com/urfave/cli/v2 v2.25.5 github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba - github.com/whyrusleeping/cbor-gen v0.0.0-20230923211252-36a87e1ba72f + github.com/whyrusleeping/cbor-gen v0.0.0-20240124134906-0efd71a8493b github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4 github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 github.com/xeipuuv/gojsonschema v1.2.0 @@ -202,7 +202,6 @@ require ( github.com/etclabscore/go-jsonschema-walk v0.0.6 // indirect github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 // indirect github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 // indirect - github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837 // indirect github.com/filecoin-project/go-ds-versioning v0.1.2 // indirect github.com/filecoin-project/go-hamt-ipld v0.1.5 // indirect github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 // indirect diff --git a/go.sum b/go.sum index 43eaf0207..13496a5b5 
100644 --- a/go.sum +++ b/go.sum @@ -339,8 +339,8 @@ github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psS github.com/filecoin-project/go-state-types v0.1.6/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= github.com/filecoin-project/go-state-types v0.1.10/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= github.com/filecoin-project/go-state-types v0.11.2-0.20230712101859-8f37624fa540/go.mod h1:SyNPwTsU7I22gL2r0OAPcImvLoTVfgRwdK/Y5rR1zz8= -github.com/filecoin-project/go-state-types v0.12.9-0.20231205164216-231a5d9537a1 h1:XwF+/Aucg1wrYs5b/zV/m6BmKrftdGj4Ya/8iSkW0t8= -github.com/filecoin-project/go-state-types v0.12.9-0.20231205164216-231a5d9537a1/go.mod h1:gR2NV0CSGSQwopxF+3In9nDh1sqvoYukLcs5vK0AHCA= +github.com/filecoin-project/go-state-types v0.12.9-0.20240125111137-dbf4b5b126e2 h1:kl0+C3V2skRVuCDEMLv6A0h52JudeB8S8b8DbhEH36g= +github.com/filecoin-project/go-state-types v0.12.9-0.20240125111137-dbf4b5b126e2/go.mod h1:m/6/3fAMykSC9eCWCGlwh89SN/+pDVapo+jRDov1JXk= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statemachine v1.0.3 h1:N07o6alys+V1tNoSTi4WuuoeNC4erS/6jE74+NsgQuk= github.com/filecoin-project/go-statemachine v1.0.3/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54= @@ -750,7 +750,6 @@ github.com/ipfs/go-ipfs-exchange-interface v0.2.0/go.mod h1:z6+RhJuDQbqKguVyslSO github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY= github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA= -github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB02e++Z7521fMxqCNRrz9s= github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.3.0 h1:fallckyc5PYjuMEitPNrjRfpwl7YFt69heCOUhsbGxQ= @@ -933,7 +932,6 @@ github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQs github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/koalacxr/quantile v0.0.1 h1:wAW+SQ286Erny9wOjVww96t8ws+x5Zj6AKHDULUK+o0= @@ -1657,8 +1655,8 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:f github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20210303213153-67a261a1d291/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20220323183124-98fa8256a799/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= -github.com/whyrusleeping/cbor-gen v0.0.0-20230923211252-36a87e1ba72f h1:SBuSxXJL0/ZJMtTxbXZgHZkThl9dNrzyaNhlyaqscRo= -github.com/whyrusleeping/cbor-gen v0.0.0-20230923211252-36a87e1ba72f/go.mod 
h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20240124134906-0efd71a8493b h1:+Yd0HVs6f9v2sRlpqcODhojlOd2PiBtWZTPcYmaSm5A= +github.com/whyrusleeping/cbor-gen v0.0.0-20240124134906-0efd71a8493b/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= @@ -2045,7 +2043,6 @@ golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/itests/deadlines_test.go b/itests/deadlines_test.go index fb28f4509..70da4be5a 100644 --- a/itests/deadlines_test.go +++ b/itests/deadlines_test.go @@ -4,6 +4,7 @@ package itests import ( "bytes" "context" + "strings" "testing" "time" @@ -16,7 +17,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin" - minertypes "github.com/filecoin-project/go-state-types/builtin/v8/miner" "github.com/filecoin-project/go-state-types/exitcode" miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" @@ -183,13 +183,17 @@ func TestDeadlineToggling(t *testing.T) { cr, err := cid.Parse("bagboea4b5abcatlxechwbp7kjpjguna6r6q7ejrhe6mdp3lf34pmswn27pkkiekz") require.NoError(t, err) - params := &minertypes.SectorPreCommitInfo{ - Expiration: 2880 * 300, - SectorNumber: 22, - SealProof: kit.TestSpt, + params := &miner.PreCommitSectorBatchParams2{ + Sectors: []miner.SectorPreCommitInfo{ + { + Expiration: 2880 * 300, + SectorNumber: 22, + SealProof: kit.TestSpt, - SealedCID: cr, - SealRandEpoch: head.Height() - 200, + SealedCID: cr, + SealRandEpoch: head.Height() - 200, + }, + }, } enc := new(bytes.Buffer) @@ -199,7 +203,7 @@ func TestDeadlineToggling(t *testing.T) { To: maddrE, From: defaultFrom, Value: types.FromFil(1), - Method: builtin.MethodsMiner.PreCommitSector, + Method: builtin.MethodsMiner.PreCommitSectorBatch2, Params: enc.Bytes(), }, nil) require.NoError(t, err) @@ -286,14 +290,18 @@ func TestDeadlineToggling(t *testing.T) { sp, aerr := actors.SerializeParams(terminateSectorParams) require.NoError(t, aerr) - smsg, err := client.MpoolPushMessage(ctx, &types.Message{ - From: defaultFrom, - To: maddrD, - Method: builtin.MethodsMiner.TerminateSectors, + var smsg *types.SignedMessage + require.Eventually(t, func() bool { + smsg, err = client.MpoolPushMessage(ctx, &types.Message{ + From: defaultFrom, + To: maddrD, + Method: builtin.MethodsMiner.TerminateSectors, - Value: big.Zero(), - Params: sp, - }, 
nil) + Value: big.Zero(), + Params: sp, + }, nil) + return err == nil || !strings.Contains(err.Error(), "cannot terminate sectors in immutable deadline") + }, 60*time.Second, 100*time.Millisecond) require.NoError(t, err) t.Log("sent termination message:", smsg.Cid()) diff --git a/itests/direct_data_onboard_test.go b/itests/direct_data_onboard_test.go new file mode 100644 index 000000000..90dde1207 --- /dev/null +++ b/itests/direct_data_onboard_test.go @@ -0,0 +1,347 @@ +package itests + +import ( + "bytes" + "context" + "crypto/rand" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-commp-utils/nonffi" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + market2 "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + minertypes "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/consensus/filcns" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/lib/must" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/storage/pipeline/piece" +) + +func TestActors13Migration(t *testing.T) { + + var ( + blocktime = 2 * time.Millisecond + ctx = context.Background() + ) + client, _, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.UpgradeSchedule(stmgr.Upgrade{ + Network: network.Version21, + Height: -1, + }, stmgr.Upgrade{ + Network: network.Version22, + Height: 10, + Migration: filcns.UpgradeActorsV13, + })) + ens.InterconnectAll().BeginMiningMustPost(blocktime) + + // mine until 15 + client.WaitTillChain(ctx, kit.HeightAtLeast(15)) +} + +func TestOnboardRawPiece(t *testing.T) { + kit.QuietMiningLogs() + + var ( + blocktime = 2 * time.Millisecond + ctx = context.Background() + ) + + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC()) + ens.InterconnectAll().BeginMiningMustPost(blocktime) + + pieceSize := abi.PaddedPieceSize(2048).Unpadded() + pieceData := make([]byte, pieceSize) + _, _ = rand.Read(pieceData) + + dc, err := miner.ComputeDataCid(ctx, pieceSize, bytes.NewReader(pieceData)) + require.NoError(t, err) + + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + so, err := miner.SectorAddPieceToAny(ctx, pieceSize, bytes.NewReader(pieceData), piece.PieceDealInfo{ + PublishCid: nil, + DealID: 0, + DealProposal: nil, + DealSchedule: piece.DealSchedule{ + StartEpoch: head.Height() + 2880*2, + EndEpoch: head.Height() + 2880*400, + }, + KeepUnsealed: true, + PieceActivationManifest: &minertypes.PieceActivationManifest{ + CID: dc.PieceCID, + Size: dc.Size, + VerifiedAllocationKey: nil, + Notify: nil, + }, + }) + require.NoError(t, err) + + // wait for sector to commit + + // wait for sector to commit and enter proving state + toCheck := map[abi.SectorNumber]struct{}{ + so.Sector: {}, + } + + miner.WaitSectorsProving(ctx, toCheck) + + si, err := miner.SectorsStatus(ctx, so.Sector, false) + require.NoError(t, err) + require.Equal(t, dc.PieceCID, *si.CommD) +} + +func makeMarketDealProposal(t *testing.T, client *kit.TestFullNode, miner *kit.TestMiner, data cid.Cid, ps abi.PaddedPieceSize, start, end 
abi.ChainEpoch) market2.ClientDealProposal { + ca, err := client.WalletDefaultAddress(context.Background()) + require.NoError(t, err) + + ma, err := miner.ActorAddress(context.Background()) + require.NoError(t, err) + + dp := market2.DealProposal{ + PieceCID: data, + PieceSize: ps, + VerifiedDeal: false, + Client: ca, + Provider: ma, + Label: must.One(market2.NewLabelFromString("wat")), + StartEpoch: start, + EndEpoch: end, + StoragePricePerEpoch: big.Zero(), + ProviderCollateral: abi.TokenAmount{}, // below + ClientCollateral: big.Zero(), + } + + cb, err := client.StateDealProviderCollateralBounds(context.Background(), dp.PieceSize, dp.VerifiedDeal, types.EmptyTSK) + require.NoError(t, err) + dp.ProviderCollateral = big.Div(big.Mul(cb.Min, big.NewInt(2)), big.NewInt(2)) + + buf, err := cborutil.Dump(&dp) + require.NoError(t, err) + sig, err := client.WalletSign(context.Background(), ca, buf) + require.NoError(t, err) + + return market2.ClientDealProposal{ + Proposal: dp, + ClientSignature: *sig, + } + +} + +func TestOnboardMixedMarketDDO(t *testing.T) { + kit.QuietMiningLogs() + + var ( + blocktime = 2 * time.Millisecond + ctx = context.Background() + ) + + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.MutateSealingConfig(func(sc *config.SealingConfig) { + sc.RequireActivationSuccess = true + sc.RequireNotificationSuccess = true + })) + ens.InterconnectAll().BeginMiningMustPost(blocktime) + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + var pieces []abi.PieceInfo + var dealID abi.DealID + + { + // market piece + pieceSize := abi.PaddedPieceSize(2048 / 2).Unpadded() + pieceData := make([]byte, pieceSize) + _, _ = rand.Read(pieceData) + + dc, err := miner.ComputeDataCid(ctx, pieceSize, bytes.NewReader(pieceData)) + require.NoError(t, err) + pieces = append(pieces, dc) + + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + // PSD + + psdParams := market2.PublishStorageDealsParams{ + Deals: []market2.ClientDealProposal{ + makeMarketDealProposal(t, client, miner, dc.PieceCID, pieceSize.Padded(), head.Height()+2880*2, head.Height()+2880*400), + }, + } + + psdMsg := &types.Message{ + To: market.Address, + From: mi.Worker, + + Method: market.Methods.PublishStorageDeals, + Params: must.One(cborutil.Dump(&psdParams)), + } + + smsg, err := client.MpoolPushMessage(ctx, psdMsg, nil) + require.NoError(t, err) + + r, err := client.StateWaitMsg(ctx, smsg.Cid(), 1, stmgr.LookbackNoLimit, true) + require.NoError(t, err) + + require.Equal(t, exitcode.Ok, r.Receipt.ExitCode) + + nv, err := client.StateNetworkVersion(ctx, types.EmptyTSK) + require.NoError(t, err) + + res, err := market.DecodePublishStorageDealsReturn(r.Receipt.Return, nv) + require.NoError(t, err) + dealID = must.One(res.DealIDs())[0] + + mcid := smsg.Cid() + + so, err := miner.SectorAddPieceToAny(ctx, pieceSize, bytes.NewReader(pieceData), piece.PieceDealInfo{ + PublishCid: &mcid, + DealID: dealID, + DealProposal: &psdParams.Deals[0].Proposal, + DealSchedule: piece.DealSchedule{ + StartEpoch: head.Height() + 2880*2, + EndEpoch: head.Height() + 2880*400, + }, + PieceActivationManifest: nil, + KeepUnsealed: true, + }) + require.NoError(t, err) + + require.Equal(t, abi.PaddedPieceSize(0), so.Offset) + require.Equal(t, abi.SectorNumber(2), so.Sector) + } + + { + // raw ddo piece + + pieceSize := abi.PaddedPieceSize(2048 / 2).Unpadded() + pieceData := make([]byte, pieceSize) + _, _ = 
rand.Read(pieceData) + + dc, err := miner.ComputeDataCid(ctx, pieceSize, bytes.NewReader(pieceData)) + require.NoError(t, err) + pieces = append(pieces, dc) + + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + so, err := miner.SectorAddPieceToAny(ctx, pieceSize, bytes.NewReader(pieceData), piece.PieceDealInfo{ + PublishCid: nil, + DealID: 0, + DealProposal: nil, + DealSchedule: piece.DealSchedule{ + StartEpoch: head.Height() + 2880*2, + EndEpoch: head.Height() + 2880*400, + }, + KeepUnsealed: false, + PieceActivationManifest: &minertypes.PieceActivationManifest{ + CID: dc.PieceCID, + Size: dc.Size, + VerifiedAllocationKey: nil, + Notify: nil, + }, + }) + require.NoError(t, err) + + require.Equal(t, abi.PaddedPieceSize(1024), so.Offset) + require.Equal(t, abi.SectorNumber(2), so.Sector) + } + + toCheck := map[abi.SectorNumber]struct{}{ + 2: {}, + } + + miner.WaitSectorsProving(ctx, toCheck) + + expectCommD, err := nonffi.GenerateUnsealedCID(abi.RegisteredSealProof_StackedDrg2KiBV1_1, pieces) + require.NoError(t, err) + + si, err := miner.SectorsStatus(ctx, 2, false) + require.NoError(t, err) + require.Equal(t, expectCommD, *si.CommD) + + ds, err := client.StateMarketStorageDeal(ctx, dealID, types.EmptyTSK) + require.NoError(t, err) + + require.NotEqual(t, -1, ds.State.SectorStartEpoch) +} + +func TestOnboardRawPieceSnap(t *testing.T) { + kit.QuietMiningLogs() + + var ( + blocktime = 2 * time.Millisecond + ctx = context.Background() + ) + + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.MutateSealingConfig(func(sc *config.SealingConfig) { + sc.PreferNewSectorsForDeals = false + sc.MakeNewSectorForDeals = false + sc.MakeCCSectorsAvailable = true + sc.AggregateCommits = false + })) + ens.InterconnectAll().BeginMiningMustPost(blocktime) + + miner.PledgeSectors(ctx, 1, 0, nil) + sl, err := miner.SectorsListNonGenesis(ctx) + require.NoError(t, err) + require.Len(t, sl, 1, "expected 1 sector") + + snum := sl[0] + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + client.WaitForSectorActive(ctx, t, snum, maddr) + + pieceSize := abi.PaddedPieceSize(2048).Unpadded() + pieceData := make([]byte, pieceSize) + _, _ = rand.Read(pieceData) + + dc, err := miner.ComputeDataCid(ctx, pieceSize, bytes.NewReader(pieceData)) + require.NoError(t, err) + + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + so, err := miner.SectorAddPieceToAny(ctx, pieceSize, bytes.NewReader(pieceData), piece.PieceDealInfo{ + PublishCid: nil, + DealID: 0, + DealProposal: nil, + DealSchedule: piece.DealSchedule{ + StartEpoch: head.Height() + 2880*2, + EndEpoch: head.Height() + 2880*400, // todo set so that it works with the sector + }, + KeepUnsealed: false, + PieceActivationManifest: &minertypes.PieceActivationManifest{ + CID: dc.PieceCID, + Size: dc.Size, + VerifiedAllocationKey: nil, + Notify: nil, + }, + }) + require.NoError(t, err) + + // wait for sector to commit + + // wait for sector to commit and enter proving state + toCheck := map[abi.SectorNumber]struct{}{ + so.Sector: {}, + } + + miner.WaitSectorsProving(ctx, toCheck) +} diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go index 3c83ba896..ee17df237 100644 --- a/itests/kit/ensemble.go +++ b/itests/kit/ensemble.go @@ -1061,14 +1061,14 @@ func importPreSealMeta(ctx context.Context, meta genesis.Miner, mds dtypes.Metad info := &pipeline.SectorInfo{ State: pipeline.Proving, SectorNumber: sector.SectorID, - Pieces: []api.SectorPiece{ - { + Pieces: []pipeline.SafeSectorPiece{ + 
pipeline.SafePiece(api.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(meta.SectorSize), PieceCID: commD, }, DealInfo: nil, // todo: likely possible to get, but not really that useful - }, + }), }, CommD: &commD, CommR: &commR, diff --git a/itests/kit/log.go b/itests/kit/log.go index 0c66427f9..2cb597095 100644 --- a/itests/kit/log.go +++ b/itests/kit/log.go @@ -23,6 +23,7 @@ func QuietMiningLogs() { _ = logging.SetLogLevel("rpc", "ERROR") _ = logging.SetLogLevel("consensus-common", "ERROR") _ = logging.SetLogLevel("dht/RtRefreshManager", "ERROR") + _ = logging.SetLogLevel("consensus-common", "WARN") } func QuietAllLogsExcept(names ...string) { diff --git a/itests/pending_deal_allocation_test.go b/itests/pending_deal_allocation_test.go index c1e0531cf..60b755ac0 100644 --- a/itests/pending_deal_allocation_test.go +++ b/itests/pending_deal_allocation_test.go @@ -180,10 +180,6 @@ func TestGetAllocationForPendingDeal(t *testing.T) { dealIds, err := ret.DealIDs() require.NoError(t, err) - dealInfo, err := api.StateMarketStorageDeal(ctx, dealIds[0], types.EmptyTSK) - require.NoError(t, err) - require.Equal(t, verifregtypes.AllocationId(0), dealInfo.State.VerifiedClaim) // Allocation in State should not be set yet, because it's in the allocation map - allocation, err := api.StateGetAllocationForPendingDeal(ctx, dealIds[0], types.EmptyTSK) require.NoError(t, err) require.Equal(t, dealProposal.PieceCID, allocation.Data) diff --git a/lib/result/result.go b/lib/result/result.go index 56a9ffab7..4f3a12ee8 100644 --- a/lib/result/result.go +++ b/lib/result/result.go @@ -1,5 +1,7 @@ package result +import "encoding/json" + // Result is a small wrapper type encapsulating Value/Error tuples, mostly for // use when sending values across channels // NOTE: Avoid adding any functionality to this, any "nice" things added here will @@ -39,3 +41,13 @@ func (r Result[T]) Assert(noErrFn func(err error, msgAndArgs ...interface{})) T return r.Value } + +// MarshalJSON implements the json.Marshaler interface, marshalling string error correctly +// this method makes the display in log.Infow nicer +func (r Result[T]) MarshalJSON() ([]byte, error) { + if r.Error != nil { + return json.Marshal(map[string]string{"Error": r.Error.Error()}) + } + + return json.Marshal(map[string]interface{}{"Value": r.Value}) +} diff --git a/markets/storageadapter/ondealsectorcommitted_test.go b/markets/storageadapter/ondealsectorcommitted_test.go index 1d7519ff9..e3d318780 100644 --- a/markets/storageadapter/ondealsectorcommitted_test.go +++ b/markets/storageadapter/ondealsectorcommitted_test.go @@ -55,21 +55,21 @@ func TestOnDealSectorPreCommitted(t *testing.T) { } unfinishedDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: -1, LastUpdatedEpoch: 2, }, } activeDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, }, } slashedDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, SlashEpoch: 2, @@ -277,21 +277,21 @@ func TestOnDealSectorCommitted(t *testing.T) { } unfinishedDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: -1, LastUpdatedEpoch: 2, }, } activeDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, }, } 
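As an aside on the `lib/result` change above: the new `MarshalJSON` makes a `Result` carrying an error render as a readable JSON object instead of an empty value when it lands in structured logs. The self-contained sketch below reproduces the same method on a local copy of the type purely to show the two output shapes; the real type lives in `github.com/filecoin-project/lotus/lib/result`.

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// Result is a local copy of lib/result.Result, reproduced here only to
// demonstrate the MarshalJSON behaviour added in this diff.
type Result[T any] struct {
	Value T
	Error error
}

// MarshalJSON renders the error as its string form, matching the new method
// in lib/result/result.go.
func (r Result[T]) MarshalJSON() ([]byte, error) {
	if r.Error != nil {
		return json.Marshal(map[string]string{"Error": r.Error.Error()})
	}
	return json.Marshal(map[string]interface{}{"Value": r.Value})
}

func main() {
	ok := Result[int]{Value: 42}
	bad := Result[int]{Error: errors.New("boom")}

	okJSON, _ := json.Marshal(ok)
	badJSON, _ := json.Marshal(bad)

	fmt.Println(string(okJSON))  // {"Value":42}
	fmt.Println(string(badJSON)) // {"Error":"boom"}
}
```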
slashedDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, SlashEpoch: 2, diff --git a/markets/storageadapter/provider.go b/markets/storageadapter/provider.go index bdfce6f55..11742c879 100644 --- a/markets/storageadapter/provider.go +++ b/markets/storageadapter/provider.go @@ -32,6 +32,7 @@ import ( "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/helpers" pipeline "github.com/filecoin-project/lotus/storage/pipeline" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/sectorblocks" ) @@ -92,11 +93,11 @@ func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagema return nil, xerrors.Errorf("deal.PublishCid can't be nil") } - sdInfo := api.PieceDealInfo{ + sdInfo := piece.PieceDealInfo{ DealID: deal.DealID, DealProposal: &deal.Proposal, PublishCid: deal.PublishCid, - DealSchedule: api.DealSchedule{ + DealSchedule: piece.DealSchedule{ StartEpoch: deal.ClientDealProposal.Proposal.StartEpoch, EndEpoch: deal.ClientDealProposal.Proposal.EndEpoch, }, diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 13ecb2706..ef58dc457 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -1550,6 +1550,30 @@ Submitting a smaller number of prove commits per epoch would reduce the possibil Comment: `UseSyntheticPoRep, when set to true, will reduce the amount of cache data held on disk after the completion of PreCommit 2 to 11GiB.`, }, + { + Name: "RequireActivationSuccess", + Type: "bool", + + Comment: `Whether to abort if any sector activation in a batch fails (newly sealed sectors, only with ProveCommitSectors3).`, + }, + { + Name: "RequireActivationSuccessUpdate", + Type: "bool", + + Comment: `Whether to abort if any piece activation notification returns a non-zero exit code (newly sealed sectors, only with ProveCommitSectors3).`, + }, + { + Name: "RequireNotificationSuccess", + Type: "bool", + + Comment: `Whether to abort if any sector activation in a batch fails (updating sectors, only with ProveReplicaUpdates3).`, + }, + { + Name: "RequireNotificationSuccessUpdate", + Type: "bool", + + Comment: `Whether to abort if any piece activation notification returns a non-zero exit code (updating sectors, only with ProveReplicaUpdates3).`, + }, }, "Splitstore": { { diff --git a/node/config/types.go b/node/config/types.go index 2152e0795..4e78b109f 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -487,6 +487,15 @@ type SealingConfig struct { // UseSyntheticPoRep, when set to true, will reduce the amount of cache data held on disk after the completion of PreCommit 2 to 11GiB. UseSyntheticPoRep bool + + // Whether to abort if any sector activation in a batch fails (newly sealed sectors, only with ProveCommitSectors3). + RequireActivationSuccess bool + // Whether to abort if any piece activation notification returns a non-zero exit code (newly sealed sectors, only with ProveCommitSectors3). + RequireActivationSuccessUpdate bool + // Whether to abort if any sector activation in a batch fails (updating sectors, only with ProveReplicaUpdates3). + RequireNotificationSuccess bool + // Whether to abort if any piece activation notification returns a non-zero exit code (updating sectors, only with ProveReplicaUpdates3). 
+ RequireNotificationSuccessUpdate bool } type SealerConfig struct { diff --git a/node/hello/cbor_gen.go b/node/hello/cbor_gen.go index 78e950f6f..dd36110d1 100644 --- a/node/hello/cbor_gen.go +++ b/node/hello/cbor_gen.go @@ -136,9 +136,9 @@ func (t *HelloMessage) UnmarshalCBOR(r io.Reader) (err error) { t.HeaviestTipSet[i] = c } + } } - // t.HeaviestTipSetHeight (abi.ChainEpoch) (int64) { maj, extra, err := cr.ReadHeader() diff --git a/node/impl/full/state.go b/node/impl/full/state.go index 03b6ef6cc..59946c5ec 100644 --- a/node/impl/full/state.go +++ b/node/impl/full/state.go @@ -762,7 +762,7 @@ func (a *StateAPI) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (m } out[strconv.FormatInt(int64(dealID), 10)] = &api.MarketDeal{ Proposal: d, - State: *s, + State: api.MakeDealState(s), } return nil }); err != nil { @@ -779,18 +779,27 @@ func (m *StateModule) StateMarketStorageDeal(ctx context.Context, dealId abi.Dea return stmgr.GetStorageDeal(ctx, m.StateManager, dealId, ts) } -func (a *StateAPI) StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifreg.Allocation, error) { +func (a *StateAPI) StateGetAllocationIdForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (verifreg.AllocationId, error) { ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { - return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) + return verifreg.NoAllocationID, xerrors.Errorf("loading tipset %s: %w", tsk, err) } st, err := a.StateManager.GetMarketState(ctx, ts) if err != nil { - return nil, err + return verifreg.NoAllocationID, err } allocationId, err := st.GetAllocationIdForPendingDeal(dealId) + if err != nil { + return verifreg.NoAllocationID, err + } + + return allocationId, nil +} + +func (a *StateAPI) StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifreg.Allocation, error) { + allocationId, err := a.StateGetAllocationIdForPendingDeal(ctx, dealId, tsk) if err != nil { return nil, err } diff --git a/node/impl/storminer.go b/node/impl/storminer.go index 2ce42c327..90248a355 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -54,6 +54,7 @@ import ( "github.com/filecoin-project/lotus/storage/ctladdr" "github.com/filecoin-project/lotus/storage/paths" sealing "github.com/filecoin-project/lotus/storage/pipeline" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/fsutil" @@ -243,7 +244,7 @@ func (sm *StorageMinerAPI) SectorsStatus(ctx context.Context, sid abi.SectorNumb return sInfo, nil } -func (sm *StorageMinerAPI) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d api.PieceDealInfo) (api.SectorOffset, error) { +func (sm *StorageMinerAPI) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d piece.PieceDealInfo) (api.SectorOffset, error) { so, err := sm.Miner.SectorAddPieceToAny(ctx, size, r, d) if err != nil { // jsonrpc doesn't support returning values with errors, make sure we never do that @@ -506,7 +507,7 @@ func (sm *StorageMinerAPI) ComputeWindowPoSt(ctx context.Context, dlIdx uint64, } func (sm *StorageMinerAPI) ComputeDataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (abi.PieceInfo, error) { - return sm.StorageMgr.DataCid(ctx, pieceSize, pieceData) + return 
sm.IStorageMgr.DataCid(ctx, pieceSize, pieceData) } func (sm *StorageMinerAPI) WorkerConnect(ctx context.Context, url string) error { diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index 0680029bf..e27a497bb 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -1017,6 +1017,11 @@ func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error TerminateBatchWait: config.Duration(cfg.TerminateBatchWait), MaxSectorProveCommitsSubmittedPerEpoch: cfg.MaxSectorProveCommitsSubmittedPerEpoch, UseSyntheticPoRep: cfg.UseSyntheticPoRep, + + RequireActivationSuccess: cfg.RequireActivationSuccess, + RequireActivationSuccessUpdate: cfg.RequireActivationSuccessUpdate, + RequireNotificationSuccess: cfg.RequireNotificationSuccess, + RequireNotificationSuccessUpdate: cfg.RequireNotificationSuccessUpdate, } c.SetSealingConfig(newCfg) }) @@ -1062,6 +1067,11 @@ func ToSealingConfig(dealmakingCfg config.DealmakingConfig, sealingCfg config.Se TerminateBatchMin: sealingCfg.TerminateBatchMin, TerminateBatchWait: time.Duration(sealingCfg.TerminateBatchWait), UseSyntheticPoRep: sealingCfg.UseSyntheticPoRep, + + RequireActivationSuccess: sealingCfg.RequireActivationSuccess, + RequireActivationSuccessUpdate: sealingCfg.RequireActivationSuccessUpdate, + RequireNotificationSuccess: sealingCfg.RequireNotificationSuccess, + RequireNotificationSuccessUpdate: sealingCfg.RequireNotificationSuccessUpdate, } } diff --git a/paychmgr/cbor_gen.go b/paychmgr/cbor_gen.go index f97c176a3..46a6aa6b3 100644 --- a/paychmgr/cbor_gen.go +++ b/paychmgr/cbor_gen.go @@ -53,7 +53,7 @@ func (t *VoucherInfo) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Proof[:]); err != nil { + if _, err := cw.Write(t.Proof); err != nil { return err } @@ -148,9 +148,10 @@ func (t *VoucherInfo) UnmarshalCBOR(r io.Reader) (err error) { t.Proof = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Proof[:]); err != nil { + if _, err := io.ReadFull(cr, t.Proof); err != nil { return err } + // t.Voucher (paych.SignedVoucher) (struct) case "Voucher": @@ -329,6 +330,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.ChannelID (string) (string) @@ -631,9 +633,9 @@ func (t *ChannelInfo) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.ChannelID (string) (string) case "ChannelID": diff --git a/storage/pipeline/cbor_gen.go b/storage/pipeline/cbor_gen.go index c832f8a14..b9c9c2a55 100644 --- a/storage/pipeline/cbor_gen.go +++ b/storage/pipeline/cbor_gen.go @@ -14,7 +14,6 @@ import ( abi "github.com/filecoin-project/go-state-types/abi" - api "github.com/filecoin-project/lotus/api" storiface "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -58,6 +57,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.CommD (cid.Cid) (struct) @@ -124,7 +124,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Proof[:]); err != nil { + if _, err := cw.Write(t.Proof); err != nil { return err } @@ -151,7 +151,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - // t.Pieces ([]api.SectorPiece) (slice) + // t.Pieces ([]sealing.SafeSectorPiece) (slice) if len("Pieces") > cbg.MaxLength { return xerrors.Errorf("Value in field \"Pieces\" was too long") } @@ -174,6 +174,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { 
return err } + } // t.Return (sealing.ReturnState) (string) @@ -222,7 +223,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - // t.CCPieces ([]api.SectorPiece) (slice) + // t.CCPieces ([]sealing.SafeSectorPiece) (slice) if len("CCPieces") > cbg.MaxLength { return xerrors.Errorf("Value in field \"CCPieces\" was too long") } @@ -245,6 +246,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.CCUpdate (bool) (bool) @@ -305,7 +307,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.SeedValue[:]); err != nil { + if _, err := cw.Write(t.SeedValue); err != nil { return err } @@ -373,7 +375,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.TicketValue[:]); err != nil { + if _, err := cw.Write(t.TicketValue); err != nil { return err } @@ -517,7 +519,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.PreCommit1Out[:]); err != nil { + if _, err := cw.Write(t.PreCommit1Out); err != nil { return err } @@ -741,7 +743,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.ReplicaUpdateProof[:]); err != nil { + if _, err := cw.Write(t.ReplicaUpdateProof); err != nil { return err } @@ -928,9 +930,9 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.CommD (cid.Cid) (struct) case "CommD": @@ -996,9 +998,10 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { t.Proof = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Proof[:]); err != nil { + if _, err := io.ReadFull(cr, t.Proof); err != nil { return err } + // t.State (sealing.SectorState) (string) case "State": @@ -1010,7 +1013,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { t.State = SectorState(sval) } - // t.Pieces ([]api.SectorPiece) (slice) + // t.Pieces ([]sealing.SafeSectorPiece) (slice) case "Pieces": maj, extra, err = cr.ReadHeader() @@ -1027,7 +1030,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { } if extra > 0 { - t.Pieces = make([]api.SectorPiece, extra) + t.Pieces = make([]SafeSectorPiece, extra) } for i := 0; i < int(extra); i++ { @@ -1046,9 +1049,9 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Return (sealing.ReturnState) (string) case "Return": @@ -1071,7 +1074,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { t.LastErr = string(sval) } - // t.CCPieces ([]api.SectorPiece) (slice) + // t.CCPieces ([]sealing.SafeSectorPiece) (slice) case "CCPieces": maj, extra, err = cr.ReadHeader() @@ -1088,7 +1091,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { } if extra > 0 { - t.CCPieces = make([]api.SectorPiece, extra) + t.CCPieces = make([]SafeSectorPiece, extra) } for i := 0; i < int(extra); i++ { @@ -1107,9 +1110,9 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.CCUpdate (bool) (bool) case "CCUpdate": @@ -1173,9 +1176,10 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { t.SeedValue = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.SeedValue[:]); err != nil { + if _, err := io.ReadFull(cr, t.SeedValue); err != nil { return err } + // t.SectorType (abi.RegisteredSealProof) (int64) case "SectorType": { @@ -1247,9 +1251,10 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { t.TicketValue = make([]uint8, extra) } - if _, err := io.ReadFull(cr, 
t.TicketValue[:]); err != nil { + if _, err := io.ReadFull(cr, t.TicketValue); err != nil { return err } + // t.CreationTime (int64) (int64) case "CreationTime": { @@ -1397,9 +1402,10 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { t.PreCommit1Out = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.PreCommit1Out[:]); err != nil { + if _, err := io.ReadFull(cr, t.PreCommit1Out); err != nil { return err } + // t.FaultReportMsg (cid.Cid) (struct) case "FaultReportMsg": @@ -1621,9 +1627,10 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { t.ReplicaUpdateProof = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.ReplicaUpdateProof[:]); err != nil { + if _, err := io.ReadFull(cr, t.ReplicaUpdateProof); err != nil { return err } + // t.RemoteDataFinalized (bool) (bool) case "RemoteDataFinalized": diff --git a/storage/pipeline/checks.go b/storage/pipeline/checks.go index ecd160231..1f21b9c63 100644 --- a/storage/pipeline/checks.go +++ b/storage/pipeline/checks.go @@ -4,6 +4,7 @@ import ( "bytes" "context" + "github.com/ipfs/go-cid" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -12,9 +13,9 @@ import ( "github.com/filecoin-project/go-state-types/crypto" prooftypes "github.com/filecoin-project/go-state-types/proof" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" ) // TODO: For now we handle this by halting state execution, when we get jsonrpc reconnecting @@ -41,7 +42,7 @@ type ErrCommitWaitFailed struct{ error } type ErrBadRU struct{ error } type ErrBadPR struct{ error } -func checkPieces(ctx context.Context, maddr address.Address, sn abi.SectorNumber, pieces []api.SectorPiece, api SealingAPI, mustHaveDeals bool) error { +func checkPieces(ctx context.Context, maddr address.Address, sn abi.SectorNumber, pieces []SafeSectorPiece, api SealingAPI, mustHaveDeals bool) error { ts, err := api.ChainHead(ctx) if err != nil { return &ErrApi{xerrors.Errorf("getting chain head: %w", err)} @@ -51,43 +52,84 @@ func checkPieces(ctx context.Context, maddr address.Address, sn abi.SectorNumber var offset abi.PaddedPieceSize for i, p := range pieces { + p, i := p, i + // check that the piece is correctly aligned - if offset%p.Piece.Size != 0 { - return &ErrInvalidPiece{xerrors.Errorf("sector %d piece %d is not aligned: size=%xh offset=%xh off-by=%xh", sn, i, p.Piece.Size, offset, offset%p.Piece.Size)} + if offset%p.Piece().Size != 0 { + return &ErrInvalidPiece{xerrors.Errorf("sector %d piece %d is not aligned: size=%xh offset=%xh off-by=%xh", sn, i, p.Piece().Size, offset, offset%p.Piece().Size)} } - offset += p.Piece.Size + offset += p.Piece().Size - // if no deal is associated with the piece, ensure that we added it as - // filler (i.e. ensure that it has a zero PieceCID) - if p.DealInfo == nil { - exp := zerocomm.ZeroPieceCommitment(p.Piece.Size.Unpadded()) - if !p.Piece.PieceCID.Equals(exp) { - return &ErrInvalidPiece{xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sn, i, p.Piece.PieceCID)} - } - continue - } + err := p.handleDealInfo(handleDealInfoParams{ + FillerHandler: func(pi UniversalPieceInfo) error { + // if no deal is associated with the piece, ensure that we added it as + // filler (i.e. 
ensure that it has a zero PieceCID) - dealCount++ + exp := zerocomm.ZeroPieceCommitment(p.Piece().Size.Unpadded()) + if !p.Piece().PieceCID.Equals(exp) { + return &ErrInvalidPiece{xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sn, i, p.Piece().PieceCID)} + } - deal, err := api.StateMarketStorageDeal(ctx, p.DealInfo.DealID, ts.Key()) + return nil + }, + BuiltinMarketHandler: func(pi UniversalPieceInfo) error { + dealCount++ + + deal, err := api.StateMarketStorageDeal(ctx, p.Impl().DealID, ts.Key()) + if err != nil { + return &ErrInvalidDeals{xerrors.Errorf("getting deal %d for piece %d: %w", p.Impl().DealID, i, err)} + } + + if deal.Proposal.Provider != maddr { + return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(pieces), sn, p.Impl().DealID, deal.Proposal.Provider, maddr)} + } + + if deal.Proposal.PieceCID != p.Piece().PieceCID { + return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(pieces), sn, p.Impl().DealID, p.Impl().DealProposal.PieceCID, deal.Proposal.PieceCID)} + } + + if p.Piece().Size != deal.Proposal.PieceSize { + return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(pieces), sn, p.Impl().DealID, p.Piece().Size, deal.Proposal.PieceSize)} + } + + if ts.Height() >= deal.Proposal.StartEpoch { + return &ErrExpiredDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(pieces), sn, p.Impl().DealID, deal.Proposal.StartEpoch, ts.Height())} + } + + return nil + }, + DDOHandler: func(pi UniversalPieceInfo) error { + dealCount++ + + // try to get allocation to see if that still works + all, err := pi.GetAllocation(ctx, api, ts.Key()) + if err != nil { + return xerrors.Errorf("getting deal %d allocation: %w", p.Impl().DealID, err) + } + if all != nil { + mid, err := address.IDFromAddress(maddr) + if err != nil { + return xerrors.Errorf("getting miner id: %w", err) + } + + if all.Provider != abi.ActorID(mid) { + return xerrors.Errorf("allocation provider doesn't match miner") + } + + if ts.Height() >= all.Expiration { + return &ErrExpiredDeals{xerrors.Errorf("piece allocation %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(pieces), sn, p.Impl().DealID, all.Expiration, ts.Height())} + } + + if all.Size < p.Piece().Size { + return &ErrInvalidDeals{xerrors.Errorf("piece allocation %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(pieces), sn, p.Impl().DealID, p.Piece().Size, all.Size)} + } + } + + return nil + }, + }) if err != nil { - return &ErrInvalidDeals{xerrors.Errorf("getting deal %d for piece %d: %w", p.DealInfo.DealID, i, err)} - } - - if deal.Proposal.Provider != maddr { - return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(pieces), sn, p.DealInfo.DealID, deal.Proposal.Provider, maddr)} - } - - if deal.Proposal.PieceCID != p.Piece.PieceCID { - return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(pieces), sn, p.DealInfo.DealID, p.Piece.PieceCID, deal.Proposal.PieceCID)} - } - - if p.Piece.Size != deal.Proposal.PieceSize { - return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(pieces), sn, p.DealInfo.DealID, p.Piece.Size, 
deal.Proposal.PieceSize)} - } - - if ts.Height() >= deal.Proposal.StartEpoch { - return &ErrExpiredDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(pieces), sn, p.DealInfo.DealID, deal.Proposal.StartEpoch, ts.Height())} + return err } } @@ -106,8 +148,8 @@ func checkPrecommit(ctx context.Context, maddr address.Address, si SectorInfo, t return err } - if si.hasDeals() { - commD, err := api.StateComputeDataCID(ctx, maddr, si.SectorType, si.dealIDs(), tsk) + if si.hasData() { + commD, err := computeUnsealedCIDFromPieces(si) if err != nil { return &ErrApi{xerrors.Errorf("calling StateComputeDataCommitment: %w", err)} } @@ -223,8 +265,7 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte, } // check that sector info is good after running a replica update -func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInfo, tsk types.TipSetKey, api SealingAPI) error { - +func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInfo, api SealingAPI) error { if err := checkPieces(ctx, maddr, si.SectorNumber, si.Pieces, api, true); err != nil { return err } @@ -232,9 +273,9 @@ func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInf return xerrors.Errorf("replica update on sector not marked for update") } - commD, err := api.StateComputeDataCID(ctx, maddr, si.SectorType, si.dealIDs(), tsk) + commD, err := computeUnsealedCIDFromPieces(si) if err != nil { - return &ErrApi{xerrors.Errorf("calling StateComputeDataCommitment: %w", err)} + return xerrors.Errorf("computing unsealed CID from pieces: %w", err) } if si.UpdateUnsealed == nil { @@ -253,5 +294,9 @@ func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInf } return nil - +} + +func computeUnsealedCIDFromPieces(si SectorInfo) (cid.Cid, error) { + pcs := si.pieceInfos() + return ffiwrapper.GenerateUnsealedCID(si.SectorType, pcs) } diff --git a/storage/pipeline/commit_batch.go b/storage/pipeline/commit_batch.go index 754f31763..cd770c265 100644 --- a/storage/pipeline/commit_batch.go +++ b/storage/pipeline/commit_batch.go @@ -23,6 +23,7 @@ import ( "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -57,6 +58,9 @@ type AggregateInput struct { Spt abi.RegisteredSealProof Info proof.AggregateSealVerifyInfo Proof []byte + + ActivationManifest miner.SectorActivationManifest + DealIDPrecommit bool } type CommitBatcher struct { @@ -209,13 +213,18 @@ func (b *CommitBatcher) maybeStartBatch(notif bool) ([]sealiface.CommitBatchRes, return nil, nil } - var res []sealiface.CommitBatchRes + var res, resV1 []sealiface.CommitBatchRes ts, err := b.api.ChainHead(b.mctx) if err != nil { return nil, err } + nv, err := b.api.StateNetworkVersion(b.mctx, ts.Key()) + if err != nil { + return nil, xerrors.Errorf("getting network version: %s", err) + } + blackedOut := func() bool { const nv16BlackoutWindow = abi.ChainEpoch(20) // a magik number if ts.Height() <= build.UpgradeSkyrHeight && build.UpgradeSkyrHeight-ts.Height() < nv16BlackoutWindow { @@ -232,25 +241,67 @@ func (b *CommitBatcher) maybeStartBatch(notif bool) ([]sealiface.CommitBatchRes, } } - if individual { - res, err = 
b.processIndividually(cfg) - } else { + if nv >= MinDDONetworkVersion { + // After nv21, we have a new ProveCommitSectors2 method, which supports + // batching without aggregation, but it doesn't support onboarding + // sectors which were precommitted with DealIDs in the precommit message. + // We prefer it for all other sectors, so first we use the new processBatchV2 + var sectors []abi.SectorNumber for sn := range b.todo { sectors = append(sectors, sn) } - res, err = b.processBatch(cfg, sectors) + res, err = b.processBatchV2(cfg, sectors, nv, !individual) + if err != nil { + err = xerrors.Errorf("processBatchV2: %w", err) + } + + // Mark sectors as done + for _, r := range res { + if err != nil { + r.Error = err.Error() + } + + for _, sn := range r.Sectors { + for _, ch := range b.waiting[sn] { + ch <- r // buffered + } + + delete(b.waiting, sn) + delete(b.todo, sn) + delete(b.cutoffs, sn) + } + } } if err != nil { - log.Warnf("CommitBatcher maybeStartBatch individual:%v processBatch %v", individual, err) + log.Warnf("CommitBatcher maybeStartBatch processBatch-ddo %v", err) } if err != nil && len(res) == 0 { return nil, err } - for _, r := range res { + if individual { + resV1, err = b.processIndividually(cfg) + } else { + var sectors []abi.SectorNumber + for sn := range b.todo { + sectors = append(sectors, sn) + } + resV1, err = b.processBatchV1(cfg, sectors, nv) + } + + if err != nil { + log.Warnf("CommitBatcher maybeStartBatch individual:%v processBatch %v", individual, err) + } + + if err != nil && len(resV1) == 0 { + return nil, err + } + + // Mark the rest as processed + for _, r := range resV1 { if err != nil { r.Error = err.Error() } @@ -266,10 +317,171 @@ func (b *CommitBatcher) maybeStartBatch(notif bool) ([]sealiface.CommitBatchRes, } } + res = append(res, resV1...) + return res, nil } -func (b *CommitBatcher) processBatch(cfg sealiface.Config, sectors []abi.SectorNumber) ([]sealiface.CommitBatchRes, error) { +// processBatchV2 processes a batch of sectors after nv22. It will always send +// ProveCommitSectors3Params which may contain either individual proofs or an +// aggregate proof depending on SP condition and network conditions. 
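+// Sectors which were precommitted with DealIDs in the precommit message are
+// skipped here (see the DealIDPrecommit check below) and fall through to the
+// older processIndividually/processBatchV1 path instead.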
+func (b *CommitBatcher) processBatchV2(cfg sealiface.Config, sectors []abi.SectorNumber, nv network.Version, aggregate bool) ([]sealiface.CommitBatchRes, error) { + ts, err := b.api.ChainHead(b.mctx) + if err != nil { + return nil, err + } + + total := len(sectors) + + res := sealiface.CommitBatchRes{ + FailedSectors: map[abi.SectorNumber]string{}, + } + + params := miner.ProveCommitSectors3Params{ + RequireActivationSuccess: cfg.RequireActivationSuccess, + RequireNotificationSuccess: cfg.RequireNotificationSuccess, + } + + infos := make([]proof.AggregateSealVerifyInfo, 0, total) + collateral := big.Zero() + + for _, sector := range sectors { + if b.todo[sector].DealIDPrecommit { + // can't process sectors precommitted with deal IDs with ProveCommitSectors2 + continue + } + + res.Sectors = append(res.Sectors, sector) + + sc, err := b.getSectorCollateral(sector, ts.Key()) + if err != nil { + res.FailedSectors[sector] = err.Error() + continue + } + + collateral = big.Add(collateral, sc) + + params.SectorActivations = append(params.SectorActivations, b.todo[sector].ActivationManifest) + params.SectorProofs = append(params.SectorProofs, b.todo[sector].Proof) + + infos = append(infos, b.todo[sector].Info) + } + + if len(infos) == 0 { + return nil, nil + } + + sort.Slice(infos, func(i, j int) bool { + return infos[i].Number < infos[j].Number + }) + + proofs := make([][]byte, 0, total) + for _, info := range infos { + proofs = append(proofs, b.todo[info.Number].Proof) + } + + needFunds := collateral + + arp, err := b.aggregateProofType(nv) + if err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting aggregate proof type: %w", err) + } + params.AggregateProofType = arp + + if aggregate { + params.SectorProofs = nil // can't be set when aggregating + + mid, err := address.IDFromAddress(b.maddr) + if err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting miner id: %w", err) + } + + params.AggregateProof, err = b.prover.AggregateSealProofs(proof.AggregateSealVerifyProofAndInfos{ + Miner: abi.ActorID(mid), + SealProof: b.todo[infos[0].Number].Spt, + AggregateProof: arp, + Infos: infos, + }, proofs) + if err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("aggregating proofs: %w", err) + } + + aggFeeRaw, err := policy.AggregateProveCommitNetworkFee(nv, len(infos), ts.MinTicketBlock().ParentBaseFee) + if err != nil { + res.Error = err.Error() + log.Errorf("getting aggregate commit network fee: %s", err) + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting aggregate commit network fee: %s", err) + } + + aggFee := big.Div(big.Mul(aggFeeRaw, aggFeeNum), aggFeeDen) + + needFunds = big.Add(collateral, aggFee) + } + + needFunds, err = collateralSendAmount(b.mctx, b.api, b.maddr, cfg, needFunds) + if err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, err + } + + maxFee := b.feeCfg.MaxCommitBatchGasFee.FeeForSectors(len(infos)) + goodFunds := big.Add(maxFee, needFunds) + + mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, types.EmptyTSK) + if err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("couldn't get miner info: %w", err) + } + + from, _, err := b.addrSel.AddressFor(b.mctx, b.api, mi, api.CommitAddr, goodFunds, needFunds) + if err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("no good address found: %w", err) + } + + enc := 
new(bytes.Buffer) + if err := params.MarshalCBOR(enc); err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("couldn't serialize ProveCommitSectors2Params: %w", err) + } + + _, err = simulateMsgGas(b.mctx, b.api, from, b.maddr, builtin.MethodsMiner.ProveCommitSectors3, needFunds, maxFee, enc.Bytes()) + + if err != nil && (!api.ErrorIsIn(err, []error{&api.ErrOutOfGas{}}) || len(sectors) < miner.MinAggregatedSectors*2) { + log.Errorf("simulating CommitBatch message failed: %s", err) + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("simulating CommitBatch message failed: %w", err) + } + + msgTooLarge := len(enc.Bytes()) > (messagepool.MaxMessageSize - 128) + + // If we're out of gas, split the batch in half and evaluate again + if api.ErrorIsIn(err, []error{&api.ErrOutOfGas{}}) || msgTooLarge { + log.Warnf("CommitAggregate message ran out of gas or is too large, splitting batch in half and trying again (sectors: %d, params: %d)", len(sectors), len(enc.Bytes())) + mid := len(sectors) / 2 + ret0, _ := b.processBatchV2(cfg, sectors[:mid], nv, aggregate) + ret1, _ := b.processBatchV2(cfg, sectors[mid:], nv, aggregate) + + return append(ret0, ret1...), nil + } + + mcid, err := sendMsg(b.mctx, b.api, from, b.maddr, builtin.MethodsMiner.ProveCommitSectors3, needFunds, maxFee, enc.Bytes()) + if err != nil { + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("sending message failed (params size: %d, sectors: %d, agg: %t): %w", len(enc.Bytes()), len(sectors), aggregate, err) + } + + res.Msg = &mcid + + log.Infow("Sent ProveCommitSectors2 message", "cid", mcid, "from", from, "todo", total, "sectors", len(infos)) + + return []sealiface.CommitBatchRes{res}, nil +} + +// processBatchV1 processes a batch of sectors before nv22. It always sends out an aggregate message. 
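+// It is also used for sectors which were precommitted with DealIDs, since
+// those cannot be activated through the ProveCommitSectors3 path above.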
+func (b *CommitBatcher) processBatchV1(cfg sealiface.Config, sectors []abi.SectorNumber, nv network.Version) ([]sealiface.CommitBatchRes, error) { ts, err := b.api.ChainHead(b.mctx) if err != nil { return nil, err @@ -322,13 +534,6 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config, sectors []abi.SectorN return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting miner id: %w", err) } - nv, err := b.api.StateNetworkVersion(b.mctx, ts.Key()) - if err != nil { - res.Error = err.Error() - log.Errorf("getting network version: %s", err) - return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting network version: %s", err) - } - arp, err := b.aggregateProofType(nv) if err != nil { res.Error = err.Error() @@ -396,8 +601,8 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config, sectors []abi.SectorN if api.ErrorIsIn(err, []error{&api.ErrOutOfGas{}}) { log.Warnf("CommitAggregate message ran out of gas, splitting batch in half and trying again (sectors: %d)", len(sectors)) mid := len(sectors) / 2 - ret0, _ := b.processBatch(cfg, sectors[:mid]) - ret1, _ := b.processBatch(cfg, sectors[mid:]) + ret0, _ := b.processBatchV1(cfg, sectors[:mid], nv) + ret1, _ := b.processBatchV1(cfg, sectors[mid:], nv) return append(ret0, ret1...), nil } @@ -484,6 +689,10 @@ func (b *CommitBatcher) processIndividually(cfg sealiface.Config) ([]sealiface.C } func (b *CommitBatcher) processSingle(cfg sealiface.Config, mi api.MinerInfo, avail *abi.TokenAmount, sn abi.SectorNumber, info AggregateInput, tsk types.TipSetKey) (cid.Cid, error) { + return b.processSingleV1(cfg, mi, avail, sn, info, tsk) +} + +func (b *CommitBatcher) processSingleV1(cfg sealiface.Config, mi api.MinerInfo, avail *abi.TokenAmount, sn abi.SectorNumber, info AggregateInput, tsk types.TipSetKey) (cid.Cid, error) { enc := new(bytes.Buffer) params := &miner.ProveCommitSectorParams{ SectorNumber: sn, @@ -646,11 +855,15 @@ func (b *CommitBatcher) getCommitCutoff(si SectorInfo) (time.Time, error) { cutoffEpoch := pci.PreCommitEpoch + mpcd for _, p := range si.Pieces { - if p.DealInfo == nil { + if !p.HasDealInfo() { continue } - startEpoch := p.DealInfo.DealSchedule.StartEpoch + startEpoch, err := p.StartEpoch() + if err != nil { + log.Errorf("getting deal start epoch: %s", err) + return time.Now(), err + } if startEpoch < cutoffEpoch { cutoffEpoch = startEpoch } diff --git a/storage/pipeline/commit_batch_test.go b/storage/pipeline/commit_batch_test.go deleted file mode 100644 index 5ae2f171a..000000000 --- a/storage/pipeline/commit_batch_test.go +++ /dev/null @@ -1,498 +0,0 @@ -// stm: #unit -package sealing_test - -import ( - "bytes" - "context" - "sort" - "sync" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/network" - prooftypes "github.com/filecoin-project/go-state-types/proof" - miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/storage/ctladdr" - pipeline "github.com/filecoin-project/lotus/storage/pipeline" - "github.com/filecoin-project/lotus/storage/pipeline/mocks" - 
"github.com/filecoin-project/lotus/storage/pipeline/sealiface" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -func TestCommitBatcher(t *testing.T) { - //stm: @CHAIN_STATE_MINER_PRE_COM_INFO_001, @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001 - t0123, err := address.NewFromString("t0123") - require.NoError(t, err) - - ctx := context.Background() - - as := asel(func(ctx context.Context, mi api.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { - return t0123, big.Zero(), nil - }) - - maxBatch := miner5.MaxAggregatedSectors - minBatch := miner5.MinAggregatedSectors - - cfg := func() (sealiface.Config, error) { - return sealiface.Config{ - MaxWaitDealsSectors: 2, - MaxSealingSectors: 0, - MaxSealingSectorsForDeals: 0, - WaitDealsDelay: time.Hour * 6, - AlwaysKeepUnsealedCopy: true, - - MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, - PreCommitBatchWait: 24 * time.Hour, - PreCommitBatchSlack: 3 * time.Hour, - - AggregateCommits: true, - MinCommitBatch: minBatch, - MaxCommitBatch: maxBatch, - CommitBatchWait: 24 * time.Hour, - CommitBatchSlack: 1 * time.Hour, - - AggregateAboveBaseFee: types.BigMul(types.PicoFil, types.NewInt(150)), // 0.15 nFIL - BatchPreCommitAboveBaseFee: types.BigMul(types.PicoFil, types.NewInt(150)), // 0.15 nFIL - - TerminateBatchMin: 1, - TerminateBatchMax: 100, - TerminateBatchWait: 5 * time.Minute, - }, nil - } - - type promise func(t *testing.T) - type action func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise - - actions := func(as ...action) action { - return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise { - var ps []promise - for _, a := range as { - p := a(t, s, pcb) - if p != nil { - ps = append(ps, p) - } - } - - if len(ps) > 0 { - return func(t *testing.T) { - for _, p := range ps { - p(t) - } - } - } - return nil - } - } - - addSector := func(sn abi.SectorNumber, aboveBalancer bool) action { - return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise { - var pcres sealiface.CommitBatchRes - var pcerr error - done := sync.Mutex{} - done.Lock() - - si := pipeline.SectorInfo{ - SectorNumber: sn, - } - - basefee := types.PicoFil - if aboveBalancer { - basefee = types.NanoFil - } - - s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, basefee, 1), nil) - s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version13, nil) - s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&minertypes.SectorPreCommitOnChainInfo{ - PreCommitDeposit: big.Zero(), - }, nil) - - go func() { - defer done.Unlock() - pcres, pcerr = pcb.AddCommit(ctx, si, pipeline.AggregateInput{ - Info: prooftypes.AggregateSealVerifyInfo{ - Number: sn, - }, - }) - }() - - return func(t *testing.T) { - done.Lock() - require.NoError(t, pcerr) - require.Empty(t, pcres.Error) - require.Contains(t, pcres.Sectors, si.SectorNumber) - } - } - } - - addSectors := func(sectors []abi.SectorNumber, aboveBalancer bool) action { - as := make([]action, len(sectors)) - for i, sector := range sectors { - as[i] = addSector(sector, aboveBalancer) - } - return actions(as...) 
- } - - waitPending := func(n int) action { - return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise { - require.Eventually(t, func() bool { - p, err := pcb.Pending(ctx) - require.NoError(t, err) - return len(p) == n - }, time.Second*5, 10*time.Millisecond) - - return nil - } - } - - //stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001, @CHAIN_STATE_MINER_GET_COLLATERAL_001 - expectSend := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool) action { - return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise { - s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(api.MinerInfo{Owner: t0123, Worker: t0123}, nil) - - ti := len(expect) - batch := false - if ti >= minBatch { - batch = true - ti = 1 - } - - basefee := types.PicoFil - if aboveBalancer { - basefee = types.NanoFil - } - - s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, basefee, 1), nil) - /*if batch { - s.EXPECT().ChainBaseFee(gomock.Any(), gomock.Any()).Return(basefee, nil) - }*/ - - if !aboveBalancer { - batch = false - ti = len(expect) - } - - s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, basefee, 1), nil) - - pciC := len(expect) - if failOnePCI { - s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), abi.SectorNumber(1), gomock.Any()).Return(nil, nil).Times(1) // not found - pciC = len(expect) - 1 - if !batch { - ti-- - } - } - s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&minertypes.SectorPreCommitOnChainInfo{ - PreCommitDeposit: big.Zero(), - }, nil).Times(pciC) - s.EXPECT().StateMinerInitialPledgeCollateral(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(big.Zero(), nil).Times(pciC) - - if batch { - s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version13, nil) - s.EXPECT().GasEstimateMessageGas(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&types.Message{GasLimit: 100000}, nil) - } - - s.EXPECT().MpoolPushMessage(gomock.Any(), funMatcher(func(i interface{}) bool { - b := i.(*types.Message) - if batch { - var params miner5.ProveCommitAggregateParams - require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b.Params))) - for _, number := range expect { - set, err := params.SectorNumbers.IsSet(uint64(number)) - require.NoError(t, err) - require.True(t, set) - } - } else { - var params miner5.ProveCommitSectorParams - require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b.Params))) - } - return true - }), gomock.Any()).Return(dummySmsg, nil).Times(ti) - return nil - } - } - - expectProcessBatch := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool, gasOverLimit bool) action { - return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise { - s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(api.MinerInfo{Owner: t0123, Worker: t0123}, nil) - - ti := len(expect) - batch := false - if ti >= minBatch { - batch = true - ti = 1 - } - - if !aboveBalancer { - batch = false - ti = len(expect) - } - - pciC := len(expect) - if failOnePCI { - s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), abi.SectorNumber(1), gomock.Any()).Return(nil, nil).Times(1) // not found - pciC = len(expect) - 1 - if !batch { - ti-- - } - } - s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&minertypes.SectorPreCommitOnChainInfo{ - PreCommitDeposit: big.Zero(), - }, 
nil).Times(pciC) - s.EXPECT().StateMinerInitialPledgeCollateral(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(big.Zero(), nil).Times(pciC) - - if batch { - s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version18, nil) - if gasOverLimit { - s.EXPECT().GasEstimateMessageGas(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, &api.ErrOutOfGas{}) - } else { - s.EXPECT().GasEstimateMessageGas(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&types.Message{GasLimit: 100000}, nil) - } - - } - return nil - } - } - - flush := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool) action { - return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise { - _ = expectSend(expect, aboveBalancer, failOnePCI)(t, s, pcb) - - batch := len(expect) >= minBatch && aboveBalancer - - r, err := pcb.Flush(ctx) - require.NoError(t, err) - if batch { - require.Len(t, r, 1) - require.Empty(t, r[0].Error) - sort.Slice(r[0].Sectors, func(i, j int) bool { - return r[0].Sectors[i] < r[0].Sectors[j] - }) - require.Equal(t, expect, r[0].Sectors) - if !failOnePCI { - require.Len(t, r[0].FailedSectors, 0) - } else { - require.Len(t, r[0].FailedSectors, 1) - _, found := r[0].FailedSectors[1] - require.True(t, found) - } - } else { - require.Len(t, r, len(expect)) - for _, res := range r { - require.Len(t, res.Sectors, 1) - require.Empty(t, res.Error) - } - sort.Slice(r, func(i, j int) bool { - return r[i].Sectors[0] < r[j].Sectors[0] - }) - for i, res := range r { - require.Equal(t, abi.SectorNumber(i), res.Sectors[0]) - if failOnePCI && res.Sectors[0] == 1 { - require.Len(t, res.FailedSectors, 1) - _, found := res.FailedSectors[1] - require.True(t, found) - } else { - require.Empty(t, res.FailedSectors) - } - } - } - - return nil - } - } - - getSectors := func(n int) []abi.SectorNumber { - out := make([]abi.SectorNumber, n) - for i := range out { - out[i] = abi.SectorNumber(i) - } - return out - } - - tcs := map[string]struct { - actions []action - }{ - "addSingle-aboveBalancer": { - actions: []action{ - addSector(0, true), - waitPending(1), - flush([]abi.SectorNumber{0}, true, false), - }, - }, - "addTwo-aboveBalancer": { - actions: []action{ - addSectors(getSectors(2), true), - waitPending(2), - flush(getSectors(2), true, false), - }, - }, - "addAte-aboveBalancer": { - actions: []action{ - addSectors(getSectors(8), true), - waitPending(8), - flush(getSectors(8), true, false), - }, - }, - "addMax-aboveBalancer": { - actions: []action{ - expectSend(getSectors(maxBatch), true, false), - addSectors(getSectors(maxBatch), true), - }, - }, - "addMax-aboveBalancer-gasAboveLimit": { - actions: []action{ - expectProcessBatch(getSectors(maxBatch), true, false, true), - expectSend(getSectors(maxBatch)[:maxBatch/2], true, false), - expectSend(getSectors(maxBatch)[maxBatch/2:], true, false), - addSectors(getSectors(maxBatch), true), - }, - }, - "addSingle-belowBalancer": { - actions: []action{ - addSector(0, false), - waitPending(1), - flush([]abi.SectorNumber{0}, false, false), - }, - }, - "addTwo-belowBalancer": { - actions: []action{ - addSectors(getSectors(2), false), - waitPending(2), - flush(getSectors(2), false, false), - }, - }, - "addAte-belowBalancer": { - actions: []action{ - addSectors(getSectors(8), false), - waitPending(8), - flush(getSectors(8), false, false), - }, - }, - "addMax-belowBalancer": { - actions: []action{ - expectSend(getSectors(maxBatch), false, false), - addSectors(getSectors(maxBatch), 
false), - }, - }, - - "addAte-aboveBalancer-failOne": { - actions: []action{ - addSectors(getSectors(8), true), - waitPending(8), - flush(getSectors(8), true, true), - }, - }, - "addAte-belowBalancer-failOne": { - actions: []action{ - addSectors(getSectors(8), false), - waitPending(8), - flush(getSectors(8), false, true), - }, - }, - } - - for name, tc := range tcs { - tc := tc - - t.Run(name, func(t *testing.T) { - // create go mock controller here - mockCtrl := gomock.NewController(t) - // when test is done, assert expectations on all mock objects. - defer mockCtrl.Finish() - - // create them mocks - pcapi := mocks.NewMockCommitBatcherApi(mockCtrl) - - pcb := pipeline.NewCommitBatcher(ctx, t0123, pcapi, as, fc, cfg, &fakeProver{}) - - var promises []promise - - for _, a := range tc.actions { - p := a(t, pcapi, pcb) - if p != nil { - promises = append(promises, p) - } - } - - for _, p := range promises { - p(t) - } - - err := pcb.Stop(ctx) - require.NoError(t, err) - }) - } -} - -type fakeProver struct{} - -func (f fakeProver) AggregateSealProofs(aggregateInfo prooftypes.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { - return []byte("Trust me, I'm a proof"), nil -} - -var _ storiface.Prover = &fakeProver{} - -var dummyAddr = func() address.Address { - a, _ := address.NewFromString("t00") - return a -}() - -func makeBFTs(t *testing.T, basefee abi.TokenAmount, h abi.ChainEpoch) *types.TipSet { - dummyCid, _ := cid.Parse("bafkqaaa") - - var ts, err = types.NewTipSet([]*types.BlockHeader{ - { - Height: h, - Miner: dummyAddr, - - Parents: []cid.Cid{}, - - Ticket: &types.Ticket{VRFProof: []byte{byte(h % 2)}}, - - ParentStateRoot: dummyCid, - Messages: dummyCid, - ParentMessageReceipts: dummyCid, - - BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, - BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, - - ParentBaseFee: basefee, - }, - }) - if t != nil { - require.NoError(t, err) - } - - return ts -} - -func makeTs(t *testing.T, h abi.ChainEpoch) *types.TipSet { - return makeBFTs(t, big.NewInt(0), h) -} - -var dummySmsg = &types.SignedMessage{ - Message: types.Message{ - From: dummyAddr, - To: dummyAddr, - }, - Signature: crypto.Signature{Type: crypto.SigTypeBLS}, -} - -type asel func(ctx context.Context, mi api.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) - -func (s asel) AddressFor(ctx context.Context, _ ctladdr.NodeApi, mi api.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { - return s(ctx, mi, use, goodFunds, minFunds) -} - -var _ pipeline.AddressSelector = asel(nil) diff --git a/storage/pipeline/currentdealinfo_test.go b/storage/pipeline/currentdealinfo_test.go index 21141a35d..1ea05dc35 100644 --- a/storage/pipeline/currentdealinfo_test.go +++ b/storage/pipeline/currentdealinfo_test.go @@ -80,21 +80,21 @@ func TestGetCurrentDealInfo(t *testing.T) { } successDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, }, } earlierDeal := &api.MarketDeal{ Proposal: otherProposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, }, } anotherDeal := &api.MarketDeal{ Proposal: anotherProposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, }, diff --git a/storage/pipeline/fsm_events.go b/storage/pipeline/fsm_events.go index a798a884b..94cd53e82 100644 --- 
a/storage/pipeline/fsm_events.go +++ b/storage/pipeline/fsm_events.go @@ -88,7 +88,7 @@ func (evt SectorAddPiece) apply(state *SectorInfo) { } type SectorPieceAdded struct { - NewPieces []api.SectorPiece + NewPieces []SafeSectorPiece } func (evt SectorPieceAdded) apply(state *SectorInfo) { @@ -114,9 +114,11 @@ type SectorPacked struct{ FillerPieces []abi.PieceInfo } func (evt SectorPacked) apply(state *SectorInfo) { for idx := range evt.FillerPieces { - state.Pieces = append(state.Pieces, api.SectorPiece{ - Piece: evt.FillerPieces[idx], - DealInfo: nil, // filler pieces don't have deals associated with them + state.Pieces = append(state.Pieces, SafeSectorPiece{ + real: api.SectorPiece{ + Piece: evt.FillerPieces[idx], + DealInfo: nil, // filler pieces don't have deals associated with them + }, }) } } @@ -419,7 +421,8 @@ type SectorUpdateDealIDs struct { func (evt SectorUpdateDealIDs) apply(state *SectorInfo) { for i, id := range evt.Updates { - state.Pieces[i].DealInfo.DealID = id + // NOTE: all update deals are builtin-market deals + state.Pieces[i].real.DealInfo.DealID = id } } diff --git a/storage/pipeline/input.go b/storage/pipeline/input.go index 6fdae03d9..2bc7c2f81 100644 --- a/storage/pipeline/input.go +++ b/storage/pipeline/input.go @@ -5,7 +5,6 @@ import ( "sort" "time" - "github.com/ipfs/go-cid" "go.uber.org/zap" "golang.org/x/xerrors" @@ -13,14 +12,15 @@ import ( "github.com/filecoin-project/go-padreader" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/go-statemachine" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/result" "github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" @@ -32,10 +32,15 @@ func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) e var used abi.UnpaddedPieceSize var lastDealEnd abi.ChainEpoch for _, piece := range sector.Pieces { - used += piece.Piece.Size.Unpadded() + used += piece.Piece().Size.Unpadded() - if piece.DealInfo != nil && piece.DealInfo.DealProposal.EndEpoch > lastDealEnd { - lastDealEnd = piece.DealInfo.DealProposal.EndEpoch + endEpoch, err := piece.EndEpoch() + if err != nil { + return xerrors.Errorf("piece.EndEpoch: %w", err) + } + + if piece.HasDealInfo() && endEpoch > lastDealEnd { + lastDealEnd = endEpoch } } @@ -65,9 +70,9 @@ func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) e if _, has := m.openSectors[sid]; !has { m.openSectors[sid] = &openSector{ used: used, - maybeAccept: func(cid cid.Cid) error { + maybeAccept: func(pk piece.PieceKey) error { // todo check deal start deadline (configurable) - m.assignedPieces[sid] = append(m.assignedPieces[sid], cid) + m.assignedPieces[sid] = append(m.assignedPieces[sid], pk) return ctx.Send(SectorAddPiece{}) }, @@ -94,7 +99,7 @@ func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) e func (m *Sealing) maybeStartSealing(ctx statemachine.Context, sector SectorInfo, used abi.UnpaddedPieceSize) (bool, error) { log := log.WithOptions(zap.Fields( zap.Uint64("sector", 
uint64(sector.SectorNumber)), - zap.Int("deals", len(sector.dealIDs())), + zap.Int("dataPieces", len(sector.nonPaddingPieceInfos())), )) now := time.Now() @@ -117,7 +122,7 @@ func (m *Sealing) maybeStartSealing(ctx statemachine.Context, sector SectorInfo, return false, xerrors.Errorf("getting per-sector deal limit: %w", err) } - if len(sector.dealIDs()) >= maxDeals { + if len(sector.nonPaddingPieceInfos()) >= maxDeals { // can't accept more deals log.Infow("starting to seal deal sector", "trigger", "maxdeals") return true, ctx.Send(SectorStartPacking{}) @@ -146,13 +151,24 @@ func (m *Sealing) maybeStartSealing(ctx statemachine.Context, sector SectorInfo, var dealSafeSealEpoch abi.ChainEpoch for _, piece := range sector.Pieces { - if piece.DealInfo == nil { + if !piece.HasDealInfo() { continue } - dealSafeSealEpoch = piece.DealInfo.DealProposal.StartEpoch - cfg.StartEpochSealingBuffer + startEpoch, err := piece.StartEpoch() + if err != nil { + log.Errorw("failed to get start epoch for deal", "piece", piece.String(), "error", err) + continue // not ideal, but skipping the check should break things less + } + + dealSafeSealEpoch = startEpoch - cfg.StartEpochSealingBuffer + + alloc, err := piece.GetAllocation(ctx.Context(), m.Api, types.EmptyTSK) + if err != nil { + log.Errorw("failed to get allocation for deal", "piece", piece.String(), "error", err) + continue // not ideal, but skipping the check should break things less + } - alloc, _ := m.Api.StateGetAllocationForPendingDeal(ctx.Context(), piece.DealInfo.DealID, types.EmptyTSK) // alloc is nil if this is not a verified deal in nv17 or later if alloc == nil { continue @@ -210,8 +226,8 @@ func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) er var offset abi.UnpaddedPieceSize pieceSizes := make([]abi.UnpaddedPieceSize, len(sector.Pieces)) for i, p := range sector.Pieces { - pieceSizes[i] = p.Piece.Size.Unpadded() - offset += p.Piece.Size.Unpadded() + pieceSizes[i] = p.Piece().Size.Unpadded() + offset += p.Piece().Size.Unpadded() } maxDeals, err := getDealPerSectorLimit(ssize) @@ -227,7 +243,7 @@ func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) er return xerrors.Errorf("piece %s assigned to sector %d not found", piece, sector.SectorNumber) } - if len(sector.dealIDs())+(i+1) > maxDeals { + if len(sector.nonPaddingPieceInfos())+(i+1) > maxDeals { // todo: this is rather unlikely to happen, but in case it does, return the deal to waiting queue instead of failing it deal.accepted(sector.SectorNumber, offset, xerrors.Errorf("too many deals assigned to sector %d, dropping deal", sector.SectorNumber)) continue @@ -263,8 +279,10 @@ func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) er } pieceSizes = append(pieceSizes, p.Unpadded()) - res.NewPieces = append(res.NewPieces, api.SectorPiece{ - Piece: ppi, + res.NewPieces = append(res.NewPieces, SafeSectorPiece{ + api.SectorPiece{ + Piece: ppi, + }, }) } @@ -278,22 +296,26 @@ func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) er deal.accepted(sector.SectorNumber, offset, err) return ctx.Send(SectorAddPieceFailed{err}) } - if !ppi.PieceCID.Equals(deal.deal.DealProposal.PieceCID) { - err = xerrors.Errorf("got unexpected piece CID: expected:%s, got:%s", deal.deal.DealProposal.PieceCID, ppi.PieceCID) + if !ppi.PieceCID.Equals(deal.deal.PieceCID()) { + err = xerrors.Errorf("got unexpected piece CID: expected:%s, got:%s", deal.deal.PieceCID(), ppi.PieceCID) deal.accepted(sector.SectorNumber, offset, err) 
return ctx.Send(SectorAddPieceFailed{err}) } - log.Infow("deal added to a sector", "deal", deal.deal.DealID, "sector", sector.SectorNumber, "piece", ppi.PieceCID) + log.Infow("deal added to a sector", "pieceID", deal.deal.String(), "sector", sector.SectorNumber, "piece", ppi.PieceCID) deal.accepted(sector.SectorNumber, offset, nil) offset += deal.size pieceSizes = append(pieceSizes, deal.size) - res.NewPieces = append(res.NewPieces, api.SectorPiece{ - Piece: ppi, - DealInfo: &deal.deal, + dinfo := deal.deal.Impl() + + res.NewPieces = append(res.NewPieces, SafeSectorPiece{ + api.SectorPiece{ + Piece: ppi, + DealInfo: &dinfo, + }, }) } @@ -304,8 +326,13 @@ func (m *Sealing) handleAddPieceFailed(ctx statemachine.Context, sector SectorIn return ctx.Send(SectorRetryWaitDeals{}) } -func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, data storiface.Data, deal api.PieceDealInfo) (api.SectorOffset, error) { - log.Infof("Adding piece for deal %d (publish msg: %s)", deal.DealID, deal.PublishCid) +func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, data storiface.Data, pieceInfo piece.PieceDealInfo) (api.SectorOffset, error) { + return m.sectorAddPieceToAny(ctx, size, data, &pieceInfo) +} + +func (m *Sealing) sectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, data storiface.Data, pieceInfo UniversalPieceInfo) (api.SectorOffset, error) { + log.Infof("Adding piece %s", pieceInfo.String()) + if (padreader.PaddedSize(uint64(size))) != size { return api.SectorOffset{}, xerrors.Errorf("cannot allocate unpadded piece") } @@ -324,10 +351,6 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec return api.SectorOffset{}, xerrors.Errorf("piece cannot fit into a sector") } - if _, err := deal.DealProposal.Cid(); err != nil { - return api.SectorOffset{}, xerrors.Errorf("getting proposal CID: %w", err) - } - cfg, err := m.getConfig() if err != nil { return api.SectorOffset{}, xerrors.Errorf("getting config: %w", err) @@ -337,19 +360,34 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec if err != nil { return api.SectorOffset{}, xerrors.Errorf("couldnt get chain head: %w", err) } - if ts.Height()+cfg.StartEpochSealingBuffer > deal.DealProposal.StartEpoch { - return api.SectorOffset{}, xerrors.Errorf( - "cannot add piece for deal with piece CID %s: current epoch %d has passed deal proposal start epoch %d", - deal.DealProposal.PieceCID, ts.Height(), deal.DealProposal.StartEpoch) + + nv, err := m.Api.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return api.SectorOffset{}, xerrors.Errorf("getting network version: %w", err) } - claimTerms, err := m.getClaimTerms(ctx, deal, ts.Key()) + if err := pieceInfo.Valid(nv); err != nil { + return api.SectorOffset{}, xerrors.Errorf("piece metadata invalid: %w", err) + } + + startEpoch, err := pieceInfo.StartEpoch() + if err != nil { + return api.SectorOffset{}, xerrors.Errorf("getting last start epoch: %w", err) + } + + if ts.Height()+cfg.StartEpochSealingBuffer > startEpoch { + return api.SectorOffset{}, xerrors.Errorf( + "cannot add piece for deal with piece CID %s: current epoch %d has passed deal proposal start epoch %d", + pieceInfo.PieceCID(), ts.Height(), startEpoch) + } + + claimTerms, err := m.getClaimTerms(ctx, pieceInfo, ts.Key()) if err != nil { return api.SectorOffset{}, err } m.inputLk.Lock() - if pp, exist := m.pendingPieces[proposalCID(deal)]; exist { + if pp, exist := m.pendingPieces[pieceInfo.Key()]; 
exist { m.inputLk.Unlock() // we already have a pre-existing add piece call for this deal, let's wait for it to finish and see if it's successful @@ -366,7 +404,7 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec } // addPendingPiece takes over m.inputLk - pp := m.addPendingPiece(ctx, size, data, deal, claimTerms, sp) + pp := m.addPendingPiece(ctx, size, data, pieceInfo, claimTerms, sp) res, err := waitAddPieceResp(ctx, pp) if err != nil { @@ -375,32 +413,41 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec return api.SectorOffset{Sector: res.sn, Offset: res.offset.Padded()}, res.err } -func (m *Sealing) getClaimTerms(ctx context.Context, deal api.PieceDealInfo, tsk types.TipSetKey) (pieceClaimBounds, error) { +func (m *Sealing) getClaimTerms(ctx context.Context, deal UniversalPieceInfo, tsk types.TipSetKey) (pieceClaimBounds, error) { + + all, err := deal.GetAllocation(ctx, m.Api, tsk) + if err != nil { + return pieceClaimBounds{}, err + } + if all != nil { + startEpoch, err := deal.StartEpoch() + if err != nil { + return pieceClaimBounds{}, err + } + + return pieceClaimBounds{ + claimTermEnd: startEpoch + all.TermMax, + }, nil + } + nv, err := m.Api.StateNetworkVersion(ctx, tsk) if err != nil { return pieceClaimBounds{}, err } - if nv >= network.Version17 { - all, err := m.Api.StateGetAllocationForPendingDeal(ctx, deal.DealID, tsk) - if err != nil { - return pieceClaimBounds{}, err - } - if all != nil { - return pieceClaimBounds{ - claimTermEnd: deal.DealProposal.StartEpoch + all.TermMax, - }, nil - } + endEpoch, err := deal.EndEpoch() + if err != nil { + return pieceClaimBounds{}, err } // no allocation for this deal, so just use a really high number for "term end" return pieceClaimBounds{ - claimTermEnd: deal.DealProposal.EndEpoch + policy.GetSectorMaxLifetime(abi.RegisteredSealProof_StackedDrg32GiBV1_1, network.Version17), + claimTermEnd: endEpoch + policy.GetSectorMaxLifetime(abi.RegisteredSealProof_StackedDrg32GiBV1_1, nv), }, nil } // called with m.inputLk; transfers the lock to another goroutine! 
-func (m *Sealing) addPendingPiece(ctx context.Context, size abi.UnpaddedPieceSize, data storiface.Data, deal api.PieceDealInfo, ct pieceClaimBounds, sp abi.RegisteredSealProof) *pendingPiece { +func (m *Sealing) addPendingPiece(ctx context.Context, size abi.UnpaddedPieceSize, data storiface.Data, deal UniversalPieceInfo, ct pieceClaimBounds, sp abi.RegisteredSealProof) *pendingPiece { doneCh := make(chan struct{}) pp := &pendingPiece{ size: size, @@ -417,14 +464,12 @@ func (m *Sealing) addPendingPiece(ctx context.Context, size abi.UnpaddedPieceSiz close(pp.doneCh) } - log.Debugw("new pending piece", "dealId", deal.DealID, - "piece", deal.DealProposal.PieceCID, - "size", size, - "dealStart", deal.DealSchedule.StartEpoch, - "dealEnd", deal.DealSchedule.EndEpoch, + log.Debugw("new pending piece", "pieceID", deal.String(), + "dealStart", result.Wrap(deal.StartEpoch()), + "dealEnd", result.Wrap(deal.EndEpoch()), "termEnd", ct.claimTermEnd) - m.pendingPieces[proposalCID(deal)] = pp + m.pendingPieces[deal.Key()] = pp go func() { defer m.inputLk.Unlock() if err := m.updateInput(ctx, sp); err != nil { @@ -489,7 +534,7 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e type match struct { sector abi.SectorID - deal cid.Cid + deal piece.PieceKey dealEnd abi.ChainEpoch claimTermEnd abi.ChainEpoch @@ -499,7 +544,7 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e } var matches []match - toAssign := map[cid.Cid]struct{}{} // used to maybe create new sectors + toAssign := map[piece.PieceKey]struct{}{} // used to maybe create new sectors // todo: this is distinctly O(n^2), may need to be optimized for tiny deals and large scale miners // (unlikely to be a problem now) @@ -523,12 +568,18 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e continue } + endEpoch, err := piece.deal.EndEpoch() + if err != nil { + log.Errorf("failed to get end epoch for deal %s", piece.deal) + continue + } + if piece.size <= avail { // (note: if we have enough space for the piece, we also have enough space for inter-piece padding) matches = append(matches, match{ sector: id, deal: proposalCid, - dealEnd: piece.deal.DealProposal.EndEpoch, + dealEnd: endEpoch, claimTermEnd: piece.claimTerms.claimTermEnd, size: piece.size, @@ -610,7 +661,7 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e } // pendingPieceIndex is an index in the Sealing.pendingPieces map -type pendingPieceIndex cid.Cid +type pendingPieceIndex piece.PieceKey type pieceBound struct { epoch abi.ChainEpoch @@ -634,13 +685,21 @@ func (m *Sealing) pendingPieceEpochBounds() []pieceBound { continue } + endEpoch, err := piece.deal.EndEpoch() + if err != nil { + // this really should never happen, at this point we have validated + // the piece enough times + log.Errorf("failed to get end epoch for deal %s: %v", ppi, err) + continue + } + // start bound on deal end - if boundsByEpoch[piece.deal.DealProposal.EndEpoch] == nil { - boundsByEpoch[piece.deal.DealProposal.EndEpoch] = &pieceBound{ - epoch: piece.deal.DealProposal.EndEpoch, + if boundsByEpoch[endEpoch] == nil { + boundsByEpoch[endEpoch] = &pieceBound{ + epoch: endEpoch, } } - boundsByEpoch[piece.deal.DealProposal.EndEpoch].boundStart = append(boundsByEpoch[piece.deal.DealProposal.EndEpoch].boundStart, pendingPieceIndex(ppi)) + boundsByEpoch[endEpoch].boundStart = append(boundsByEpoch[endEpoch].boundStart, pendingPieceIndex(ppi)) // end bound on term max if 
boundsByEpoch[piece.claimTerms.claimTermEnd] == nil { @@ -663,10 +722,10 @@ func (m *Sealing) pendingPieceEpochBounds() []pieceBound { var curBoundBytes abi.UnpaddedPieceSize for i, bound := range out { for _, ppi := range bound.boundStart { - curBoundBytes += m.pendingPieces[cid.Cid(ppi)].size + curBoundBytes += m.pendingPieces[piece.PieceKey(ppi)].size } for _, ppi := range bound.boundEnd { - curBoundBytes -= m.pendingPieces[cid.Cid(ppi)].size + curBoundBytes -= m.pendingPieces[piece.PieceKey(ppi)].size } out[i].dealBytesInBound = curBoundBytes @@ -897,15 +956,17 @@ func (m *Sealing) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showO deals := make([]abi.DealID, len(info.Pieces)) pieces := make([]api.SectorPiece, len(info.Pieces)) for i, piece := range info.Pieces { - pieces[i].Piece = piece.Piece - if piece.DealInfo == nil { + // todo make this work with DDO deals in some reasonable way + + pieces[i].Piece = piece.Piece() + if !piece.HasDealInfo() || piece.Impl().PublishCid == nil { continue } - pdi := *piece.DealInfo // copy + pdi := piece.DealInfo().Impl() // copy pieces[i].DealInfo = &pdi - deals[i] = piece.DealInfo.DealID + deals[i] = piece.DealInfo().Impl().DealID } log := make([]api.SectorLog, len(info.Log)) @@ -956,14 +1017,4 @@ func (m *Sealing) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showO return sInfo, nil } -func proposalCID(deal api.PieceDealInfo) cid.Cid { - pc, err := deal.DealProposal.Cid() - if err != nil { - log.Errorf("DealProposal.Cid error: %+v", err) - return cid.Undef - } - - return pc -} - var _ sectorblocks.SectorBuilder = &Sealing{} diff --git a/storage/pipeline/mocks/api.go b/storage/pipeline/mocks/api.go index 5c67a1c42..a4f1cd9ef 100644 --- a/storage/pipeline/mocks/api.go +++ b/storage/pipeline/mocks/api.go @@ -9,6 +9,7 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" + blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" address "github.com/filecoin-project/go-address" @@ -64,6 +65,21 @@ func (mr *MockSealingAPIMockRecorder) ChainGetMessage(arg0, arg1 interface{}) *g return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessage", reflect.TypeOf((*MockSealingAPI)(nil).ChainGetMessage), arg0, arg1) } +// ChainHasObj mocks base method. +func (m *MockSealingAPI) ChainHasObj(arg0 context.Context, arg1 cid.Cid) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainHasObj", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainHasObj indicates an expected call of ChainHasObj. +func (mr *MockSealingAPIMockRecorder) ChainHasObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHasObj", reflect.TypeOf((*MockSealingAPI)(nil).ChainHasObj), arg0, arg1) +} + // ChainHead mocks base method. func (m *MockSealingAPI) ChainHead(arg0 context.Context) (*types.TipSet, error) { m.ctrl.T.Helper() @@ -79,6 +95,20 @@ func (mr *MockSealingAPIMockRecorder) ChainHead(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockSealingAPI)(nil).ChainHead), arg0) } +// ChainPutObj mocks base method. +func (m *MockSealingAPI) ChainPutObj(arg0 context.Context, arg1 blocks.Block) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainPutObj", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainPutObj indicates an expected call of ChainPutObj. 
+func (mr *MockSealingAPIMockRecorder) ChainPutObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainPutObj", reflect.TypeOf((*MockSealingAPI)(nil).ChainPutObj), arg0, arg1) +} + // ChainReadObj mocks base method. func (m *MockSealingAPI) ChainReadObj(arg0 context.Context, arg1 cid.Cid) ([]byte, error) { m.ctrl.T.Helper() @@ -139,19 +169,34 @@ func (mr *MockSealingAPIMockRecorder) StateAccountKey(arg0, arg1, arg2 interface return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAccountKey", reflect.TypeOf((*MockSealingAPI)(nil).StateAccountKey), arg0, arg1, arg2) } -// StateComputeDataCID mocks base method. -func (m *MockSealingAPI) StateComputeDataCID(arg0 context.Context, arg1 address.Address, arg2 abi.RegisteredSealProof, arg3 []abi.DealID, arg4 types.TipSetKey) (cid.Cid, error) { +// StateGetActor mocks base method. +func (m *MockSealingAPI) StateGetActor(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*types.ActorV5, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StateComputeDataCID", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(cid.Cid) + ret := m.ctrl.Call(m, "StateGetActor", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.ActorV5) ret1, _ := ret[1].(error) return ret0, ret1 } -// StateComputeDataCID indicates an expected call of StateComputeDataCID. -func (mr *MockSealingAPIMockRecorder) StateComputeDataCID(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +// StateGetActor indicates an expected call of StateGetActor. +func (mr *MockSealingAPIMockRecorder) StateGetActor(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateComputeDataCID", reflect.TypeOf((*MockSealingAPI)(nil).StateComputeDataCID), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockSealingAPI)(nil).StateGetActor), arg0, arg1, arg2) +} + +// StateGetAllocation mocks base method. +func (m *MockSealingAPI) StateGetAllocation(arg0 context.Context, arg1 address.Address, arg2 verifreg.AllocationId, arg3 types.TipSetKey) (*verifreg.Allocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllocation", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*verifreg.Allocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllocation indicates an expected call of StateGetAllocation. +func (mr *MockSealingAPIMockRecorder) StateGetAllocation(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocation", reflect.TypeOf((*MockSealingAPI)(nil).StateGetAllocation), arg0, arg1, arg2, arg3) } // StateGetAllocationForPendingDeal mocks base method. @@ -169,6 +214,21 @@ func (mr *MockSealingAPIMockRecorder) StateGetAllocationForPendingDeal(arg0, arg return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationForPendingDeal", reflect.TypeOf((*MockSealingAPI)(nil).StateGetAllocationForPendingDeal), arg0, arg1, arg2) } +// StateGetAllocationIdForPendingDeal mocks base method. 
+func (m *MockSealingAPI) StateGetAllocationIdForPendingDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (verifreg.AllocationId, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllocationIdForPendingDeal", arg0, arg1, arg2) + ret0, _ := ret[0].(verifreg.AllocationId) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllocationIdForPendingDeal indicates an expected call of StateGetAllocationIdForPendingDeal. +func (mr *MockSealingAPIMockRecorder) StateGetAllocationIdForPendingDeal(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationIdForPendingDeal", reflect.TypeOf((*MockSealingAPI)(nil).StateGetAllocationIdForPendingDeal), arg0, arg1, arg2) +} + // StateGetRandomnessFromBeacon mocks base method. func (m *MockSealingAPI) StateGetRandomnessFromBeacon(arg0 context.Context, arg1 crypto.DomainSeparationTag, arg2 abi.ChainEpoch, arg3 []byte, arg4 types.TipSetKey) (abi.Randomness, error) { m.ctrl.T.Helper() @@ -439,6 +499,21 @@ func (mr *MockSealingAPIMockRecorder) StateSectorPreCommitInfo(arg0, arg1, arg2, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPreCommitInfo", reflect.TypeOf((*MockSealingAPI)(nil).StateSectorPreCommitInfo), arg0, arg1, arg2, arg3) } +// StateVMCirculatingSupplyInternal mocks base method. +func (m *MockSealingAPI) StateVMCirculatingSupplyInternal(arg0 context.Context, arg1 types.TipSetKey) (api.CirculatingSupply, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVMCirculatingSupplyInternal", arg0, arg1) + ret0, _ := ret[0].(api.CirculatingSupply) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVMCirculatingSupplyInternal indicates an expected call of StateVMCirculatingSupplyInternal. +func (mr *MockSealingAPIMockRecorder) StateVMCirculatingSupplyInternal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVMCirculatingSupplyInternal", reflect.TypeOf((*MockSealingAPI)(nil).StateVMCirculatingSupplyInternal), arg0, arg1) +} + // StateWaitMsg mocks base method. func (m *MockSealingAPI) StateWaitMsg(arg0 context.Context, arg1 cid.Cid, arg2 uint64, arg3 abi.ChainEpoch, arg4 bool) (*api.MsgLookup, error) { m.ctrl.T.Helper() diff --git a/storage/pipeline/mocks/mock_precommit_batcher.go b/storage/pipeline/mocks/mock_precommit_batcher.go index 68cce7fb0..fd46f601b 100644 --- a/storage/pipeline/mocks/mock_precommit_batcher.go +++ b/storage/pipeline/mocks/mock_precommit_batcher.go @@ -103,6 +103,21 @@ func (mr *MockPreCommitBatcherApiMockRecorder) StateAccountKey(arg0, arg1, arg2 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAccountKey", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).StateAccountKey), arg0, arg1, arg2) } +// StateGetAllocation mocks base method. +func (m *MockPreCommitBatcherApi) StateGetAllocation(arg0 context.Context, arg1 address.Address, arg2 verifreg.AllocationId, arg3 types.TipSetKey) (*verifreg.Allocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllocation", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*verifreg.Allocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllocation indicates an expected call of StateGetAllocation. 
+func (mr *MockPreCommitBatcherApiMockRecorder) StateGetAllocation(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocation", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).StateGetAllocation), arg0, arg1, arg2, arg3) +} + // StateGetAllocationForPendingDeal mocks base method. func (m *MockPreCommitBatcherApi) StateGetAllocationForPendingDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (*verifreg.Allocation, error) { m.ctrl.T.Helper() diff --git a/storage/pipeline/piece/cbor_gen.go b/storage/pipeline/piece/cbor_gen.go new file mode 100644 index 000000000..ea085becc --- /dev/null +++ b/storage/pipeline/piece/cbor_gen.go @@ -0,0 +1,450 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package piece + +import ( + "fmt" + "io" + "math" + "sort" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" + + abi "github.com/filecoin-project/go-state-types/abi" + miner "github.com/filecoin-project/go-state-types/builtin/v13/miner" + market "github.com/filecoin-project/go-state-types/builtin/v9/market" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{166}); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.PublishCid (cid.Cid) (struct) + if len("PublishCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PublishCid")); err != nil { + return err + } + + if t.PublishCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) + } + } + + // t.DealProposal (market.DealProposal) (struct) + if len("DealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.DealSchedule (piece.DealSchedule) (struct) + if len("DealSchedule") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealSchedule\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealSchedule"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealSchedule")); err != nil { + return err + } + + if err := t.DealSchedule.MarshalCBOR(cw); err != nil { + return err + } + + // t.KeepUnsealed (bool) (bool) + if len("KeepUnsealed") > cbg.MaxLength { + return 
xerrors.Errorf("Value in field \"KeepUnsealed\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil { + return err + } + if _, err := cw.WriteString(string("KeepUnsealed")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.KeepUnsealed); err != nil { + return err + } + + // t.PieceActivationManifest (miner.PieceActivationManifest) (struct) + if len("PieceActivationManifest") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceActivationManifest\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceActivationManifest"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PieceActivationManifest")); err != nil { + return err + } + + if err := t.PieceActivationManifest.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) (err error) { + *t = PieceDealInfo{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("PieceDealInfo: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.PublishCid (cid.Cid) (struct) + case "PublishCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.DealProposal (market.DealProposal) (struct) + case "DealProposal": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.DealProposal = new(market.DealProposal) + if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err) + } + } + + } + // t.DealSchedule (piece.DealSchedule) (struct) + case "DealSchedule": + + { + + if err := t.DealSchedule.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err) + } + + } + // t.KeepUnsealed (bool) (bool) + case "KeepUnsealed": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.KeepUnsealed = false + case 21: + t.KeepUnsealed = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.PieceActivationManifest (miner.PieceActivationManifest) (struct) + case "PieceActivationManifest": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PieceActivationManifest = new(miner.PieceActivationManifest) + if err := 
t.PieceActivationManifest.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PieceActivationManifest pointer: %w", err) + } + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealSchedule) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.EndEpoch (abi.ChainEpoch) (int64) + if len("EndEpoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"EndEpoch\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("EndEpoch"))); err != nil { + return err + } + if _, err := cw.WriteString(string("EndEpoch")); err != nil { + return err + } + + if t.EndEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil { + return err + } + } + + // t.StartEpoch (abi.ChainEpoch) (int64) + if len("StartEpoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StartEpoch\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartEpoch"))); err != nil { + return err + } + if _, err := cw.WriteString(string("StartEpoch")); err != nil { + return err + } + + if t.StartEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil { + return err + } + } + return nil +} + +func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealSchedule{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealSchedule: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.EndEpoch (abi.ChainEpoch) (int64) + case "EndEpoch": + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.EndEpoch = abi.ChainEpoch(extraI) + } + // t.StartEpoch (abi.ChainEpoch) (int64) + case "StartEpoch": + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.StartEpoch = abi.ChainEpoch(extraI) + } + + default: + // Field doesn't exist on 
this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/storage/pipeline/piece/piece_info.go b/storage/pipeline/piece/piece_info.go new file mode 100644 index 000000000..7ee8f7029 --- /dev/null +++ b/storage/pipeline/piece/piece_info.go @@ -0,0 +1,186 @@ +package piece + +import ( + "context" + "fmt" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" + "github.com/filecoin-project/lotus/chain/types" +) + +// DealInfo is a tuple of deal identity and its schedule +type PieceDealInfo struct { + // "Old" builtin-market deal info + PublishCid *cid.Cid + DealID abi.DealID + DealProposal *market.DealProposal + + // Common deal info, required for all pieces + // TODO: https://github.com/filecoin-project/lotus/issues/11237 + DealSchedule DealSchedule + + // Direct Data Onboarding + // When PieceActivationManifest is set, builtin-market deal info must not be set + PieceActivationManifest *miner.PieceActivationManifest + + // Best-effort deal asks + KeepUnsealed bool +} + +// DealSchedule communicates the time interval of a storage deal. The deal must +// appear in a sealed (proven) sector no later than StartEpoch, otherwise it +// is invalid. +type DealSchedule struct { + StartEpoch abi.ChainEpoch + EndEpoch abi.ChainEpoch +} + +func (ds *PieceDealInfo) isBuiltinMarketDeal() bool { + return ds.PublishCid != nil +} + +// Valid validates the deal info after being accepted through RPC, checks that +// the deal metadata is well-formed. +func (ds *PieceDealInfo) Valid(nv network.Version) error { + hasLegacyDealInfo := ds.PublishCid != nil && ds.DealID != 0 && ds.DealProposal != nil + hasPieceActivationManifest := ds.PieceActivationManifest != nil + + if hasLegacyDealInfo && hasPieceActivationManifest { + return xerrors.Errorf("piece deal info has both legacy deal info and piece activation manifest") + } + + if !hasLegacyDealInfo && !hasPieceActivationManifest { + return xerrors.Errorf("piece deal info has neither legacy deal info nor piece activation manifest") + } + + if hasLegacyDealInfo { + if _, err := ds.DealProposal.Cid(); err != nil { + return xerrors.Errorf("checking proposal CID: %w", err) + } + } + + if ds.DealSchedule.StartEpoch <= 0 { + return xerrors.Errorf("invalid deal start epoch %d", ds.DealSchedule.StartEpoch) + } + if ds.DealSchedule.EndEpoch <= 0 { + return xerrors.Errorf("invalid deal end epoch %d", ds.DealSchedule.EndEpoch) + } + if ds.DealSchedule.EndEpoch <= ds.DealSchedule.StartEpoch { + return xerrors.Errorf("invalid deal end epoch %d (start %d)", ds.DealSchedule.EndEpoch, ds.DealSchedule.StartEpoch) + } + + if hasPieceActivationManifest { + if nv < network.Version22 { + return xerrors.Errorf("direct-data-onboarding pieces aren't accepted before network version 22") + } + + // todo any more checks seem reasonable to put here? 
+	}
+
+	return nil
+}
+
+type AllocationAPI interface {
+	StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+	StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+}
+
+func (ds *PieceDealInfo) GetAllocation(ctx context.Context, aapi AllocationAPI, tsk types.TipSetKey) (*verifregtypes.Allocation, error) {
+	switch {
+	case ds.isBuiltinMarketDeal():
+		return aapi.StateGetAllocationForPendingDeal(ctx, ds.DealID, tsk)
+	default:
+		if ds.PieceActivationManifest.VerifiedAllocationKey == nil {
+			return nil, nil
+		}
+
+		caddr, err := address.NewIDAddress(uint64(ds.PieceActivationManifest.VerifiedAllocationKey.Client))
+		if err != nil {
+			return nil, err
+		}
+
+		all, err := aapi.StateGetAllocation(ctx, caddr, verifregtypes.AllocationId(ds.PieceActivationManifest.VerifiedAllocationKey.ID), tsk)
+		if err != nil {
+			return nil, err
+		}
+
+		if all == nil {
+			return nil, nil
+		}
+
+		if all.Client != ds.PieceActivationManifest.VerifiedAllocationKey.Client {
+			return nil, xerrors.Errorf("allocation client mismatch: %d != %d", all.Client, ds.PieceActivationManifest.VerifiedAllocationKey.Client)
+		}
+
+		return all, nil
+	}
+}
+
+// StartEpoch returns the last epoch in which the sector containing this deal
+// must be sealed (committed) in order for the deal to be valid.
+func (ds *PieceDealInfo) StartEpoch() (abi.ChainEpoch, error) {
+	switch {
+	case ds.isBuiltinMarketDeal():
+		return ds.DealSchedule.StartEpoch, nil
+	default:
+		// note - when implementing make sure to cache any dynamically computed values
+		// todo do we want a smarter mechanism here
+		return ds.DealSchedule.StartEpoch, nil
+	}
+}
+
+// EndEpoch returns the minimum epoch until which the sector containing this
+// deal must remain committed.
+func (ds *PieceDealInfo) EndEpoch() (abi.ChainEpoch, error) {
+	switch {
+	case ds.isBuiltinMarketDeal():
+		return ds.DealSchedule.EndEpoch, nil
+	default:
+		// note - when implementing make sure to cache any dynamically computed values
+		// todo do we want a smarter mechanism here
+		return ds.DealSchedule.EndEpoch, nil
+	}
+}
+
+func (ds *PieceDealInfo) PieceCID() cid.Cid {
+	switch {
+	case ds.isBuiltinMarketDeal():
+		return ds.DealProposal.PieceCID
+	default:
+		return ds.PieceActivationManifest.CID
+	}
+}
+
+func (ds *PieceDealInfo) String() string {
+	switch {
+	case ds.isBuiltinMarketDeal():
+		return fmt.Sprintf("BuiltinMarket{DealID: %d, PieceCID: %s, PublishCid: %s}", ds.DealID, ds.DealProposal.PieceCID, ds.PublishCid)
+	default:
+		// todo check that VAlloc doesn't print as a pointer
+		return fmt.Sprintf("DirectDataOnboarding{PieceCID: %s, VAlloc: %x}", ds.PieceActivationManifest.CID, ds.PieceActivationManifest.VerifiedAllocationKey)
+	}
+}
+
+func (ds *PieceDealInfo) KeepUnsealedRequested() bool {
+	return ds.KeepUnsealed
+}
+
+type PieceKey string
+
+// Key returns a unique identifier for this deal info, for use in maps.
+func (ds *PieceDealInfo) Key() PieceKey { + return PieceKey(ds.String()) +} + +func (ds *PieceDealInfo) Impl() PieceDealInfo { + return *ds +} diff --git a/storage/pipeline/pledge.go b/storage/pipeline/pledge.go new file mode 100644 index 000000000..04567fca1 --- /dev/null +++ b/storage/pipeline/pledge.go @@ -0,0 +1,114 @@ +package sealing + +import ( + "context" + + cbor "github.com/ipfs/go-ipld-cbor" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/builtin/reward" + "github.com/filecoin-project/lotus/chain/types" +) + +var initialPledgeNum = types.NewInt(110) +var initialPledgeDen = types.NewInt(100) + +func (m *Sealing) pledgeForPower(ctx context.Context, addedPower abi.StoragePower) (abi.TokenAmount, error) { + store := adt.WrapStore(ctx, cbor.NewCborStore(bstore.NewAPIBlockstore(m.Api))) + + // load power actor + var ( + powerSmoothed builtin.FilterEstimate + pledgeCollateral abi.TokenAmount + ) + if act, err := m.Api.StateGetActor(ctx, power.Address, types.EmptyTSK); err != nil { + return types.EmptyInt, xerrors.Errorf("loading power actor: %w", err) + } else if s, err := power.Load(store, act); err != nil { + return types.EmptyInt, xerrors.Errorf("loading power actor state: %w", err) + } else if p, err := s.TotalPowerSmoothed(); err != nil { + return types.EmptyInt, xerrors.Errorf("failed to determine total power: %w", err) + } else if c, err := s.TotalLocked(); err != nil { + return types.EmptyInt, xerrors.Errorf("failed to determine pledge collateral: %w", err) + } else { + powerSmoothed = p + pledgeCollateral = c + } + + // load reward actor + rewardActor, err := m.Api.StateGetActor(ctx, reward.Address, types.EmptyTSK) + if err != nil { + return types.EmptyInt, xerrors.Errorf("loading reward actor: %w", err) + } + + rewardState, err := reward.Load(store, rewardActor) + if err != nil { + return types.EmptyInt, xerrors.Errorf("loading reward actor state: %w", err) + } + + // get circulating supply + circSupply, err := m.Api.StateVMCirculatingSupplyInternal(ctx, types.EmptyTSK) + if err != nil { + return big.Zero(), xerrors.Errorf("getting circulating supply: %w", err) + } + + // do the calculation + initialPledge, err := rewardState.InitialPledgeForPower( + addedPower, + pledgeCollateral, + &powerSmoothed, + circSupply.FilCirculating, + ) + if err != nil { + return big.Zero(), xerrors.Errorf("calculating initial pledge: %w", err) + } + + return types.BigDiv(types.BigMul(initialPledge, initialPledgeNum), initialPledgeDen), nil +} + +func (m *Sealing) sectorWeight(ctx context.Context, sector SectorInfo, expiration abi.ChainEpoch) (abi.StoragePower, error) { + spt, err := m.currentSealProof(ctx) + if err != nil { + return types.EmptyInt, xerrors.Errorf("getting seal proof type: %w", err) + } + + ssize, err := spt.SectorSize() + if err != nil { + return types.EmptyInt, xerrors.Errorf("getting sector size: %w", err) + } + + ts, err := m.Api.ChainHead(ctx) + if err != nil { + return types.EmptyInt, xerrors.Errorf("getting chain head: %w", err) + } + + // get verified deal infos + var w, vw = big.Zero(), big.Zero() + + for _, piece := range sector.Pieces { + if !piece.HasDealInfo() { + // todo StateMinerInitialPledgeCollateral 
doesn't add cc/padding to non-verified weight, is that correct? + continue + } + + alloc, err := piece.GetAllocation(ctx, m.Api, ts.Key()) + if err != nil || alloc == nil { + w = big.Add(w, abi.NewStoragePower(int64(piece.Piece().Size))) + continue + } + + vw = big.Add(vw, abi.NewStoragePower(int64(piece.Piece().Size))) + } + + // load market actor + duration := expiration - ts.Height() + sectorWeight := builtin.QAPowerForWeight(ssize, duration, w, vw) + + return sectorWeight, nil +} diff --git a/storage/pipeline/precommit_batch.go b/storage/pipeline/precommit_batch.go index 3a86c8628..099988010 100644 --- a/storage/pipeline/precommit_batch.go +++ b/storage/pipeline/precommit_batch.go @@ -36,6 +36,7 @@ type PreCommitBatcherApi interface { ChainHead(ctx context.Context) (*types.TipSet, error) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) + StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) // Address selector WalletBalance(context.Context, address.Address) (types.BigInt, error) @@ -428,11 +429,18 @@ func (b *PreCommitBatcher) Stop(ctx context.Context) error { func getDealStartCutoff(si SectorInfo) abi.ChainEpoch { cutoffEpoch := si.TicketEpoch + policy.MaxPreCommitRandomnessLookback for _, p := range si.Pieces { - if p.DealInfo == nil { + if !p.HasDealInfo() { + continue + } + + startEpoch, err := p.StartEpoch() + if err != nil { + // almost definitely can't happen, but if it does there's less harm in + // just logging the error and moving on + log.Errorw("failed to get deal start epoch", "error", err) continue } - startEpoch := p.DealInfo.DealSchedule.StartEpoch if startEpoch < cutoffEpoch { cutoffEpoch = startEpoch } @@ -444,15 +452,19 @@ func getDealStartCutoff(si SectorInfo) abi.ChainEpoch { func (b *PreCommitBatcher) getAllocationCutoff(si SectorInfo) abi.ChainEpoch { cutoff := si.TicketEpoch + policy.MaxPreCommitRandomnessLookback for _, p := range si.Pieces { - if p.DealInfo == nil { + if !p.HasDealInfo() { continue } - alloc, _ := b.api.StateGetAllocationForPendingDeal(b.mctx, p.DealInfo.DealID, types.EmptyTSK) + alloc, err := p.GetAllocation(b.mctx, b.api, types.EmptyTSK) + if err != nil { + log.Errorw("failed to get deal allocation", "error", err) + } // alloc is nil if this is not a verified deal in nv17 or later if alloc == nil { continue } + if alloc.Expiration < cutoff { cutoff = alloc.Expiration } diff --git a/storage/pipeline/precommit_batch_test.go b/storage/pipeline/precommit_batch_test.go deleted file mode 100644 index 1f3aaf244..000000000 --- a/storage/pipeline/precommit_batch_test.go +++ /dev/null @@ -1,291 +0,0 @@ -// stm: #unit -package sealing_test - -import ( - "bytes" - "context" - "sort" - "sync" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" - "github.com/filecoin-project/go-state-types/network" - miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/config" - 
pipeline "github.com/filecoin-project/lotus/storage/pipeline" - "github.com/filecoin-project/lotus/storage/pipeline/mocks" - "github.com/filecoin-project/lotus/storage/pipeline/sealiface" -) - -var fc = config.MinerFeeConfig{ - MaxPreCommitGasFee: types.FIL(types.FromFil(1)), - MaxCommitGasFee: types.FIL(types.FromFil(1)), - MaxTerminateGasFee: types.FIL(types.FromFil(1)), - MaxPreCommitBatchGasFee: config.BatchFeeConfig{Base: types.FIL(types.FromFil(3)), PerSector: types.FIL(types.FromFil(1))}, - MaxCommitBatchGasFee: config.BatchFeeConfig{Base: types.FIL(types.FromFil(3)), PerSector: types.FIL(types.FromFil(1))}, -} - -func TestPrecommitBatcher(t *testing.T) { - //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 - t0123, err := address.NewFromString("t0123") - require.NoError(t, err) - - ctx := context.Background() - - as := asel(func(ctx context.Context, mi api.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { - return t0123, big.Zero(), nil - }) - - maxBatch := miner6.PreCommitSectorBatchMaxSize - - cfg := func() (sealiface.Config, error) { - return sealiface.Config{ - MaxWaitDealsSectors: 2, - MaxSealingSectors: 0, - MaxSealingSectorsForDeals: 0, - WaitDealsDelay: time.Hour * 6, - AlwaysKeepUnsealedCopy: true, - - MaxPreCommitBatch: maxBatch, - PreCommitBatchWait: 24 * time.Hour, - PreCommitBatchSlack: 3 * time.Hour, - BatchPreCommitAboveBaseFee: big.NewInt(10000), - - AggregateCommits: true, - MinCommitBatch: miner6.MinAggregatedSectors, - MaxCommitBatch: miner6.MaxAggregatedSectors, - CommitBatchWait: 24 * time.Hour, - CommitBatchSlack: 1 * time.Hour, - - TerminateBatchMin: 1, - TerminateBatchMax: 100, - TerminateBatchWait: 5 * time.Minute, - }, nil - } - - type promise func(t *testing.T) - type action func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise - - actions := func(as ...action) action { - return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise { - var ps []promise - for _, a := range as { - p := a(t, s, pcb) - if p != nil { - ps = append(ps, p) - } - } - - if len(ps) > 0 { - return func(t *testing.T) { - for _, p := range ps { - p(t) - } - } - } - return nil - } - } - - addSector := func(sn abi.SectorNumber, aboveBalancer bool) action { - return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise { - var pcres sealiface.PreCommitBatchRes - var pcerr error - done := sync.Mutex{} - done.Lock() - - si := pipeline.SectorInfo{ - SectorNumber: sn, - } - - basefee := big.NewInt(9999) - if aboveBalancer { - basefee = big.NewInt(10001) - } - - s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, basefee, 1), nil).MaxTimes(2) // once in AddPreCommit - - go func() { - defer done.Unlock() - pcres, pcerr = pcb.AddPreCommit(ctx, si, big.Zero(), &minertypes.SectorPreCommitInfo{ - SectorNumber: si.SectorNumber, - SealedCID: fakePieceCid(t), - DealIDs: nil, - Expiration: 0, - }) - }() - - return func(t *testing.T) { - done.Lock() - require.NoError(t, pcerr) - require.Empty(t, pcres.Error) - require.Contains(t, pcres.Sectors, si.SectorNumber) - } - } - } - - addSectors := func(sectors []abi.SectorNumber, aboveBalancer bool) action { - as := make([]action, len(sectors)) - for i, sector := range sectors { - as[i] = addSector(sector, aboveBalancer) - } - return actions(as...) 
- } - - waitPending := func(n int) action { - return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise { - require.Eventually(t, func() bool { - p, err := pcb.Pending(ctx) - require.NoError(t, err) - return len(p) == n - }, time.Second*5, 10*time.Millisecond) - - return nil - } - } - - //stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001 - expectSend := func(expect []abi.SectorNumber, gasOverLimit bool) action { - return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise { - s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(api.MinerInfo{Owner: t0123, Worker: t0123}, nil) - if gasOverLimit { - s.EXPECT().GasEstimateMessageGas(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, &api.ErrOutOfGas{}) - } else { - s.EXPECT().GasEstimateMessageGas(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&types.Message{GasLimit: 100000}, nil) - } - - if !gasOverLimit { - s.EXPECT().MpoolPushMessage(gomock.Any(), funMatcher(func(i interface{}) bool { - b := i.(*types.Message) - var params miner6.PreCommitSectorBatchParams - require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b.Params))) - for s, number := range expect { - require.Equal(t, number, params.Sectors[s].SectorNumber) - } - return true - }), gomock.Any()).Return(dummySmsg, nil) - } - return nil - } - } - - expectInitialCalls := func() action { - return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise { - s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, big.NewInt(10001), 1), nil) - return nil - } - } - - flush := func(expect []abi.SectorNumber) action { - return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise { - _ = expectInitialCalls()(t, s, pcb) - _ = expectSend(expect, false)(t, s, pcb) - - r, err := pcb.Flush(ctx) - require.NoError(t, err) - require.Len(t, r, 1) - require.Empty(t, r[0].Error) - sort.Slice(r[0].Sectors, func(i, j int) bool { - return r[0].Sectors[i] < r[0].Sectors[j] - }) - require.Equal(t, expect, r[0].Sectors) - - return nil - } - } - - getSectors := func(n int) []abi.SectorNumber { - out := make([]abi.SectorNumber, n) - for i := range out { - out[i] = abi.SectorNumber(i) - } - return out - } - - tcs := map[string]struct { - actions []action - }{ - "addSingle": { - actions: []action{ - addSector(0, true), - waitPending(1), - flush([]abi.SectorNumber{0}), - }, - }, - "addMax": { - actions: []action{ - expectInitialCalls(), - expectSend(getSectors(maxBatch), false), - addSectors(getSectors(maxBatch), true), - }, - }, - "addMax-gasAboveLimit": { - actions: []action{ - expectInitialCalls(), - expectSend(getSectors(maxBatch), true), - expectSend(getSectors(maxBatch)[:maxBatch/2], false), - expectSend(getSectors(maxBatch)[maxBatch/2:], false), - addSectors(getSectors(maxBatch), true), - }, - }, - "addOne-belowBaseFee": { - actions: []action{ - expectSend(getSectors(1), false), - addSectors(getSectors(1), false), - }, - }, - } - - for name, tc := range tcs { - tc := tc - - t.Run(name, func(t *testing.T) { - // create go mock controller here - mockCtrl := gomock.NewController(t) - // when test is done, assert expectations on all mock objects. 
- defer mockCtrl.Finish() - - // create them mocks - pcapi := mocks.NewMockPreCommitBatcherApi(mockCtrl) - pcapi.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version20, nil).AnyTimes() - - pcb := pipeline.NewPreCommitBatcher(ctx, t0123, pcapi, as, fc, cfg) - - var promises []promise - - for _, a := range tc.actions { - p := a(t, pcapi, pcb) - if p != nil { - promises = append(promises, p) - } - } - - for _, p := range promises { - p(t) - } - - err := pcb.Stop(ctx) - require.NoError(t, err) - }) - } -} - -type funMatcher func(interface{}) bool - -func (funMatcher) Matches(interface{}) bool { - return true -} - -func (funMatcher) String() string { - return "fun" -} diff --git a/storage/pipeline/precommit_policy.go b/storage/pipeline/precommit_policy.go index 6e234f930..6df44d407 100644 --- a/storage/pipeline/precommit_policy.go +++ b/storage/pipeline/precommit_policy.go @@ -9,7 +9,6 @@ import ( "github.com/filecoin-project/go-state-types/builtin/v8/miner" "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" @@ -17,7 +16,7 @@ import ( ) type PreCommitPolicy interface { - Expiration(ctx context.Context, ps ...api.SectorPiece) (abi.ChainEpoch, error) + Expiration(ctx context.Context, ps ...SafeSectorPiece) (abi.ChainEpoch, error) } type Chain interface { @@ -60,7 +59,7 @@ func NewBasicPreCommitPolicy(api Chain, cfgGetter dtypes.GetSealingConfigFunc, p // Expiration produces the pre-commit sector expiration epoch for an encoded // replica containing the provided enumeration of pieces and deals. -func (p *BasicPreCommitPolicy) Expiration(ctx context.Context, ps ...api.SectorPiece) (abi.ChainEpoch, error) { +func (p *BasicPreCommitPolicy) Expiration(ctx context.Context, ps ...SafeSectorPiece) (abi.ChainEpoch, error) { ts, err := p.api.ChainHead(ctx) if err != nil { return 0, err @@ -69,17 +68,22 @@ func (p *BasicPreCommitPolicy) Expiration(ctx context.Context, ps ...api.SectorP var end *abi.ChainEpoch for _, p := range ps { - if p.DealInfo == nil { + if !p.HasDealInfo() { continue } - if p.DealInfo.DealSchedule.EndEpoch < ts.Height() { + endEpoch, err := p.EndEpoch() + if err != nil { + return 0, xerrors.Errorf("failed to get end epoch: %w", err) + } + + if endEpoch < ts.Height() { log.Warnf("piece schedule %+v ended before current epoch %d", p, ts.Height()) continue } - if end == nil || *end < p.DealInfo.DealSchedule.EndEpoch { - tmp := p.DealInfo.DealSchedule.EndEpoch + if end == nil || *end < endEpoch { + tmp := endEpoch end = &tmp } } diff --git a/storage/pipeline/precommit_policy_test.go b/storage/pipeline/precommit_policy_test.go index 7865560de..ec2a61ff2 100644 --- a/storage/pipeline/precommit_policy_test.go +++ b/storage/pipeline/precommit_policy_test.go @@ -11,6 +11,8 @@ import ( commcid "github.com/filecoin-project/go-fil-commcid" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" @@ -20,6 +22,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/modules/dtypes" pipeline "github.com/filecoin-project/lotus/storage/pipeline" + "github.com/filecoin-project/lotus/storage/pipeline/piece" 
"github.com/filecoin-project/lotus/storage/pipeline/sealiface" ) @@ -47,6 +50,39 @@ func (f *fakeChain) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey return build.TestNetworkVersion, nil } +func makeBFTs(t *testing.T, basefee abi.TokenAmount, h abi.ChainEpoch) *types.TipSet { + dummyCid, _ := cid.Parse("bafkqaaa") + + var ts, err = types.NewTipSet([]*types.BlockHeader{ + { + Height: h, + Miner: builtin.SystemActorAddr, + + Parents: []cid.Cid{}, + + Ticket: &types.Ticket{VRFProof: []byte{byte(h % 2)}}, + + ParentStateRoot: dummyCid, + Messages: dummyCid, + ParentMessageReceipts: dummyCid, + + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, + + ParentBaseFee: basefee, + }, + }) + if t != nil { + require.NoError(t, err) + } + + return ts +} + +func makeTs(t *testing.T, h abi.ChainEpoch) *types.TipSet { + return makeBFTs(t, big.NewInt(0), h) +} + func (f *fakeChain) ChainHead(ctx context.Context) (*types.TipSet, error) { return makeTs(nil, f.h), nil } @@ -58,6 +94,10 @@ func fakePieceCid(t *testing.T) cid.Cid { return fakePieceCid } +func cidPtr(c cid.Cid) *cid.Cid { + return &c +} + func TestBasicPolicyEmptySector(t *testing.T) { cfg := fakeConfigGetter(nil) h := abi.ChainEpoch(55) @@ -97,33 +137,35 @@ func TestBasicPolicyMostConstrictiveSchedule(t *testing.T) { h: abi.ChainEpoch(55), }, cfg, 2) longestDealEpochEnd := abi.ChainEpoch(547300) - pieces := []api.SectorPiece{ - { + pieces := []pipeline.SafeSectorPiece{ + pipeline.SafePiece(api.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &api.PieceDealInfo{ - DealID: abi.DealID(42), - DealSchedule: api.DealSchedule{ + DealInfo: &piece.PieceDealInfo{ + PublishCid: cidPtr(fakePieceCid(t)), // pretend this is a valid builtin-market deal + DealID: abi.DealID(42), + DealSchedule: piece.DealSchedule{ StartEpoch: abi.ChainEpoch(70), EndEpoch: abi.ChainEpoch(547275), }, }, - }, - { + }), + pipeline.SafePiece(api.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &api.PieceDealInfo{ - DealID: abi.DealID(43), - DealSchedule: api.DealSchedule{ + DealInfo: &piece.PieceDealInfo{ + PublishCid: cidPtr(fakePieceCid(t)), // pretend this is a valid builtin-market deal + DealID: abi.DealID(43), + DealSchedule: piece.DealSchedule{ StartEpoch: abi.ChainEpoch(80), EndEpoch: longestDealEpochEnd, }, }, - }, + }), } exp, err := policy.Expiration(context.Background(), pieces...) @@ -138,20 +180,21 @@ func TestBasicPolicyIgnoresExistingScheduleIfExpired(t *testing.T) { h: abi.ChainEpoch(55), }, cfg, 0) - pieces := []api.SectorPiece{ - { + pieces := []pipeline.SafeSectorPiece{ + pipeline.SafePiece(api.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &api.PieceDealInfo{ - DealID: abi.DealID(44), - DealSchedule: api.DealSchedule{ + DealInfo: &piece.PieceDealInfo{ + PublishCid: cidPtr(fakePieceCid(t)), // pretend this is a valid builtin-market deal + DealID: abi.DealID(44), + DealSchedule: piece.DealSchedule{ StartEpoch: abi.ChainEpoch(1), EndEpoch: abi.ChainEpoch(10), }, }, - }, + }), } exp, err := pcp.Expiration(context.Background(), pieces...) 
@@ -170,27 +213,28 @@ func TestMissingDealIsIgnored(t *testing.T) { h: abi.ChainEpoch(55), }, cfg, 0) - pieces := []api.SectorPiece{ - { + pieces := []pipeline.SafeSectorPiece{ + pipeline.SafePiece(api.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &api.PieceDealInfo{ - DealID: abi.DealID(44), - DealSchedule: api.DealSchedule{ + DealInfo: &piece.PieceDealInfo{ + PublishCid: cidPtr(fakePieceCid(t)), // pretend this is a valid builtin-market deal + DealID: abi.DealID(44), + DealSchedule: piece.DealSchedule{ StartEpoch: abi.ChainEpoch(1), EndEpoch: abi.ChainEpoch(547300), }, }, - }, - { + }), + pipeline.SafePiece(api.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, DealInfo: nil, - }, + }), } exp, err := policy.Expiration(context.Background(), pieces...) diff --git a/storage/pipeline/receive.go b/storage/pipeline/receive.go index 8427eba54..231afbc39 100644 --- a/storage/pipeline/receive.go +++ b/storage/pipeline/receive.go @@ -86,6 +86,11 @@ func (m *Sealing) checkSectorMeta(ctx context.Context, meta api.RemoteSectorMeta return SectorInfo{}, xerrors.Errorf("getting chain head: %w", err) } + nv, err := m.Api.StateNetworkVersion(ctx, ts.Key()) + if err != nil { + return SectorInfo{}, xerrors.Errorf("getting network version: %w", err) + } + var info SectorInfo var validatePoRep bool @@ -217,9 +222,24 @@ func (m *Sealing) checkSectorMeta(ctx context.Context, meta api.RemoteSectorMeta info.State = ReceiveSector info.SectorNumber = meta.Sector.Number - info.Pieces = meta.Pieces + info.Pieces = make([]SafeSectorPiece, len(meta.Pieces)) info.SectorType = meta.Type + for i, piece := range meta.Pieces { + info.Pieces[i] = SafeSectorPiece{ + real: piece, + } + + if !info.Pieces[i].HasDealInfo() { + continue // cc + } + + err := info.Pieces[i].DealInfo().Valid(nv) + if err != nil { + return SectorInfo{}, xerrors.Errorf("piece %d deal info invalid: %w", i, err) + } + } + if meta.RemoteSealingDoneEndpoint != "" { // validate the url if _, err := url.Parse(meta.RemoteSealingDoneEndpoint); err != nil { @@ -229,7 +249,7 @@ func (m *Sealing) checkSectorMeta(ctx context.Context, meta api.RemoteSectorMeta info.RemoteSealingDoneEndpoint = meta.RemoteSealingDoneEndpoint } - if err := checkPieces(ctx, m.maddr, meta.Sector.Number, meta.Pieces, m.Api, false); err != nil { + if err := checkPieces(ctx, m.maddr, meta.Sector.Number, info.Pieces, m.Api, false); err != nil { return SectorInfo{}, xerrors.Errorf("checking pieces: %w", err) } diff --git a/storage/pipeline/sealiface/config.go b/storage/pipeline/sealiface/config.go index e41b143ec..2ac6e0d58 100644 --- a/storage/pipeline/sealiface/config.go +++ b/storage/pipeline/sealiface/config.go @@ -62,4 +62,9 @@ type Config struct { TerminateBatchWait time.Duration UseSyntheticPoRep bool + + RequireActivationSuccess bool + RequireActivationSuccessUpdate bool + RequireNotificationSuccess bool + RequireNotificationSuccessUpdate bool } diff --git a/storage/pipeline/sealing.go b/storage/pipeline/sealing.go index 936bd8b39..75791fae8 100644 --- a/storage/pipeline/sealing.go +++ b/storage/pipeline/sealing.go @@ -5,6 +5,7 @@ import ( "sync" "time" + blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" @@ -25,12 +26,15 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" lminer 
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/journal" + "github.com/filecoin-project/lotus/lib/result" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/storage/ctladdr" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/storiface" @@ -49,7 +53,6 @@ type SealingAPI interface { StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error) - StateComputeDataCID(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tsk types.TipSetKey) (cid.Cid, error) StateSectorGetInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*lminer.SectorLocation, error) StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) @@ -72,6 +75,13 @@ type SealingAPI interface { ChainReadObj(context.Context, cid.Cid) ([]byte, error) StateMinerAllocated(context.Context, address.Address, types.TipSetKey) (*bitfield.BitField, error) StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) + StateGetAllocationIdForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (verifreg.AllocationId, error) + StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) + + StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) + StateVMCirculatingSupplyInternal(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) + ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) + ChainPutObj(ctx context.Context, block blocks.Block) error // Address selector WalletBalance(context.Context, address.Address) (types.BigInt, error) @@ -110,8 +120,8 @@ type Sealing struct { inputLk sync.Mutex openSectors map[abi.SectorID]*openSector sectorTimers map[abi.SectorID]*time.Timer - pendingPieces map[cid.Cid]*pendingPiece - assignedPieces map[abi.SectorID][]cid.Cid + pendingPieces map[piece.PieceKey]*pendingPiece + assignedPieces map[abi.SectorID][]piece.PieceKey nextDealSector *abi.SectorNumber // used to prevent a race where we could create a new sector more than once available map[abi.SectorID]struct{} @@ -139,16 +149,16 @@ type openSector struct { number abi.SectorNumber ccUpdate bool - maybeAccept func(cid.Cid) error // called with inputLk + maybeAccept func(key piece.PieceKey) error // called with inputLk } func (o *openSector) checkDealAssignable(piece *pendingPiece, expF expFn) (bool, error) { log := log.With( "sector", o.number, - "deal", piece.deal.DealID, - "dealEnd", piece.deal.DealProposal.EndEpoch, - 
"dealStart", piece.deal.DealProposal.StartEpoch, + "piece", piece.deal.String(), + "dealEnd", result.Wrap(piece.deal.EndEpoch()), + "dealStart", result.Wrap(piece.deal.StartEpoch()), "dealClaimEnd", piece.claimTerms.claimTermEnd, "lastAssignedDealEnd", o.lastDealEnd, @@ -181,7 +191,12 @@ func (o *openSector) checkDealAssignable(piece *pendingPiece, expF expFn) (bool, return false, nil } - if sectorExpiration < piece.deal.DealProposal.EndEpoch { + endEpoch, err := piece.deal.EndEpoch() + if err != nil { + return false, xerrors.Errorf("failed to get end epoch: %w", err) + } + + if sectorExpiration < endEpoch { log.Debugw("deal not assignable to sector", "reason", "sector expiration less than deal expiration") return false, nil } @@ -205,7 +220,7 @@ type pendingPiece struct { resp *pieceAcceptResp size abi.UnpaddedPieceSize - deal api.PieceDealInfo + deal UniversalPieceInfo claimTerms pieceClaimBounds @@ -215,10 +230,10 @@ type pendingPiece struct { accepted func(abi.SectorNumber, abi.UnpaddedPieceSize, error) } -func New(mctx context.Context, api SealingAPI, fc config.MinerFeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sealer.SectorManager, verif storiface.Verifier, prov storiface.Prover, pcp PreCommitPolicy, gc dtypes.GetSealingConfigFunc, journal journal.Journal, addrSel AddressSelector) *Sealing { +func New(mctx context.Context, sapi SealingAPI, fc config.MinerFeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sealer.SectorManager, verif storiface.Verifier, prov storiface.Prover, pcp PreCommitPolicy, gc dtypes.GetSealingConfigFunc, journal journal.Journal, addrSel AddressSelector) *Sealing { s := &Sealing{ - Api: api, - DealInfo: &CurrentDealInfoManager{api}, + Api: sapi, + DealInfo: &CurrentDealInfoManager{sapi}, ds: ds, @@ -232,8 +247,8 @@ func New(mctx context.Context, api SealingAPI, fc config.MinerFeeConfig, events openSectors: map[abi.SectorID]*openSector{}, sectorTimers: map[abi.SectorID]*time.Timer{}, - pendingPieces: map[cid.Cid]*pendingPiece{}, - assignedPieces: map[abi.SectorID][]cid.Cid{}, + pendingPieces: map[piece.PieceKey]*pendingPiece{}, + assignedPieces: map[abi.SectorID][]piece.PieceKey{}, available: map[abi.SectorID]struct{}{}, @@ -242,9 +257,9 @@ func New(mctx context.Context, api SealingAPI, fc config.MinerFeeConfig, events addrSel: addrSel, - terminator: NewTerminationBatcher(mctx, maddr, api, addrSel, fc, gc), - precommiter: NewPreCommitBatcher(mctx, maddr, api, addrSel, fc, gc), - commiter: NewCommitBatcher(mctx, maddr, api, addrSel, fc, gc, prov), + terminator: NewTerminationBatcher(mctx, maddr, sapi, addrSel, fc, gc), + precommiter: NewPreCommitBatcher(mctx, maddr, sapi, addrSel, fc, gc), + commiter: NewCommitBatcher(mctx, maddr, sapi, addrSel, fc, gc, prov), getConfig: gc, diff --git a/storage/pipeline/states_failed.go b/storage/pipeline/states_failed.go index 3323c4c9b..3e4ea4dde 100644 --- a/storage/pipeline/states_failed.go +++ b/storage/pipeline/states_failed.go @@ -235,7 +235,7 @@ func (m *Sealing) handleSubmitReplicaUpdateFailed(ctx statemachine.Context, sect return nil } - if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, ts.Key(), m.Api); err != nil { + if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, m.Api); err != nil { switch err.(type) { case *ErrApi: log.Errorf("handleSubmitReplicaUpdateFailed: api error, not proceeding: %+v", err) @@ -265,7 +265,7 @@ func (m *Sealing) handleSubmitReplicaUpdateFailed(ctx statemachine.Context, sect } if !active { err := 
xerrors.Errorf("sector marked for upgrade %d no longer active, aborting upgrade", sector.SectorNumber) - log.Errorf(err.Error()) + log.Errorf("%s", err) return ctx.Send(SectorAbortUpgrade{err}) } @@ -466,7 +466,7 @@ func (m *Sealing) handleAbortUpgrade(ctx statemachine.Context, sector SectorInfo // failWith is a mutator or global mutator func (m *Sealing) handleRecoverDealIDsOrFailWith(ctx statemachine.Context, sector SectorInfo, failWith interface{}) error { - toFix, paddingPieces, err := recoveryPiecesToFix(ctx.Context(), m.Api, sector, m.maddr) + toFix, nonBuiltinMarketPieces, err := recoveryPiecesToFix(ctx.Context(), m.Api, sector, m.maddr) if err != nil { return err } @@ -478,33 +478,35 @@ func (m *Sealing) handleRecoverDealIDsOrFailWith(ctx statemachine.Context, secto updates := map[int]abi.DealID{} for _, i := range toFix { + // note: all toFix pieces are builtin-market pieces + p := sector.Pieces[i] - if p.DealInfo.PublishCid == nil { + if p.Impl().PublishCid == nil { // TODO: check if we are in an early enough state try to remove this piece - log.Errorf("can't fix sector deals: piece %d (of %d) of sector %d has nil DealInfo.PublishCid (refers to deal %d)", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID) + log.Errorf("can't fix sector deals: piece %d (of %d) of sector %d has nil DealInfo.PublishCid (refers to deal %d)", i, len(sector.Pieces), sector.SectorNumber, p.Impl().DealID) // Not much to do here (and this can only happen for old spacerace sectors) return ctx.Send(failWith) } var dp *market.DealProposal - if p.DealInfo.DealProposal != nil { - mdp := *p.DealInfo.DealProposal + if p.Impl().DealProposal != nil { + mdp := *p.Impl().DealProposal dp = &mdp } - res, err := m.DealInfo.GetCurrentDealInfo(ctx.Context(), ts.Key(), dp, *p.DealInfo.PublishCid) + res, err := m.DealInfo.GetCurrentDealInfo(ctx.Context(), ts.Key(), dp, *p.Impl().PublishCid) if err != nil { failed[i] = xerrors.Errorf("getting current deal info for piece %d: %w", i, err) continue } if res.MarketDeal == nil { - failed[i] = xerrors.Errorf("nil market deal (%d,%d,%d,%s)", i, sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID) + failed[i] = xerrors.Errorf("nil market deal (%d,%d,%d,%s)", i, sector.SectorNumber, p.Impl().DealID, p.Impl().DealProposal.PieceCID) continue } - if res.MarketDeal.Proposal.PieceCID != p.Piece.PieceCID { - failed[i] = xerrors.Errorf("recovered piece (%d) deal in sector %d (dealid %d) has different PieceCID %s != %s", i, sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, res.MarketDeal.Proposal.PieceCID) + if res.MarketDeal.Proposal.PieceCID != p.PieceCID() { + failed[i] = xerrors.Errorf("recovered piece (%d) deal in sector %d (dealid %d) has different PieceCID %s != %s", i, sector.SectorNumber, p.Impl().DealID, p.Impl().DealProposal.PieceCID, res.MarketDeal.Proposal.PieceCID) continue } @@ -517,7 +519,7 @@ func (m *Sealing) handleRecoverDealIDsOrFailWith(ctx statemachine.Context, secto merr = multierror.Append(merr, e) } - if len(failed)+paddingPieces == len(sector.Pieces) { + if len(failed)+nonBuiltinMarketPieces == len(sector.Pieces) { log.Errorf("removing sector %d: all deals expired or unrecoverable: %+v", sector.SectorNumber, merr) return ctx.Send(failWith) } @@ -542,6 +544,7 @@ func (m *Sealing) handleSnapDealsRecoverDealIDs(ctx statemachine.Context, sector return m.handleRecoverDealIDsOrFailWith(ctx, sector, SectorAbortUpgrade{xerrors.New("failed recovering deal ids")}) } +// recoveryPiecesToFix returns the list of sector piece indexes to fix, and the 
number of non-builtin-market pieces func recoveryPiecesToFix(ctx context.Context, api SealingAPI, sector SectorInfo, maddr address.Address) ([]int, int, error) { ts, err := api.ChainHead(ctx) if err != nil { @@ -549,51 +552,68 @@ func recoveryPiecesToFix(ctx context.Context, api SealingAPI, sector SectorInfo, } var toFix []int - paddingPieces := 0 + nonBuiltinMarketPieces := 0 for i, p := range sector.Pieces { - // if no deal is associated with the piece, ensure that we added it as - // filler (i.e. ensure that it has a zero PieceCID) - if p.DealInfo == nil { - exp := zerocomm.ZeroPieceCommitment(p.Piece.Size.Unpadded()) - if !p.Piece.PieceCID.Equals(exp) { - return nil, 0, xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sector.SectorNumber, i, p.Piece.PieceCID) - } - paddingPieces++ - continue - } + i, p := i, p + + err := p.handleDealInfo(handleDealInfoParams{ + FillerHandler: func(info UniversalPieceInfo) error { + // if no deal is associated with the piece, ensure that we added it as + // filler (i.e. ensure that it has a zero PieceCID) + exp := zerocomm.ZeroPieceCommitment(p.Piece().Size.Unpadded()) + if !info.PieceCID().Equals(exp) { + return xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sector.SectorNumber, i, p.Piece().PieceCID) + } + nonBuiltinMarketPieces++ + return nil + }, + BuiltinMarketHandler: func(info UniversalPieceInfo) error { + deal, err := api.StateMarketStorageDeal(ctx, p.DealInfo().Impl().DealID, ts.Key()) + if err != nil { + log.Warnf("getting deal %d for piece %d: %+v", p.DealInfo().Impl().DealID, i, err) + toFix = append(toFix, i) + return nil + } + + if deal.Proposal.Provider != maddr { + log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.Impl().DealID, deal.Proposal.Provider, maddr) + toFix = append(toFix, i) + return nil + } + + if deal.Proposal.PieceCID != p.Piece().PieceCID { + log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.Impl().DealID, p.Piece().PieceCID, deal.Proposal.PieceCID) + toFix = append(toFix, i) + return nil + } + + if p.Piece().Size != deal.Proposal.PieceSize { + log.Warnf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(sector.Pieces), sector.SectorNumber, p.Impl().DealID, p.Piece().Size, deal.Proposal.PieceSize) + toFix = append(toFix, i) + return nil + } + + if ts.Height() >= deal.Proposal.StartEpoch { + // TODO: check if we are in an early enough state (before precommit), try to remove the offending pieces + // (tricky as we have to 'defragment' the sector while doing that, and update piece references for retrieval) + return xerrors.Errorf("can't fix sector deals: piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(sector.Pieces), sector.SectorNumber, p.Impl().DealID, deal.Proposal.StartEpoch, ts.Height()) + } + + return nil + }, + DDOHandler: func(info UniversalPieceInfo) error { + // DDO pieces have no repair strategy + + nonBuiltinMarketPieces++ + return nil + }, + }) - deal, err := api.StateMarketStorageDeal(ctx, p.DealInfo.DealID, ts.Key()) if err != nil { - log.Warnf("getting deal %d for piece %d: %+v", p.DealInfo.DealID, i, err) - toFix = append(toFix, i) - continue - } - - if deal.Proposal.Provider != maddr { - log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, 
deal.Proposal.Provider, maddr) - toFix = append(toFix, i) - continue - } - - if deal.Proposal.PieceCID != p.Piece.PieceCID { - log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, deal.Proposal.PieceCID) - toFix = append(toFix, i) - continue - } - - if p.Piece.Size != deal.Proposal.PieceSize { - log.Warnf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.Size, deal.Proposal.PieceSize) - toFix = append(toFix, i) - continue - } - - if ts.Height() >= deal.Proposal.StartEpoch { - // TODO: check if we are in an early enough state (before precommit), try to remove the offending pieces - // (tricky as we have to 'defragment' the sector while doing that, and update piece references for retrieval) - return nil, 0, xerrors.Errorf("can't fix sector deals: piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, deal.Proposal.StartEpoch, ts.Height()) + return nil, 0, xerrors.Errorf("checking piece %d: %w", i, err) } } - return toFix, paddingPieces, nil + return toFix, nonBuiltinMarketPieces, nil } diff --git a/storage/pipeline/states_failed_test.go b/storage/pipeline/states_failed_test.go index f6846c8f5..bc658d59b 100644 --- a/storage/pipeline/states_failed_test.go +++ b/storage/pipeline/states_failed_test.go @@ -23,6 +23,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" pipeline "github.com/filecoin-project/lotus/storage/pipeline" "github.com/filecoin-project/lotus/storage/pipeline/mocks" + "github.com/filecoin-project/lotus/storage/pipeline/piece" ) func TestStateRecoverDealIDs(t *testing.T) { @@ -76,16 +77,16 @@ func TestStateRecoverDealIDs(t *testing.T) { // TODO sctx should satisfy an interface so it can be useable for mocking. This will fail because we are passing in an empty context now to get this to build. 
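(Aside on the fixtures used in these test hunks: tests now build pieces through pipeline.SafePiece, and the pipeline distinguishes three piece kinds. The sketch below is illustrative only and not part of this patch; the examplePieces helper and its placeholder sizes, deal ID and CID arguments are assumptions, though the field names follow the ones used elsewhere in this diff.)

```go
package sealing_test

import (
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-commp-utils/zerocomm"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	pipeline "github.com/filecoin-project/lotus/storage/pipeline"
	"github.com/filecoin-project/lotus/storage/pipeline/piece"
)

// examplePieces is an illustrative helper (not part of this patch) showing the
// three piece shapes the sealing pipeline distinguishes once wrapped in a
// SafeSectorPiece. pieceCid and publishCid are placeholder values.
func examplePieces(pieceCid, publishCid cid.Cid) []pipeline.SafeSectorPiece {
	return []pipeline.SafeSectorPiece{
		// 1. Filler: nil DealInfo; the PieceCID must be the zero commitment
		// for the piece size (1016 unpadded bytes for a 1024-byte padded piece).
		pipeline.SafePiece(api.SectorPiece{
			Piece:    abi.PieceInfo{Size: 1024, PieceCID: zerocomm.ZeroPieceCommitment(1016)},
			DealInfo: nil,
		}),
		// 2. Builtin-market deal: DealInfo with a non-nil PublishCid.
		pipeline.SafePiece(api.SectorPiece{
			Piece: abi.PieceInfo{Size: 1024, PieceCID: pieceCid},
			DealInfo: &piece.PieceDealInfo{
				PublishCid:   &publishCid,
				DealID:       abi.DealID(44),
				DealSchedule: piece.DealSchedule{StartEpoch: 1, EndEpoch: 100},
			},
		}),
		// 3. DDO piece: DealInfo present but PublishCid nil; activation is
		// described by a PieceActivationManifest instead of a market deal ID.
		pipeline.SafePiece(api.SectorPiece{
			Piece: abi.PieceInfo{Size: 1024, PieceCID: pieceCid},
			DealInfo: &piece.PieceDealInfo{
				PieceActivationManifest: &miner.PieceActivationManifest{
					CID:  pieceCid,
					Size: 1024,
				},
			},
		}),
	}
}
```

The classification rule is the one encoded by handleDealInfo in storage/pipeline/types.go further down: nil DealInfo means filler, a non-nil PublishCid means builtin-market deal, anything else is treated as a DDO piece.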
// https://github.com/filecoin-project/lotus/issues/7867 err := fakeSealing.HandleRecoverDealIDs(statemachine.Context{}, pipeline.SectorInfo{ - Pieces: []api2.SectorPiece{ - { - DealInfo: &api2.PieceDealInfo{ + Pieces: []pipeline.SafeSectorPiece{ + pipeline.SafePiece(api2.SectorPiece{ + DealInfo: &piece.PieceDealInfo{ DealID: dealId, PublishCid: &pc, }, Piece: abi.PieceInfo{ PieceCID: idCid("oldPieceCID"), }, - }, + }), }, }) require.NoError(t, err) diff --git a/storage/pipeline/states_replica_update.go b/storage/pipeline/states_replica_update.go index 6717f49a6..380078e75 100644 --- a/storage/pipeline/states_replica_update.go +++ b/storage/pipeline/states_replica_update.go @@ -22,7 +22,7 @@ import ( func (m *Sealing) handleReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error { // if the sector ended up not having any deals, abort the upgrade - if !sector.hasDeals() { + if !sector.hasData() { return ctx.Send(SectorAbortUpgrade{xerrors.New("sector had no deals")}) } @@ -58,7 +58,7 @@ func (m *Sealing) handleProveReplicaUpdate(ctx statemachine.Context, sector Sect } if !active { err := xerrors.Errorf("sector marked for upgrade %d no longer active, aborting upgrade", sector.SectorNumber) - log.Errorf(err.Error()) + log.Errorf("%s", err) return ctx.Send(SectorAbortUpgrade{err}) } @@ -82,14 +82,13 @@ func (m *Sealing) handleProveReplicaUpdate(ctx statemachine.Context, sector Sect } func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error { - ts, err := m.Api.ChainHead(ctx.Context()) if err != nil { log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err) return nil } - if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, ts.Key(), m.Api); err != nil { + if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, m.Api); err != nil { return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } @@ -114,24 +113,8 @@ func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector Sec log.Errorf("failed to get update proof type from seal proof: %+v", err) return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } - enc := new(bytes.Buffer) - params := &miner.ProveReplicaUpdatesParams{ - Updates: []miner.ReplicaUpdate{ - { - SectorID: sector.SectorNumber, - Deadline: sl.Deadline, - Partition: sl.Partition, - NewSealedSectorCID: *sector.UpdateSealed, - Deals: sector.dealIDs(), - UpdateProofType: updateProof, - ReplicaProof: sector.ReplicaUpdateProof, - }, - }, - } - if err := params.MarshalCBOR(enc); err != nil { - log.Errorf("failed to serialize update replica params: %w", err) - return ctx.Send(SectorSubmitReplicaUpdateFailed{}) - } + + // figure out from address and collateral cfg, err := m.getConfig() if err != nil { @@ -140,34 +123,24 @@ func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector Sec onChainInfo, err := m.Api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, ts.Key()) if err != nil { - log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err) - return nil + log.Errorf("failed to get sector info: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } if onChainInfo == nil { - return xerrors.Errorf("sector not found %d", sector.SectorNumber) + log.Errorw("on chain info was nil", "sector", sector.SectorNumber) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } - sp, err := m.currentSealProof(ctx.Context()) + weightUpdate, err := m.sectorWeight(ctx.Context(), sector, onChainInfo.Expiration) if err != nil { - log.Errorf("sealer failed to return 
current seal proof not proceeding: %+v", err) - return nil - } - virtualPCI := miner.SectorPreCommitInfo{ - SealProof: sp, - SectorNumber: sector.SectorNumber, - SealedCID: *sector.UpdateSealed, - //SealRandEpoch: 0, - DealIDs: sector.dealIDs(), - Expiration: onChainInfo.Expiration, - //ReplaceCapacity: false, - //ReplaceSectorDeadline: 0, - //ReplaceSectorPartition: 0, - //ReplaceSectorNumber: 0, + log.Errorf("failed to get sector weight: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } - collateral, err := m.Api.StateMinerInitialPledgeCollateral(ctx.Context(), m.maddr, virtualPCI, ts.Key()) + collateral, err := m.pledgeForPower(ctx.Context(), weightUpdate) if err != nil { - return xerrors.Errorf("getting initial pledge collateral: %w", err) + log.Errorf("failed to get pledge for power: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } collateral = big.Sub(collateral, onChainInfo.InitialPledge) @@ -194,13 +167,81 @@ func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector Sec log.Errorf("no good address to send replica update message from: %+v", err) return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } - mcid, err := sendMsg(ctx.Context(), m.Api, from, m.maddr, builtin.MethodsMiner.ProveReplicaUpdates, collateral, big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes()) + + // figure out message type + + pams, deals, err := m.processPieces(ctx.Context(), sector) + if err != nil { + log.Errorf("failed to process pieces: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + if len(pams) > 0 { + // PRU3 + + params := &miner.ProveReplicaUpdates3Params{ + SectorUpdates: []miner.SectorUpdateManifest{ + { + Sector: sector.SectorNumber, + Deadline: sl.Deadline, + Partition: sl.Partition, + NewSealedCID: *sector.UpdateSealed, + Pieces: pams, + }, + }, + SectorProofs: [][]byte{sector.ReplicaUpdateProof}, + UpdateProofsType: updateProof, + //AggregateProof + //AggregateProofType + RequireActivationSuccess: cfg.RequireActivationSuccessUpdate, + RequireNotificationSuccess: cfg.RequireNotificationSuccessUpdate, + } + + enc := new(bytes.Buffer) + if err := params.MarshalCBOR(enc); err != nil { + log.Errorf("failed to serialize update replica params: %w", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + mcid, err := sendMsg(ctx.Context(), m.Api, from, m.maddr, builtin.MethodsMiner.ProveReplicaUpdates3, collateral, big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes()) + if err != nil { + log.Errorf("handleSubmitReplicaUpdate: error sending message: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + return ctx.Send(SectorReplicaUpdateSubmitted{Message: mcid}) + } + + // PRU2 + params := &miner.ProveReplicaUpdatesParams2{ + Updates: []miner.ReplicaUpdate2{ + { + SectorID: sector.SectorNumber, + Deadline: sl.Deadline, + Partition: sl.Partition, + NewSealedSectorCID: *sector.UpdateSealed, + NewUnsealedSectorCID: *sector.UpdateUnsealed, + UpdateProofType: updateProof, + ReplicaProof: sector.ReplicaUpdateProof, + Deals: deals, + }, + }, + } + + enc := new(bytes.Buffer) + if err := params.MarshalCBOR(enc); err != nil { + log.Errorf("failed to serialize update replica params: %w", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + mcid, err := sendMsg(ctx.Context(), m.Api, from, m.maddr, builtin.MethodsMiner.ProveReplicaUpdates2, collateral, big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes()) if err != nil { log.Errorf("handleSubmitReplicaUpdate: error sending message: %+v", err) return 
ctx.Send(SectorSubmitReplicaUpdateFailed{}) } return ctx.Send(SectorReplicaUpdateSubmitted{Message: mcid}) + } func (m *Sealing) handleWaitMutable(ctx statemachine.Context, sector SectorInfo) error { diff --git a/storage/pipeline/states_sealing.go b/storage/pipeline/states_sealing.go index 5c91161ef..aef394789 100644 --- a/storage/pipeline/states_sealing.go +++ b/storage/pipeline/states_sealing.go @@ -12,11 +12,16 @@ import ( "github.com/ipfs/go-cid" "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" "github.com/filecoin-project/go-commp-utils/zerocomm" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin" + miner2 "github.com/filecoin-project/go-state-types/builtin/v13/miner" + verifreg13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg" + "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/network" @@ -25,6 +30,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" @@ -32,6 +38,8 @@ import ( "github.com/filecoin-project/lotus/storage/sealer/storiface" ) +const MinDDONetworkVersion = network.Version22 + var DealSectorPriority = 1024 var MaxTicketAge = policy.MaxPreCommitRandomnessLookback @@ -59,7 +67,7 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err m.cleanupAssignedDeals(sector) // if this is a snapdeals sector, but it ended up not having any deals, abort the upgrade - if sector.State == SnapDealsPacking && !sector.hasDeals() { + if sector.State == SnapDealsPacking && !sector.hasData() { return ctx.Send(SectorAbortUpgrade{xerrors.New("sector had no deals")}) } @@ -67,7 +75,7 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err var allocated abi.UnpaddedPieceSize for _, piece := range sector.Pieces { - allocated += piece.Piece.Size.Unpadded() + allocated += piece.Piece().Size.Unpadded() } ssize, err := sector.SectorType.SectorSize() @@ -417,11 +425,47 @@ func (m *Sealing) preCommitInfo(ctx statemachine.Context, sector SectorInfo) (*m SealedCID: *sector.CommR, SealRandEpoch: sector.TicketEpoch, - DealIDs: sector.dealIDs(), } - if sector.hasDeals() { + if sector.hasData() { + // only CC sectors don't have UnsealedCID params.UnsealedCid = sector.CommD + + // true when the sector has non-builtin-marked data + sectorIsDDO := false + + for _, piece := range sector.Pieces { + err := piece.handleDealInfo(handleDealInfoParams{ + FillerHandler: func(info UniversalPieceInfo) error { + return nil // ignore + }, + BuiltinMarketHandler: func(info UniversalPieceInfo) error { + if sectorIsDDO { + return nil // will be passed later in the Commit message + } + params.DealIDs = append(params.DealIDs, info.Impl().DealID) + return nil + }, + DDOHandler: func(info UniversalPieceInfo) error { + if nv < MinDDONetworkVersion { + return xerrors.Errorf("DDO sectors are not supported on network version %d", nv) + } + + log.Infow("DDO piece in sector", "sector", 
sector.SectorNumber, "piece", info.String()) + + sectorIsDDO = true + + // DDO sectors don't carry DealIDs, we will pass those + // deals in the Commit message later + params.DealIDs = nil + return nil + }, + }) + + if err != nil { + return nil, big.Zero(), types.EmptyTSK, xerrors.Errorf("handleDealInfo: %w", err) + } + } } collateral, err := m.Api.StateMinerPreCommitDepositForPower(ctx.Context(), m.maddr, *params, ts.Key()) @@ -572,10 +616,6 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) return xerrors.Errorf("getting config: %w", err) } - log.Info("scheduling seal proof computation...") - - log.Infof("KOMIT %d %x(%d); %x(%d); %v; r:%s; d:%s", sector.SectorNumber, sector.TicketValue, sector.TicketEpoch, sector.SeedValue, sector.SeedEpoch, sector.pieceInfos(), sector.CommR, sector.CommD) - if sector.CommD == nil || sector.CommR == nil { return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector had nil commR or commD")}) } @@ -700,6 +740,8 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) } func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo) error { + // TODO: Deprecate this path, always go through batcher, just respect the AggregateCommits config in there + cfg, err := m.getConfig() if err != nil { return xerrors.Errorf("getting config: %w", err) @@ -783,11 +825,113 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo }) } +// processPieces returns either: +// - a list of piece activation manifests +// - a list of deal IDs, if all non-filler pieces are deal-id pieces +func (m *Sealing) processPieces(ctx context.Context, sector SectorInfo) ([]miner.PieceActivationManifest, []abi.DealID, error) { + pams := make([]miner.PieceActivationManifest, 0, len(sector.Pieces)) + dealIDs := make([]abi.DealID, 0, len(sector.Pieces)) + var hasDDO bool + + for _, piece := range sector.Pieces { + piece := piece + + // first figure out if this is a ddo sector + err := piece.handleDealInfo(handleDealInfoParams{ + FillerHandler: func(info UniversalPieceInfo) error { + // Fillers are implicit (todo review: Are they??) + return nil + }, + BuiltinMarketHandler: func(info UniversalPieceInfo) error { + return nil + }, + DDOHandler: func(info UniversalPieceInfo) error { + hasDDO = true + return nil + }, + }) + if err != nil { + return nil, nil, xerrors.Errorf("handleDealInfo: %w", err) + } + } + for _, piece := range sector.Pieces { + piece := piece + + err := piece.handleDealInfo(handleDealInfoParams{ + FillerHandler: func(info UniversalPieceInfo) error { + // Fillers are implicit (todo review: Are they??) 
+ return nil + }, + BuiltinMarketHandler: func(info UniversalPieceInfo) error { + if hasDDO { + alloc, err := m.Api.StateGetAllocationIdForPendingDeal(ctx, info.Impl().DealID, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting allocation for deal %d: %w", info.Impl().DealID, err) + } + clid, err := m.Api.StateLookupID(ctx, info.Impl().DealProposal.Client, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting client address for deal %d: %w", info.Impl().DealID, err) + } + + clientId, err := address.IDFromAddress(clid) + if err != nil { + return xerrors.Errorf("getting client address for deal %d: %w", info.Impl().DealID, err) + } + + var vac *miner2.VerifiedAllocationKey + if alloc != verifreg.NoAllocationID { + vac = &miner2.VerifiedAllocationKey{ + Client: abi.ActorID(clientId), + ID: verifreg13.AllocationId(alloc), + } + } + + payload, err := cborutil.Dump(info.Impl().DealID) + if err != nil { + return xerrors.Errorf("serializing deal id: %w", err) + } + + pams = append(pams, miner.PieceActivationManifest{ + CID: piece.Piece().PieceCID, + Size: piece.Piece().Size, + VerifiedAllocationKey: vac, + Notify: []miner2.DataActivationNotification{ + { + Address: market.Address, + Payload: payload, + }, + }, + }) + + return nil + } + + dealIDs = append(dealIDs, info.Impl().DealID) + return nil + }, + DDOHandler: func(info UniversalPieceInfo) error { + pams = append(pams, *piece.Impl().PieceActivationManifest) + return nil + }, + }) + if err != nil { + return nil, nil, xerrors.Errorf("handleDealInfo: %w", err) + } + } + + return pams, dealIDs, nil +} + func (m *Sealing) handleSubmitCommitAggregate(ctx statemachine.Context, sector SectorInfo) error { if sector.CommD == nil || sector.CommR == nil { return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector had nil commR or commD")}) } + pams, dealIDs, err := m.processPieces(ctx.Context(), sector) + if err != nil { + return err + } + res, err := m.commiter.AddCommit(ctx.Context(), sector, AggregateInput{ Info: proof.AggregateSealVerifyInfo{ Number: sector.SectorNumber, @@ -796,8 +940,14 @@ func (m *Sealing) handleSubmitCommitAggregate(ctx statemachine.Context, sector S SealedCID: *sector.CommR, UnsealedCID: *sector.CommD, }, - Proof: sector.Proof, // todo: this correct?? 
+ Proof: sector.Proof, Spt: sector.SectorType, + + ActivationManifest: miner2.SectorActivationManifest{ + SectorNumber: sector.SectorNumber, + Pieces: pams, + }, + DealIDPrecommit: len(dealIDs) > 0, }) if err != nil || res.Error != "" { @@ -875,7 +1025,7 @@ func (m *Sealing) handleFinalizeSector(ctx statemachine.Context, sector SectorIn return ctx.Send(SectorFinalizeFailed{xerrors.Errorf("finalize sector: %w", err)}) } - if cfg.MakeCCSectorsAvailable && !sector.hasDeals() { + if cfg.MakeCCSectorsAvailable && !sector.hasData() { return ctx.Send(SectorFinalizedAvailable{}) } return ctx.Send(SectorFinalized{}) diff --git a/storage/pipeline/types.go b/storage/pipeline/types.go index e752eb2b9..48ae60546 100644 --- a/storage/pipeline/types.go +++ b/storage/pipeline/types.go @@ -2,14 +2,20 @@ package sealing import ( "context" + "encoding/json" + "io" "github.com/ipfs/go-cid" + "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -41,6 +47,20 @@ const ( RetCommitFailed = ReturnState(CommitFailed) ) +type UniversalPieceInfo interface { + Impl() piece.PieceDealInfo + String() string + Key() piece.PieceKey + + Valid(nv network.Version) error + StartEpoch() (abi.ChainEpoch, error) + EndEpoch() (abi.ChainEpoch, error) + PieceCID() cid.Cid + KeepUnsealedRequested() bool + + GetAllocation(ctx context.Context, aapi piece.AllocationAPI, tsk types.TipSetKey) (*verifreg.Allocation, error) +} + type SectorInfo struct { State SectorState SectorNumber abi.SectorNumber @@ -49,7 +69,7 @@ type SectorInfo struct { // Packing CreationTime int64 // unix seconds - Pieces []api.SectorPiece + Pieces []SafeSectorPiece // PreCommit1 TicketValue abi.SealRandomness @@ -79,7 +99,7 @@ type SectorInfo struct { // CCUpdate CCUpdate bool - CCPieces []api.SectorPiece + CCPieces []SafeSectorPiece UpdateSealed *cid.Cid UpdateUnsealed *cid.Cid ReplicaUpdateProof storiface.ReplicaUpdateProof @@ -113,18 +133,19 @@ type SectorInfo struct { func (t *SectorInfo) pieceInfos() []abi.PieceInfo { out := make([]abi.PieceInfo, len(t.Pieces)) for i, p := range t.Pieces { - out[i] = p.Piece + out[i] = p.Piece() } return out } -func (t *SectorInfo) dealIDs() []abi.DealID { - out := make([]abi.DealID, 0, len(t.Pieces)) - for _, p := range t.Pieces { - if p.DealInfo == nil { +func (t *SectorInfo) nonPaddingPieceInfos() []abi.PieceInfo { + out := make([]abi.PieceInfo, len(t.Pieces)) + for i, p := range t.Pieces { + if !p.HasDealInfo() { continue } - out = append(out, p.DealInfo.DealID) + + out[i] = p.Piece() } return out } @@ -132,14 +153,14 @@ func (t *SectorInfo) dealIDs() []abi.DealID { func (t *SectorInfo) existingPieceSizes() []abi.UnpaddedPieceSize { out := make([]abi.UnpaddedPieceSize, len(t.Pieces)) for i, p := range t.Pieces { - out[i] = p.Piece.Size.Unpadded() + out[i] = p.Piece().Size.Unpadded() } return out } -func (t *SectorInfo) hasDeals() bool { +func (t *SectorInfo) hasData() bool { for _, piece := range t.Pieces { - if piece.DealInfo != nil { + if piece.HasDealInfo() { return true } } @@ -151,7 +172,7 @@ func (t *SectorInfo) sealingCtx(ctx context.Context) context.Context { // TODO: 
can also take start epoch into account to give priority to sectors // we need sealed sooner - if t.hasDeals() { + if t.hasData() { return sealer.WithPriority(ctx, DealSectorPriority) } @@ -160,19 +181,19 @@ func (t *SectorInfo) sealingCtx(ctx context.Context) context.Context { // Returns list of offset/length tuples of sector data ranges which clients // requested to keep unsealed -func (t *SectorInfo) keepUnsealedRanges(pieces []api.SectorPiece, invert, alwaysKeep bool) []storiface.Range { +func (t *SectorInfo) keepUnsealedRanges(pieces []SafeSectorPiece, invert, alwaysKeep bool) []storiface.Range { var out []storiface.Range var at abi.UnpaddedPieceSize for _, piece := range pieces { - psize := piece.Piece.Size.Unpadded() + psize := piece.Piece().Size.Unpadded() at += psize - if piece.DealInfo == nil { + if !piece.HasDealInfo() { continue } - keep := piece.DealInfo.KeepUnsealed || alwaysKeep + keep := piece.DealInfo().KeepUnsealedRequested() || alwaysKeep if keep == invert { continue @@ -195,3 +216,110 @@ type SealingStateEvt struct { After SectorState Error string } + +// SafeSectorPiece is a wrapper around SectorPiece which makes it hard to misuse +// especially by making it hard to access raw Deal / DDO info +type SafeSectorPiece struct { + real api.SectorPiece +} + +func SafePiece(piece api.SectorPiece) SafeSectorPiece { + return SafeSectorPiece{piece} +} + +var _ UniversalPieceInfo = &SafeSectorPiece{} + +func (sp *SafeSectorPiece) Piece() abi.PieceInfo { + return sp.real.Piece +} + +func (sp *SafeSectorPiece) HasDealInfo() bool { + return sp.real.DealInfo != nil +} + +func (sp *SafeSectorPiece) DealInfo() UniversalPieceInfo { + return sp.real.DealInfo +} + +// cbor passthrough +func (sp *SafeSectorPiece) UnmarshalCBOR(r io.Reader) (err error) { + return sp.real.UnmarshalCBOR(r) +} + +func (sp *SafeSectorPiece) MarshalCBOR(w io.Writer) error { + return sp.real.MarshalCBOR(w) +} + +// json passthrough +func (sp *SafeSectorPiece) UnmarshalJSON(b []byte) error { + return json.Unmarshal(b, &sp.real) +} + +func (sp *SafeSectorPiece) MarshalJSON() ([]byte, error) { + return json.Marshal(sp.real) +} + +type handleDealInfoParams struct { + FillerHandler func(UniversalPieceInfo) error + BuiltinMarketHandler func(UniversalPieceInfo) error + DDOHandler func(UniversalPieceInfo) error +} + +func (sp *SafeSectorPiece) handleDealInfo(params handleDealInfoParams) error { + if !sp.HasDealInfo() { + if params.FillerHandler == nil { + return xerrors.Errorf("FillerHandler is not provided") + } + return params.FillerHandler(sp) + } + + if sp.real.DealInfo.PublishCid != nil { + if params.BuiltinMarketHandler == nil { + return xerrors.Errorf("BuiltinMarketHandler is not provided") + } + return params.BuiltinMarketHandler(sp) + } + + if params.DDOHandler == nil { + return xerrors.Errorf("DDOHandler is not provided") + } + return params.DDOHandler(sp) +} + +// SectorPiece Proxy + +func (sp *SafeSectorPiece) Impl() piece.PieceDealInfo { + return sp.real.DealInfo.Impl() +} + +func (sp *SafeSectorPiece) String() string { + return sp.real.DealInfo.String() +} + +func (sp *SafeSectorPiece) Key() piece.PieceKey { + return sp.real.DealInfo.Key() +} + +func (sp *SafeSectorPiece) Valid(nv network.Version) error { + return sp.real.DealInfo.Valid(nv) +} + +func (sp *SafeSectorPiece) StartEpoch() (abi.ChainEpoch, error) { + return sp.real.DealInfo.StartEpoch() +} + +func (sp *SafeSectorPiece) EndEpoch() (abi.ChainEpoch, error) { + return sp.real.DealInfo.EndEpoch() +} + +func (sp *SafeSectorPiece) PieceCID() cid.Cid { + 
return sp.real.DealInfo.PieceCID() +} + +func (sp *SafeSectorPiece) KeepUnsealedRequested() bool { + return sp.real.DealInfo.KeepUnsealedRequested() +} + +func (sp *SafeSectorPiece) GetAllocation(ctx context.Context, aapi piece.AllocationAPI, tsk types.TipSetKey) (*verifreg.Allocation, error) { + return sp.real.DealInfo.GetAllocation(ctx, aapi, tsk) +} diff --git a/storage/pipeline/types_test.go b/storage/pipeline/types_test.go index b8fbb113a..d92b68d55 100644 --- a/storage/pipeline/types_test.go +++ b/storage/pipeline/types_test.go @@ -13,6 +13,7 @@ import ( tutils "github.com/filecoin-project/specs-actors/v2/support/testing" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/storage/pipeline/piece" ) func TestSectorInfoSerialization(t *testing.T) { @@ -23,9 +24,9 @@ func TestSectorInfoSerialization(t *testing.T) { t.Fatal(err) } - dealInfo := api.PieceDealInfo{ + dealInfo := piece.PieceDealInfo{ DealID: d, - DealSchedule: api.DealSchedule{ + DealSchedule: piece.DealSchedule{ StartEpoch: 0, EndEpoch: 100, }, @@ -43,13 +44,13 @@ func TestSectorInfoSerialization(t *testing.T) { si := &SectorInfo{ State: "stateful", SectorNumber: 234, - Pieces: []api.SectorPiece{{ + Pieces: []SafeSectorPiece{{real: api.SectorPiece{ Piece: abi.PieceInfo{ Size: 5, PieceCID: dummyCid, }, DealInfo: &dealInfo, - }}, + }}}, CommD: &dummyCid, CommR: nil, Proof: nil, @@ -77,8 +78,8 @@ func TestSectorInfoSerialization(t *testing.T) { assert.Equal(t, si.State, si2.State) assert.Equal(t, si.SectorNumber, si2.SectorNumber) - assert.Equal(t, si.Pieces[0].DealInfo.DealID, si2.Pieces[0].DealInfo.DealID) - assert.Equal(t, si.Pieces[0].DealInfo.DealProposal.PieceCID, si2.Pieces[0].DealInfo.DealProposal.PieceCID) + assert.Equal(t, si.Pieces[0].Impl().DealID, si2.Pieces[0].Impl().DealID) + assert.Equal(t, si.Pieces[0].Impl().DealProposal.PieceCID, si2.Pieces[0].Impl().DealProposal.PieceCID) assert.Equal(t, *si.CommD, *si2.CommD) assert.DeepEqual(t, si.TicketValue, si2.TicketValue) assert.Equal(t, si.TicketEpoch, si2.TicketEpoch) diff --git a/storage/pipeline/upgrade_queue.go b/storage/pipeline/upgrade_queue.go index 9d9e1ca46..5e3392a9f 100644 --- a/storage/pipeline/upgrade_queue.go +++ b/storage/pipeline/upgrade_queue.go @@ -21,7 +21,7 @@ func (m *Sealing) MarkForUpgrade(ctx context.Context, id abi.SectorNumber) error return xerrors.Errorf("unable to snap-up sectors not in the 'Proving' state") } - if si.hasDeals() { + if si.hasData() { return xerrors.Errorf("not a committed-capacity sector, has deals") } diff --git a/storage/sealer/storiface/cbor_gen.go b/storage/sealer/storiface/cbor_gen.go index 0b42136ea..02726b5de 100644 --- a/storage/sealer/storiface/cbor_gen.go +++ b/storage/sealer/storiface/cbor_gen.go @@ -124,13 +124,11 @@ func (t *CallID) UnmarshalCBOR(r io.Reader) (err error) { if maj != cbg.MajByteString { return fmt.Errorf("expected byte array") } - if extra != 16 { return fmt.Errorf("expected array to have 16 elements") } t.ID = [16]uint8{} - if _, err := io.ReadFull(cr, t.ID[:]); err != nil { return err } @@ -356,6 +354,7 @@ func (t *SectorLocation) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -463,6 +462,7 @@ func (t *SectorLocation) UnmarshalCBOR(r io.Reader) (err error) { } } + } } diff --git a/storage/sectorblocks/blocks.go b/storage/sectorblocks/blocks.go index 1593174bd..4b84e18fb 100644 --- a/storage/sectorblocks/blocks.go +++ b/storage/sectorblocks/blocks.go @@ -19,6 +19,7 @@ import ( 
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -48,7 +49,7 @@ func DsKeyToDealID(key datastore.Key) (uint64, error) { } type SectorBuilder interface { - SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d api.PieceDealInfo) (api.SectorOffset, error) + SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d piece.PieceDealInfo) (api.SectorOffset, error) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) } @@ -100,7 +101,7 @@ func (st *SectorBlocks) writeRef(ctx context.Context, dealID abi.DealID, sectorI return st.keys.Put(ctx, DealIDToDsKey(dealID), newRef) // TODO: batch somehow } -func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d api.PieceDealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { +func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d piece.PieceDealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { so, err := st.SectorBuilder.SectorAddPieceToAny(ctx, size, r, d) if err != nil { return 0, 0, err