Merge branch 'master' into sbansal/nonce-coordination-and-consensus-for-chain-nodes

commit b95d1a6323
@@ -320,7 +320,7 @@ type StorageMiner interface {
 	// the path specified when calling CreateBackup is within the base path
 	CreateBackup(ctx context.Context, fpath string) error //perm:admin
 
-	CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
+	CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef) (map[abi.SectorNumber]string, error) //perm:admin
 
 	ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read
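Callers of the interface simply drop the trailing boolean; everything else is unchanged. A minimal caller sketch against the new signature — the helper name and import paths are illustrative (paths follow the current lotus layout and may differ by version), only the CheckProvable call itself comes from this diff:

    package provcheck

    import (
    	"context"

    	"github.com/filecoin-project/go-state-types/abi"

    	"github.com/filecoin-project/lotus/api"
    	"github.com/filecoin-project/lotus/storage/sealer/storiface"
    )

    // reportUnprovable is a hypothetical helper: it runs the provable check
    // (now always thorough) and returns failure reasons keyed by sector number.
    // Before this change the call took a trailing `expensive bool`.
    func reportUnprovable(ctx context.Context, miner api.StorageMiner, pp abi.RegisteredPoStProof, refs []storiface.SectorRef) (map[abi.SectorNumber]string, error) {
    	return miner.CheckProvable(ctx, pp, refs)
    }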
@@ -683,7 +683,7 @@ type StorageMinerStruct struct {
 	BeneficiaryWithdrawBalance func(p0 context.Context, p1 abi.TokenAmount) (cid.Cid, error) `perm:"admin"`
 
-	CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`
+	CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef) (map[abi.SectorNumber]string, error) `perm:"admin"`
 
 	ComputeDataCid func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data) (abi.PieceInfo, error) `perm:"admin"`
@@ -4160,14 +4160,14 @@ func (s *StorageMinerStub) BeneficiaryWithdrawBalance(p0 context.Context, p1 abi
 	return *new(cid.Cid), ErrNotSupported
 }
 
-func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
+func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef) (map[abi.SectorNumber]string, error) {
 	if s.Internal.CheckProvable == nil {
 		return *new(map[abi.SectorNumber]string), ErrNotSupported
 	}
-	return s.Internal.CheckProvable(p0, p1, p2, p3)
+	return s.Internal.CheckProvable(p0, p1, p2)
 }
 
-func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
+func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef) (map[abi.SectorNumber]string, error) {
 	return *new(map[abi.SectorNumber]string), ErrNotSupported
 }
Binary file not shown.
@@ -595,6 +595,7 @@ var actorControlList = &cli.Command{
 	printKey("owner", mi.Owner)
 	printKey("worker", mi.Worker)
+	printKey("beneficiary", mi.Beneficiary)
 	for i, ca := range mi.ControlAddresses {
 		printKey(fmt.Sprintf("control-%d", i), ca)
 	}
@@ -535,6 +535,7 @@ var stateList = []stateMeta{
 	{col: color.FgYellow, state: sealing.ProveReplicaUpdate},
 	{col: color.FgYellow, state: sealing.SubmitReplicaUpdate},
 	{col: color.FgYellow, state: sealing.ReplicaUpdateWait},
+	{col: color.FgYellow, state: sealing.WaitMutable},
 	{col: color.FgYellow, state: sealing.FinalizeReplicaUpdate},
 	{col: color.FgYellow, state: sealing.ReleaseSectorKey},
@@ -583,7 +583,7 @@ var provingCheckProvableCmd = &cli.Command{
 		})
 	}
 
-	bad, err := minerApi.CheckProvable(ctx, info.WindowPoStProofType, tocheck, cctx.Bool("slow"))
+	bad, err := minerApi.CheckProvable(ctx, info.WindowPoStProofType, tocheck)
 	if err != nil {
 		return err
 	}
@@ -415,8 +415,7 @@ Inputs:
   },
   "ProofType": 8
 }
 ],
- true
 ]
 ]
 ```
extern/filecoin-ffi (vendored)
@@ -1 +1 @@
-Subproject commit 1537f74659ba29fe8d01364123dfd2eb585a1551
+Subproject commit 280c4f8b94fd46dc824a5c827dece73ec7fe3efd
go.mod
@@ -37,7 +37,7 @@ require (
 	github.com/filecoin-project/go-data-transfer v1.15.2
 	github.com/filecoin-project/go-fil-commcid v0.1.0
 	github.com/filecoin-project/go-fil-commp-hashhash v0.1.0
-	github.com/filecoin-project/go-fil-markets v1.24.0-v17
+	github.com/filecoin-project/go-fil-markets v1.25.0
 	github.com/filecoin-project/go-jsonrpc v0.1.8
 	github.com/filecoin-project/go-legs v0.4.4
 	github.com/filecoin-project/go-padreader v0.0.1
@@ -104,7 +104,7 @@ require (
 	github.com/ipfs/go-unixfsnode v1.4.0
 	github.com/ipfs/interface-go-ipfs-core v0.7.0
 	github.com/ipld/go-car v0.4.0
-	github.com/ipld/go-car/v2 v2.4.1
+	github.com/ipld/go-car/v2 v2.5.0
 	github.com/ipld/go-codec-dagpb v1.3.2
 	github.com/ipld/go-ipld-prime v0.17.0
 	github.com/ipld/go-ipld-selector-text-lite v0.0.1
go.sum
@@ -327,8 +327,8 @@ github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88Oq
 github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
 github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo=
 github.com/filecoin-project/go-fil-commp-hashhash v0.1.0/go.mod h1:73S8WSEWh9vr0fDJVnKADhfIv/d6dCbAGaAGWbdJEI8=
-github.com/filecoin-project/go-fil-markets v1.24.0-v17 h1:YjT0usMeR6kdAo3RBfftTPe5bNIgNmBbo5YzJHF1iLk=
-github.com/filecoin-project/go-fil-markets v1.24.0-v17/go.mod h1:JW/UHkHDqP4MikCIIWNY5IHvTTsdv/zNMk9jJXKzhIU=
+github.com/filecoin-project/go-fil-markets v1.25.0 h1:zWkc1v84JL9KttiqOy2IIZB0jksIdAt1WLCdOP/KvAg=
+github.com/filecoin-project/go-fil-markets v1.25.0/go.mod h1:3lzXZt5mRHTHAmZ10sUviiutaLVL57B99FgBU1MYqWY=
 github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM=
 github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24=
 github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM=
@@ -861,8 +861,8 @@ github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBH
 github.com/ipld/go-car v0.4.0 h1:U6W7F1aKF/OJMHovnOVdst2cpQE5GhmHibQkAixgNcQ=
 github.com/ipld/go-car v0.4.0/go.mod h1:Uslcn4O9cBKK9wqHm/cLTFacg6RAPv6LZx2mxd2Ypl4=
 github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI=
-github.com/ipld/go-car/v2 v2.4.1 h1:9S+FYbQzQJ/XzsdiOV13W5Iu/i+gUnr6csbSD9laFEg=
-github.com/ipld/go-car/v2 v2.4.1/go.mod h1:zjpRf0Jew9gHqSvjsKVyoq9OY9SWoEKdYCQUKVaaPT0=
+github.com/ipld/go-car/v2 v2.5.0 h1:S9h7A6qBAJ+B1M1jIKtau+HPDe30UbM71vsyBzwvRIE=
+github.com/ipld/go-car/v2 v2.5.0/go.mod h1:jKjGOqoCj5zn6KjnabD6JbnCsMntqU2hLiU6baZVO3E=
 github.com/ipld/go-codec-dagpb v1.2.0/go.mod h1:6nBN7X7h8EOsEejZGqC7tej5drsdBAXbMHyBT+Fne5s=
 github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA=
 github.com/ipld/go-codec-dagpb v1.3.1/go.mod h1:ErNNglIi5KMur/MfFE/svtgQthzVvf+43MrzLbpcIZY=
@@ -406,7 +406,7 @@ func checkSectors(ctx context.Context, t *testing.T, api kit.TestFullNode, miner
 
 	require.Len(t, tocheck, expectChecked)
 
-	bad, err := miner.CheckProvable(ctx, info.WindowPoStProofType, tocheck, true)
+	bad, err := miner.CheckProvable(ctx, info.WindowPoStProofType, tocheck)
 	require.NoError(t, err)
 	require.Len(t, bad, expectBad)
 }
@@ -1295,20 +1295,17 @@ func (sm *StorageMinerAPI) CreateBackup(ctx context.Context, fpath string) error
 	return backup(ctx, sm.DS, fpath)
 }
 
-func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) {
-	var rg storiface.RGetter
-	if expensive {
-		rg = func(ctx context.Context, id abi.SectorID) (cid.Cid, bool, error) {
-			si, err := sm.Miner.SectorsStatus(ctx, id.Number, false)
-			if err != nil {
-				return cid.Undef, false, err
-			}
-			if si.CommR == nil {
-				return cid.Undef, false, xerrors.Errorf("commr is nil")
-			}
-
-			return *si.CommR, si.ReplicaUpdateMessage != nil, nil
-		}
+func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef) (map[abi.SectorNumber]string, error) {
+	rg := func(ctx context.Context, id abi.SectorID) (cid.Cid, bool, error) {
+		si, err := sm.Miner.SectorsStatus(ctx, id.Number, false)
+		if err != nil {
+			return cid.Undef, false, err
+		}
+		if si.CommR == nil {
+			return cid.Undef, false, xerrors.Errorf("commr is nil")
+		}
+
+		return *si.CommR, si.ReplicaUpdateMessage != nil, nil
 	}
 
 	bad, err := sm.StorageMgr.CheckProvable(ctx, pp, sectors, rg)
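With the `expensive` flag gone, the RGetter callback is installed unconditionally, so every check resolves the sector's sealed CID (CommR) and whether a replica-update message is in flight (the bool in the return). For tests, a stub matching the storiface.RGetter shape visible in the removed code could look like this — a hypothetical sketch, not part of the diff:

    // rgFixed builds a storiface.RGetter that reports a fixed sealed CID and
    // "no replica update in flight" for every sector it is asked about.
    func rgFixed(sealed cid.Cid) storiface.RGetter {
    	return func(ctx context.Context, id abi.SectorID) (cid.Cid, bool, error) {
    		return sealed, false, nil
    	}
    }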
@@ -188,6 +188,11 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
 	SubmitReplicaUpdate: planOne(
 		on(SectorReplicaUpdateSubmitted{}, ReplicaUpdateWait),
 		on(SectorSubmitReplicaUpdateFailed{}, ReplicaUpdateFailed),
+		on(SectorDeadlineImmutable{}, WaitMutable),
 	),
+	WaitMutable: planOne(
+		on(SectorDeadlineMutable{}, SubmitReplicaUpdate),
+		on(SectorAbortUpgrade{}, AbortUpgrade),
+	),
 	ReplicaUpdateWait: planOne(
 		on(SectorReplicaUpdateLanded{}, UpdateActivating),
@@ -525,6 +530,8 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
 		return m.handleProveReplicaUpdate, processed, nil
 	case SubmitReplicaUpdate:
 		return m.handleSubmitReplicaUpdate, processed, nil
+	case WaitMutable:
+		return m.handleWaitMutable, processed, nil
 	case ReplicaUpdateWait:
 		return m.handleReplicaUpdateWait, processed, nil
 	case FinalizeReplicaUpdate:
@@ -323,6 +323,9 @@ func (evt SectorStartCCUpdate) apply(state *SectorInfo) {
 	// Clear filler piece but remember in case of abort
 	state.CCPieces = state.Pieces
 	state.Pieces = nil
+
+	// Clear CreationTime in case this sector was accepting piece data previously
+	state.CreationTime = 0
 }
 
 type SectorReplicaUpdate struct {
@@ -458,6 +461,7 @@ func (evt SectorRevertUpgradeToProving) apply(state *SectorInfo) {
 	state.ReplicaUpdateMessage = nil
 	state.Pieces = state.CCPieces
 	state.CCPieces = nil
+	state.CreationTime = 0
 }
 
 type SectorRetrySubmitReplicaUpdateWait struct{}
@@ -472,6 +476,14 @@ type SectorSubmitReplicaUpdateFailed struct{}
 
 func (evt SectorSubmitReplicaUpdateFailed) apply(state *SectorInfo) {}
 
+type SectorDeadlineImmutable struct{}
+
+func (evt SectorDeadlineImmutable) apply(state *SectorInfo) {}
+
+type SectorDeadlineMutable struct{}
+
+func (evt SectorDeadlineMutable) apply(state *SectorInfo) {}
+
 type SectorReleaseKeyFailed struct{ error }
 
 func (evt SectorReleaseKeyFailed) FormatError(xerrors.Printer) (next error) {
@@ -390,3 +390,64 @@ func TestTicketExpired(t *testing.T) {
 		}
 	}
 }
+
+func TestCreationTimeCleared(t *testing.T) {
+	var notif []struct{ before, after SectorInfo }
+	ma, _ := address.NewIDAddress(55151)
+	m := test{
+		s: &Sealing{
+			maddr: ma,
+			stats: SectorStats{
+				bySector: map[abi.SectorID]SectorState{},
+				byState:  map[SectorState]int64{},
+			},
+			notifee: func(before, after SectorInfo) {
+				notif = append(notif, struct{ before, after SectorInfo }{before, after})
+			},
+		},
+		t:     t,
+		state: &SectorInfo{State: Available},
+	}
+
+	// sector starts with zero CreationTime
+	m.planSingle(SectorStartCCUpdate{})
+	require.Equal(m.t, m.state.State, SnapDealsWaitDeals)
+
+	require.Equal(t, int64(0), m.state.CreationTime)
+
+	// first AddPiece will set CreationTime
+	m.planSingle(SectorAddPiece{})
+	require.Equal(m.t, m.state.State, SnapDealsAddPiece)
+
+	require.NotEqual(t, int64(0), m.state.CreationTime)
+
+	m.planSingle(SectorPieceAdded{})
+	require.Equal(m.t, m.state.State, SnapDealsWaitDeals)
+
+	// abort should clean up CreationTime
+	m.planSingle(SectorAbortUpgrade{})
+	require.Equal(m.t, m.state.State, AbortUpgrade)
+
+	require.NotEqual(t, int64(0), m.state.CreationTime)
+
+	m.planSingle(SectorRevertUpgradeToProving{})
+	require.Equal(m.t, m.state.State, Proving)
+
+	require.Equal(t, int64(0), m.state.CreationTime)
+
+	m.planSingle(SectorMarkForUpdate{})
+
+	// in case CreationTime was set for whatever reason (lotus bug / manual sector state change)
+	// make sure we clean it up when starting the upgrade
+	m.state.CreationTime = 325
+	m.planSingle(SectorStartCCUpdate{})
+	require.Equal(m.t, m.state.State, SnapDealsWaitDeals)
+
+	require.Equal(t, int64(0), m.state.CreationTime)
+
+	// "first" AddPiece will set CreationTime again
+	m.planSingle(SectorAddPiece{})
+	require.Equal(m.t, m.state.State, SnapDealsAddPiece)
+
+	require.NotEqual(t, int64(0), m.state.CreationTime)
+}
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/ipfs/go-cid"
+	"go.uber.org/zap"
 	"golang.org/x/xerrors"
 
 	"github.com/filecoin-project/go-commp-utils/zerocomm"
@@ -91,12 +92,17 @@ func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) e
 }
 
 func (m *Sealing) maybeStartSealing(ctx statemachine.Context, sector SectorInfo, used abi.UnpaddedPieceSize) (bool, error) {
+	log := log.WithOptions(zap.Fields(
+		zap.Uint64("sector", uint64(sector.SectorNumber)),
+		zap.Int("deals", len(sector.dealIDs())),
+	))
+
 	now := time.Now()
 	st := m.sectorTimers[m.minerSectorID(sector.SectorNumber)]
 	if st != nil {
 		if !st.Stop() { // timer expired, SectorStartPacking was/is being sent
 			// we send another SectorStartPacking in case one was sent in the handleAddPiece state
-			log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "wait-timeout")
+			log.Infow("starting to seal deal sector", "trigger", "wait-timeout")
 			return true, ctx.Send(SectorStartPacking{})
 		}
 	}
@@ -113,13 +119,13 @@ func (m *Sealing) maybeStartSealing(ctx statemachine.Context, sector SectorInfo,
 
 	if len(sector.dealIDs()) >= maxDeals {
 		// can't accept more deals
-		log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "maxdeals")
+		log.Infow("starting to seal deal sector", "trigger", "maxdeals")
 		return true, ctx.Send(SectorStartPacking{})
 	}
 
 	if used.Padded() == abi.PaddedPieceSize(ssize) {
 		// sector full
-		log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "filled")
+		log.Infow("starting to seal deal sector", "trigger", "filled")
 		return true, ctx.Send(SectorStartPacking{})
 	}
@@ -149,15 +155,15 @@ func (m *Sealing) maybeStartSealing(ctx statemachine.Context, sector SectorInfo,
 	}
 
 	if now.After(sealTime) {
-		log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "wait-timeout")
+		log.Infow("starting to seal deal sector", "trigger", "wait-timeout", "creation", sector.CreationTime)
 		return true, ctx.Send(SectorStartPacking{})
 	}
 
 	m.sectorTimers[m.minerSectorID(sector.SectorNumber)] = time.AfterFunc(sealTime.Sub(now), func() {
-		log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "wait-timer")
+		log.Infow("starting to seal deal sector", "trigger", "wait-timer")
 
 		if err := ctx.Send(SectorStartPacking{}); err != nil {
-			log.Errorw("sending SectorStartPacking event failed", "sector", sector.SectorNumber, "error", err)
+			log.Errorw("sending SectorStartPacking event failed", "error", err)
 		}
 	})
 }
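The logging refactor above drops the repeated "sector" key from each call and instead shadows the package logger with one pre-scoped via log.WithOptions(zap.Fields(...)), so every Infow/Errorw in the function carries the sector number and deal count automatically. A minimal standalone sketch of the same pattern (logger name and field values are illustrative):

    package main

    import (
    	logging "github.com/ipfs/go-log/v2"
    	"go.uber.org/zap"
    )

    var log = logging.Logger("sealing-demo")

    func main() {
    	// Attach structured fields once; they appear on every subsequent call.
    	log := log.WithOptions(zap.Fields(
    		zap.Uint64("sector", 42),
    		zap.Int("deals", 3),
    	))
    	log.Infow("starting to seal deal sector", "trigger", "filled")
    }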
@@ -53,6 +53,7 @@ var ExistSectorStateList = map[SectorState]struct{}{
 	UpdateReplica:       {},
 	ProveReplicaUpdate:  {},
 	SubmitReplicaUpdate: {},
+	WaitMutable:         {},
 	ReplicaUpdateWait:   {},
 	UpdateActivating:    {},
 	ReleaseSectorKey:    {},
@@ -110,6 +111,7 @@ const (
 	UpdateReplica         SectorState = "UpdateReplica"
 	ProveReplicaUpdate    SectorState = "ProveReplicaUpdate"
 	SubmitReplicaUpdate   SectorState = "SubmitReplicaUpdate"
+	WaitMutable           SectorState = "WaitMutable"
 	ReplicaUpdateWait     SectorState = "ReplicaUpdateWait"
 	FinalizeReplicaUpdate SectorState = "FinalizeReplicaUpdate"
 	UpdateActivating      SectorState = "UpdateActivating"
@@ -161,7 +163,7 @@ func toStatState(st SectorState, finEarly bool) statSectorState {
 		return sstStaging
 	case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, CommitFinalize, FinalizeSector, SnapDealsPacking, UpdateReplica, ProveReplicaUpdate, FinalizeReplicaUpdate, ReceiveSector:
 		return sstSealing
-	case SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait, SubmitReplicaUpdate, ReplicaUpdateWait:
+	case SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait, WaitMutable, SubmitReplicaUpdate, ReplicaUpdateWait:
 		if finEarly {
 			// we use statSectorState for throttling storage use. With FinalizeEarly
 			// we can consider sectors in states after CommitFinalize as finalized, so
@@ -184,6 +186,7 @@ func IsUpgradeState(st SectorState) bool {
 		UpdateReplica,
 		ProveReplicaUpdate,
 		SubmitReplicaUpdate,
+		WaitMutable,
 
 		SnapDealsAddPieceFailed,
 		SnapDealsDealsExpired,
@@ -257,8 +257,9 @@ func (m *Sealing) handleSubmitReplicaUpdateFailed(ctx statemachine.Context, sect
 		return nil
 	}
 	if !active {
-		log.Errorf("sector marked for upgrade %d no longer active, aborting upgrade", sector.SectorNumber)
-		return ctx.Send(SectorAbortUpgrade{})
+		err := xerrors.Errorf("sector marked for upgrade %d no longer active, aborting upgrade", sector.SectorNumber)
+		log.Errorf(err.Error())
+		return ctx.Send(SectorAbortUpgrade{err})
 	}
 
 	return ctx.Send(SectorRetrySubmitReplicaUpdate{})
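SectorAbortUpgrade now carries the triggering error into the event, so the abort reason is recorded with the sector's state history rather than only in the log. The event type itself is not shown in this diff; a likely reconstruction, mirroring the SectorReleaseKeyFailed struct{ error } pattern visible earlier:

    // Hypothetical reconstruction: an FSM event embedding its cause.
    type SectorAbortUpgrade struct{ error }

    func (evt SectorAbortUpgrade) apply(state *SectorInfo) {}

    // Planners can then send the reason along with the transition, e.g.:
    //   ctx.Send(SectorAbortUpgrade{xerrors.Errorf("sector %d no longer active", num)})
    // and downstream handlers may unwrap the embedded error for reporting.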
@@ -421,6 +422,8 @@ func (m *Sealing) handleAbortUpgrade(ctx statemachine.Context, sector SectorInfo
 		return xerrors.Errorf("should never reach AbortUpgrade as a non-CCUpdate sector")
 	}
 
+	m.cleanupAssignedDeals(sector)
+
 	// Remove snap deals replica if any
 	if err := m.sealer.ReleaseReplicaUpgrade(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber)); err != nil {
 		return xerrors.Errorf("removing CC update files from sector storage")
@@ -138,6 +138,12 @@ func (m *Sealing) handleProvingSector(ctx statemachine.Context, sector SectorInf
 	delete(m.available, m.minerSectorID(sector.SectorNumber))
 	m.inputLk.Unlock()
 
+	// guard against manual state updates from snap-deals states into Proving
+	// note: normally snap deals should be aborted through the abort command, but
+	// apparently sometimes some SPs would use update-state to force the sector back
+	// into the Proving state, breaking the deal input pipeline in the process.
+	m.cleanupAssignedDeals(sector)
+
 	// TODO: Watch termination
 	// TODO: Auto-extend if set
 
@@ -57,8 +57,9 @@ func (m *Sealing) handleProveReplicaUpdate(ctx statemachine.Context, sector Sect
 		return nil
 	}
 	if !active {
-		log.Errorf("sector marked for upgrade %d no longer active, aborting upgrade", sector.SectorNumber)
-		return ctx.Send(SectorAbortUpgrade{})
+		err := xerrors.Errorf("sector marked for upgrade %d no longer active, aborting upgrade", sector.SectorNumber)
+		log.Errorf(err.Error())
+		return ctx.Send(SectorAbortUpgrade{err})
 	}
 
 	vanillaProofs, err := m.sealer.ProveReplicaUpdate1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), *sector.CommR, *sector.UpdateSealed, *sector.UpdateUnsealed)
@@ -97,6 +98,17 @@ func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector Sec
 		log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err)
 		return nil
 	}
 
+	dlinfo, err := m.Api.StateMinerProvingDeadline(ctx.Context(), m.maddr, ts.Key())
+	if err != nil {
+		log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %w", err)
+	}
+	// if the sector's deadline is immutable, wait in a non-error state;
+	// a sector's deadline is immutable if it is the current deadline or the next one
+	if sl.Deadline == dlinfo.Index || (dlinfo.Index+1)%dlinfo.WPoStPeriodDeadlines == sl.Deadline {
+		return ctx.Send(SectorDeadlineImmutable{})
+	}
+
 	updateProof, err := sector.SectorType.RegisteredUpdateProof()
 	if err != nil {
 		log.Errorf("failed to get update proof type from seal proof: %+v", err)
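A worked example of the immutability rule added above, assuming the mainnet value of 48 deadlines per proving period (WPoStPeriodDeadlines); the sample indices are illustrative:

    // deadlineImmutable mirrors the check above: a sector's deadline is
    // immutable while it is the open deadline or the one immediately after it,
    // wrapping at the end of the proving period.
    func deadlineImmutable(sectorDl, currentDl, periodDeadlines uint64) bool {
    	return sectorDl == currentDl || (currentDl+1)%periodDeadlines == sectorDl
    }

    // Examples, with periodDeadlines = 48:
    //   deadlineImmutable(47, 47, 48) == true  // deadline is open right now
    //   deadlineImmutable(0, 47, 48)  == true  // opens next (note the wrap-around)
    //   deadlineImmutable(20, 10, 48) == false // mutable: safe to submit the update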
@@ -187,6 +199,67 @@ func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector Sec
 	return ctx.Send(SectorReplicaUpdateSubmitted{Message: mcid})
 }
 
+func (m *Sealing) handleWaitMutable(ctx statemachine.Context, sector SectorInfo) error {
+	immutable := true
+	for immutable {
+		ts, err := m.Api.ChainHead(ctx.Context())
+		if err != nil {
+			log.Errorf("handleWaitMutable: api error, not proceeding: %+v", err)
+			return nil
+		}
+
+		sl, err := m.Api.StateSectorPartition(ctx.Context(), m.maddr, sector.SectorNumber, ts.Key())
+		if err != nil {
+			log.Errorf("handleWaitMutable: api error, not proceeding: %+v", err)
+			return nil
+		}
+
+		dlinfo, err := m.Api.StateMinerProvingDeadline(ctx.Context(), m.maddr, ts.Key())
+		if err != nil {
+			log.Errorf("handleWaitMutable: api error, not proceeding: %w", err)
+			return nil
+		}
+
+		sectorDeadlineOpen := sl.Deadline == dlinfo.Index
+		sectorDeadlineNext := (dlinfo.Index+1)%dlinfo.WPoStPeriodDeadlines == sl.Deadline
+		immutable = sectorDeadlineOpen || sectorDeadlineNext
+
+		// Sleep for immutable epochs
+		if immutable {
+			dlineEpochsRemaining := dlinfo.NextOpen() - ts.Height()
+			var targetEpoch abi.ChainEpoch
+			if sectorDeadlineOpen {
+				// sleep for the remainder of the current deadline
+				targetEpoch = ts.Height() + dlineEpochsRemaining
+			} else {
+				// sleep for the remainder of the current deadline and the next one
+				targetEpoch = ts.Height() + dlineEpochsRemaining + dlinfo.WPoStChallengeWindow
+			}
+
+			atHeight := make(chan struct{})
+			err := m.events.ChainAt(ctx.Context(), func(context.Context, *types.TipSet, abi.ChainEpoch) error {
+				close(atHeight)
+				return nil
+			}, func(ctx context.Context, ts *types.TipSet) error {
+				log.Warn("revert in handleWaitMutable")
+				return nil
+			}, 5, targetEpoch)
+			if err != nil {
+				log.Errorf("handleWaitMutable: events error: api error, not proceeding: %w", err)
+				return nil
+			}
+
+			select {
+			case <-atHeight:
+			case <-ctx.Context().Done():
+				return ctx.Context().Err()
+			}
+		}
+
+	}
+	return ctx.Send(SectorDeadlineMutable{})
+}
+
 func (m *Sealing) handleReplicaUpdateWait(ctx statemachine.Context, sector SectorInfo) error {
 	if sector.ReplicaUpdateMessage == nil {
 		log.Errorf("handleReplicaUpdateWait: no replica update message cid recorded")
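How long handleWaitMutable sleeps, worked through under the assumption of mainnet's 60-epoch challenge window (WPoStChallengeWindow); the heights are illustrative:

    // waitTarget mirrors the targetEpoch computation above: wake at the next
    // deadline boundary, plus one extra challenge window when the sector's
    // deadline is the *next* one rather than the currently open one.
    func waitTarget(height, nextOpen, challengeWindow int64, deadlineOpen bool) int64 {
    	remaining := nextOpen - height
    	if deadlineOpen {
    		return height + remaining // wake when the open deadline closes
    	}
    	return height + remaining + challengeWindow // also skip the next window
    }

    // Examples, with a 60-epoch challenge window:
    //   waitTarget(1000, 1030, 60, true)  == 1030 // sector deadline open now
    //   waitTarget(1000, 1030, 60, false) == 1090 // sector deadline is next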
@@ -33,7 +33,7 @@ import (
 var DealSectorPriority = 1024
 var MaxTicketAge = policy.MaxPreCommitRandomnessLookback
 
-func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) error {
+func (m *Sealing) cleanupAssignedDeals(sector SectorInfo) {
 	m.inputLk.Lock()
 	// make sure we are not accepting deals into this sector
 	for _, c := range m.assignedPieces[m.minerSectorID(sector.SectorNumber)] {
@@ -51,6 +51,10 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err
 	delete(m.openSectors, m.minerSectorID(sector.SectorNumber))
 	delete(m.assignedPieces, m.minerSectorID(sector.SectorNumber))
 	m.inputLk.Unlock()
+}
+
+func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) error {
+	m.cleanupAssignedDeals(sector)
 
 	// if this is a snapdeals sector, but it ended up not having any deals, abort the upgrade
 	if sector.State == SnapDealsPacking && !sector.hasDeals() {