Merge pull request #6282 from filecoin-project/feat/fip-0013-addenda
FIP 13 addenda: correctly handle commit batch timer
Commit 56145201db
extern/storage-sealing/commit_batch.go

```diff
@@ -18,6 +18,7 @@ import (
 	proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
 
 	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
 )
@@ -27,6 +28,7 @@ const arp = abi.RegisteredAggregationProof_SnarkPackV1
 type CommitBatcherApi interface {
 	SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error)
 	StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error)
+	ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error)
 }
 
 type AggregateInput struct {
@@ -45,8 +47,9 @@ type CommitBatcher struct {
 	getConfig GetSealingConfigFunc
 	verif     ffiwrapper.Verifier
 
-	todo    map[abi.SectorNumber]AggregateInput
-	waiting map[abi.SectorNumber][]chan cid.Cid
+	deadlines map[abi.SectorNumber]time.Time
+	todo      map[abi.SectorNumber]AggregateInput
+	waiting   map[abi.SectorNumber][]chan cid.Cid
 
 	notify, stop, stopped chan struct{}
 	force                 chan chan *cid.Cid
@@ -63,8 +66,9 @@ func NewCommitBatcher(mctx context.Context, maddr address.Address, api CommitBat
 		getConfig: getConfig,
 		verif:     verif,
 
-		todo:    map[abi.SectorNumber]AggregateInput{},
-		waiting: map[abi.SectorNumber][]chan cid.Cid{},
+		deadlines: map[abi.SectorNumber]time.Time{},
+		todo:      map[abi.SectorNumber]AggregateInput{},
+		waiting:   map[abi.SectorNumber][]chan cid.Cid{},
 
 		notify: make(chan struct{}, 1),
 		force:  make(chan chan *cid.Cid),
@@ -100,7 +104,7 @@ func (b *CommitBatcher) run() {
 			return
 		case <-b.notify:
 			sendAboveMax = true
-		case <-time.After(cfg.CommitBatchWait):
+		case <-time.After(b.batchWait(cfg.CommitBatchWait, cfg.CommitBatchSlack)):
 			sendAboveMin = true
 		case fr := <-b.force: // user triggered
 			forceRes = fr
@@ -114,6 +118,69 @@ func (b *CommitBatcher) run() {
 	}
 }
 
+func (b *CommitBatcher) batchWait(maxWait, slack time.Duration) time.Duration {
+	now := time.Now()
+
+	b.lk.Lock()
+	defer b.lk.Unlock()
+
+	var deadline time.Time
+	for sn := range b.todo {
+		sectorDeadline := b.deadlines[sn]
+		if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) {
+			deadline = sectorDeadline
+		}
+	}
+	for sn := range b.waiting {
+		sectorDeadline := b.deadlines[sn]
+		if deadline.IsZero() || (!sectorDeadline.IsZero() && sectorDeadline.Before(deadline)) {
+			deadline = sectorDeadline
+		}
+	}
+
+	if deadline.IsZero() {
+		return maxWait
+	}
+
+	deadline = deadline.Add(-slack)
+	if deadline.Before(now) {
+		return time.Nanosecond // can't return 0
+	}
+
+	wait := deadline.Sub(now)
+	if wait > maxWait {
+		wait = maxWait
+	}
+
+	return wait
+}
+
+func (b *CommitBatcher) getSectorDeadline(si SectorInfo) time.Time {
+	_, curEpoch, err := b.api.ChainHead(b.mctx)
+	if err != nil {
+		log.Errorf("getting chain head: %s", err)
+		return time.Time{}
+	}
+
+	deadlineEpoch := si.TicketEpoch
+	for _, p := range si.Pieces {
+		if p.DealInfo == nil {
+			continue
+		}
+
+		startEpoch := p.DealInfo.DealSchedule.StartEpoch
+		if startEpoch < deadlineEpoch {
+			deadlineEpoch = startEpoch
+		}
+	}
+
+	if deadlineEpoch <= curEpoch {
+		return time.Now()
+	}
+
+	return time.Now().Add(time.Duration(deadlineEpoch-curEpoch) * time.Duration(build.BlockDelaySecs) * time.Second)
+}
+
 func (b *CommitBatcher) processBatch(notif, after bool) (*cid.Cid, error) {
 	b.lk.Lock()
 	defer b.lk.Unlock()
@@ -182,6 +249,7 @@ func (b *CommitBatcher) processBatch(notif, after bool) (*cid.Cid, error) {
 		}
 		delete(b.waiting, sn)
 		delete(b.todo, sn)
+		delete(b.deadlines, sn)
 		return nil
 	})
 	if err != nil {
@@ -192,12 +260,14 @@ func (b *CommitBatcher) processBatch(notif, after bool) (*cid.Cid, error) {
 }
 
 // register commit, wait for batch message, return message CID
-func (b *CommitBatcher) AddCommit(ctx context.Context, s abi.SectorNumber, in AggregateInput) (mcid cid.Cid, err error) {
+func (b *CommitBatcher) AddCommit(ctx context.Context, s SectorInfo, in AggregateInput) (mcid cid.Cid, err error) {
+	sn := s.SectorNumber
 	b.lk.Lock()
-	b.todo[s] = in
+	b.deadlines[sn] = b.getSectorDeadline(s)
+	b.todo[sn] = in
 
 	sent := make(chan cid.Cid, 1)
-	b.waiting[s] = append(b.waiting[s], sent)
+	b.waiting[sn] = append(b.waiting[sn], sent)
 
 	select {
 	case b.notify <- struct{}{}:
```
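Taken together, the commit_batch.go changes make the batch timer deadline-aware: each sector added to the batcher records a wall-clock cutoff, and instead of always sleeping the flat `CommitBatchWait`, the run loop wakes up `CommitBatchSlack` ahead of the earliest recorded cutoff, clamped to at most `CommitBatchWait` and at least one nanosecond. Below is a minimal standalone sketch of that clamping rule, not the lotus API: the `deadlines` slice stands in for the batcher's `todo`/`waiting` maps, and the values are illustrative only.

```go
package main

import (
	"fmt"
	"time"
)

// batchWait mirrors the rule added in this commit: sleep until `slack` before
// the earliest deadline, but never longer than maxWait and never zero
// (the real code returns time.Nanosecond because it "can't return 0").
func batchWait(deadlines []time.Time, maxWait, slack time.Duration) time.Duration {
	var earliest time.Time
	for _, d := range deadlines {
		if earliest.IsZero() || (!d.IsZero() && d.Before(earliest)) {
			earliest = d
		}
	}

	if earliest.IsZero() {
		return maxWait // nothing queued: fall back to the flat wait
	}

	earliest = earliest.Add(-slack)
	if earliest.Before(time.Now()) {
		return time.Nanosecond // already inside the slack window: flush as soon as possible
	}

	wait := time.Until(earliest)
	if wait > maxWait {
		wait = maxWait
	}
	return wait
}

func main() {
	deadlines := []time.Time{
		time.Now().Add(30 * time.Hour),
		time.Now().Add(9 * time.Hour), // earliest sector cutoff
	}
	fmt.Println(batchWait(deadlines, 24*time.Hour, 8*time.Hour))
}
```

Running the sketch prints roughly `1h0m0s`: with a cutoff 9 hours away and an 8-hour slack, the batcher fires after about an hour even though the flat wait is a full day.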
extern/storage-sealing/sealiface/config.go

```diff
@@ -22,6 +22,7 @@ type Config struct {
 	MinCommitBatch   int
 	MaxCommitBatch   int
 	CommitBatchWait  time.Duration
+	CommitBatchSlack time.Duration
 
 	TerminateBatchMax uint64
 	TerminateBatchMin uint64
```
extern/storage-sealing/states_sealing.go

```diff
@@ -537,7 +537,7 @@ func (m *Sealing) handleSubmitCommitAggregate(ctx statemachine.Context, sector S
 		return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector had nil commR or commD")})
 	}
 
-	mcid, err := m.commiter.AddCommit(ctx.Context(), sector.SectorNumber, AggregateInput{
+	mcid, err := m.commiter.AddCommit(ctx.Context(), sector, AggregateInput{
 		info: proof.AggregateSealVerifyInfo{
 			Number:     sector.SectorNumber,
 			Randomness: sector.TicketValue,
```
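`AddCommit` now receives the whole `SectorInfo` rather than just the sector number, which is what lets the batcher compute the cutoff: the earliest of the sector's ticket epoch and the `StartEpoch` of any of its deals, converted to wall-clock time at one epoch per `build.BlockDelaySecs` (30 s on Filecoin mainnet). A rough worked example of that conversion, with made-up epoch numbers and a `blockDelaySecs` constant standing in for `build.BlockDelaySecs`:

```go
package main

import (
	"fmt"
	"time"
)

const blockDelaySecs = 30 // stand-in for build.BlockDelaySecs (30s epochs on mainnet)

func main() {
	curEpoch := int64(1_000_000)
	ticketEpoch := int64(1_003_000)
	dealStartEpochs := []int64{1_002_000, 1_001_200}

	// The cutoff is the earliest of the ticket epoch and every deal's StartEpoch.
	cutoff := ticketEpoch
	for _, e := range dealStartEpochs {
		if e < cutoff {
			cutoff = e
		}
	}

	// 1_001_200 - 1_000_000 = 1200 epochs ≈ 10h of wall-clock time at 30s/epoch,
	// so this sector's deadline lands about 10 hours from now.
	deadline := time.Now().Add(time.Duration(cutoff-curEpoch) * blockDelaySecs * time.Second)
	fmt.Println(deadline)
}
```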
```diff
@@ -246,9 +246,10 @@ func DefaultStorageMiner() *StorageMiner {
 			MinCommitBatch:  1,        // we must have at least one proof to aggregate
 			MaxCommitBatch:  204,      // this is the maximum aggregation per FIP13
 			CommitBatchWait: time.Day, // this can be up to 6 days
-			TerminateBatchMin:  1,        // same as above
-			TerminateBatchMax:  204,      // same as above
-			TerminateBatchWait: time.Day, // this can be up to 6 days
+			CommitBatchSlack: 8 * time.Hour,
+			TerminateBatchMin:  1,
+			TerminateBatchMax:  100,
+			TerminateBatchWait: 5 * time.Minute,
 		},
 
 		Storage: sectorstorage.SealerConfig{
```
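With these defaults the effective timer is min(`CommitBatchWait`, cutoff − 8h) for the earliest queued sector, so a sector whose cutoff is 10 hours away gets its batch flushed after roughly 2 hours rather than after the full day. The hunk also moves the terminate-batch defaults back to 100 sectors and a 5-minute wait; the previous values (204, `time.Day`, commented "same as above") mirrored the commit-batch settings. A quick sanity check of the arithmetic, assuming a 10-hour cutoff:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed: the earliest queued sector's cutoff is 10h away.
	cutoff := time.Now().Add(10 * time.Hour)
	slack := 8 * time.Hour // CommitBatchSlack default above
	maxWait := 24 * time.Hour

	wait := time.Until(cutoff.Add(-slack))
	if wait > maxWait {
		wait = maxWait
	}
	fmt.Println(wait) // ≈ 2h: flushed well before CommitBatchWait elapses
}
```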
```diff
@@ -827,6 +827,11 @@ func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error
 			AggregateCommits: cfg.AggregateCommits,
 			MinCommitBatch:   cfg.MinCommitBatch,
 			MaxCommitBatch:   cfg.MaxCommitBatch,
+			CommitBatchWait:  cfg.CommitBatchWait,
+			CommitBatchSlack: cfg.CommitBatchSlack,
+			TerminateBatchMax:  cfg.TerminateBatchMax,
+			TerminateBatchMin:  cfg.TerminateBatchMin,
+			TerminateBatchWait: cfg.TerminateBatchWait,
 		}
 	})
 	return
|
|||||||
AggregateCommits: cfg.Sealing.AggregateCommits,
|
AggregateCommits: cfg.Sealing.AggregateCommits,
|
||||||
MinCommitBatch: cfg.Sealing.MinCommitBatch,
|
MinCommitBatch: cfg.Sealing.MinCommitBatch,
|
||||||
MaxCommitBatch: cfg.Sealing.MaxCommitBatch,
|
MaxCommitBatch: cfg.Sealing.MaxCommitBatch,
|
||||||
|
CommitBatchWait: cfg.Sealing.CommitBatchWait,
|
||||||
|
CommitBatchSlack: cfg.Sealing.CommitBatchSlack,
|
||||||
|
TerminateBatchMax: cfg.Sealing.TerminateBatchMax,
|
||||||
|
TerminateBatchMin: cfg.Sealing.TerminateBatchMin,
|
||||||
|
TerminateBatchWait: cfg.Sealing.TerminateBatchWait,
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
return
|
return
|
||||||