Merge pull request #7407 from filecoin-project/nonsense/check-deal-start-epoch-on-SectorAddPieceToAny

check for deal start epoch on SectorAddPieceToAny
This commit is contained in:
Łukasz Magiera 2021-10-01 18:03:10 +01:00 committed by GitHub
commit 28e720fef2
9 changed files with 54 additions and 14 deletions
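In short, a deal is now refused when the chain head plus the configured StartEpochSealingBuffer has already reached the deal's proposed start epoch. A minimal sketch distilling the diffs below (the helper name and the numbers are hypothetical, not part of the change):

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
)

// dealStartsTooSoon is a hypothetical helper distilling the check this change
// adds to both SectorAddPieceToAny and DealPublisher.validateDeal.
func dealStartsTooSoon(head, buffer, startEpoch abi.ChainEpoch) bool {
	return head+buffer > startEpoch
}

func main() {
	// With the default 480-epoch buffer, a deal proposing to start at epoch
	// 100400 is refused once the head reaches epoch 100000 (hypothetical numbers).
	fmt.Println(dealStartsTooSoon(100000, 480, 100400)) // true
}
```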

View File

@ -207,6 +207,12 @@
# env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERS
#SimultaneousTransfers = 20
# Minimum start epoch buffer to give time for sealing of sector with deal.
#
# type: uint64
# env var: LOTUS_DEALMAKING_STARTEPOCHSEALINGBUFFER
#StartEpochSealingBuffer = 480
# A command used for fine-grained evaluation of storage deals
# see https://docs.filecoin.io/mine/lotus/miner-configuration/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details
#

View File

@ -274,6 +274,21 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec
return api.SectorOffset{}, xerrors.Errorf("getting proposal CID: %w", err)
}
cfg, err := m.getConfig()
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("getting config: %w", err)
}
_, head, err := m.Api.ChainHead(ctx)
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("couldnt get chain head: %w", err)
}
if head+cfg.StartEpochSealingBuffer > deal.DealProposal.StartEpoch {
return api.SectorOffset{}, xerrors.Errorf(
"cannot add piece for deal with piece CID %s: current epoch %d has passed deal proposal start epoch %d",
deal.DealProposal.PieceCID, head, deal.DealProposal.StartEpoch)
}
m.inputLk.Lock()
if _, exist := m.pendingPieces[proposalCID(deal)]; exist {
m.inputLk.Unlock()

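The boundary of the check above, with hypothetical numbers: a deal whose StartEpoch equals head plus the buffer is still accepted, one epoch sooner and it is refused. A small self-contained sketch:

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
)

func main() {
	// Hypothetical numbers: head at epoch 1000 with a 480-epoch buffer.
	head, buffer := abi.ChainEpoch(1000), abi.ChainEpoch(480)

	fmt.Println(head+buffer > 1480) // false: a StartEpoch of 1480 is still accepted
	fmt.Println(head+buffer > 1479) // true:  a StartEpoch of 1479 is refused
}
```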
View File

@ -22,6 +22,8 @@ type Config struct {
CommittedCapacitySectorLifetime time.Duration
StartEpochSealingBuffer abi.ChainEpoch
AlwaysKeepUnsealedCopy bool
FinalizeEarly bool

View File

@ -59,10 +59,11 @@ type DealPublisher struct {
publishPeriod time.Duration
publishSpec *api.MessageSendSpec
lk sync.Mutex
pending []*pendingDeal
cancelWaitForMoreDeals context.CancelFunc
publishPeriodStart time.Time
lk sync.Mutex
pending []*pendingDeal
cancelWaitForMoreDeals context.CancelFunc
publishPeriodStart time.Time
startEpochSealingBuffer abi.ChainEpoch
}
// A deal that is queued to be published
@ -93,6 +94,8 @@ type PublishMsgConfig struct {
// The maximum number of deals to include in a single PublishStorageDeals
// message
MaxDealsPerMsg uint64
// Minimum start epoch buffer to give time for sealing of sector with deal
StartEpochSealingBuffer uint64
}
func NewDealPublisher(
@ -124,13 +127,14 @@ func newDealPublisher(
) *DealPublisher {
ctx, cancel := context.WithCancel(context.Background())
return &DealPublisher{
api: dpapi,
as: as,
ctx: ctx,
Shutdown: cancel,
maxDealsPerPublishMsg: publishMsgCfg.MaxDealsPerMsg,
publishPeriod: publishMsgCfg.Period,
publishSpec: publishSpec,
api: dpapi,
as: as,
ctx: ctx,
Shutdown: cancel,
maxDealsPerPublishMsg: publishMsgCfg.MaxDealsPerMsg,
publishPeriod: publishMsgCfg.Period,
startEpochSealingBuffer: abi.ChainEpoch(publishMsgCfg.StartEpochSealingBuffer),
publishSpec: publishSpec,
}
}
@ -329,7 +333,7 @@ func (p *DealPublisher) validateDeal(deal market2.ClientDealProposal) error {
if err != nil {
return err
}
if head.Height() > deal.Proposal.StartEpoch {
if head.Height()+p.startEpochSealingBuffer > deal.Proposal.StartEpoch {
return xerrors.Errorf(
"cannot publish deal with piece CID %s: current epoch %d has passed deal proposal start epoch %d",
deal.Proposal.PieceCID, head.Height(), deal.Proposal.StartEpoch)

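For contrast, the publisher previously rejected a deal only once its start epoch had actually passed; with the buffer it also refuses deals that can no longer be sealed in time. A sketch with hypothetical numbers:

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
)

func main() {
	// Hypothetical numbers: head at epoch 1000, a 480-epoch buffer, and a deal
	// proposing to start at epoch 1200.
	head, buffer, start := abi.ChainEpoch(1000), abi.ChainEpoch(480), abi.ChainEpoch(1200)

	fmt.Println(head > start)        // false: the old check would still publish this deal
	fmt.Println(head+buffer > start) // true:  the buffered check now refuses it
}
```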
View File

@ -199,8 +199,9 @@ func ConfigStorageMiner(c interface{}) Option {
Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(dealfilter.CliRetrievalDealFilter(cfg.Dealmaking.RetrievalFilter))),
),
Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(&cfg.Fees, storageadapter.PublishMsgConfig{
Period: time.Duration(cfg.Dealmaking.PublishMsgPeriod),
MaxDealsPerMsg: cfg.Dealmaking.MaxDealsPerPublishMsg,
Period: time.Duration(cfg.Dealmaking.PublishMsgPeriod),
MaxDealsPerMsg: cfg.Dealmaking.MaxDealsPerPublishMsg,
StartEpochSealingBuffer: cfg.Dealmaking.StartEpochSealingBuffer,
})),
Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(&cfg.Fees, &cfg.Dealmaking)),
),

View File

@ -148,6 +148,8 @@ func DefaultStorageMiner() *StorageMiner {
SimultaneousTransfers: DefaultSimultaneousTransfers,
StartEpochSealingBuffer: 480, // 480 epochs buffer == 4 hours from adding deal to sector to sector being sealed
RetrievalPricing: &RetrievalPricing{
Strategy: RetrievalPricingDefaultMode,
Default: &RetrievalPricingDefault{

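A quick sanity check on the default, assuming mainnet's 30-second epoch duration (not part of the diff): 480 epochs works out to the four hours mentioned in the comment.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// 480 epochs * 30 seconds per epoch = 14400 seconds = 4 hours from adding
	// the deal to a sector to that sector being sealed.
	const epochDuration = 30 * time.Second
	fmt.Println(480 * epochDuration) // 4h0m0s
}
```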
View File

@ -265,6 +265,12 @@ passed to the sealing node by the markets service. 0 is unlimited.`,
Comment: `The maximum number of parallel online data transfers (storage+retrieval)`,
},
{
Name: "StartEpochSealingBuffer",
Type: "uint64",
Comment: `Minimum start epoch buffer to give time for sealing of sector with deal.`,
},
{
Name: "Filter",
Type: "string",

View File

@ -131,6 +131,8 @@ type DealmakingConfig struct {
MaxStagingDealsBytes int64
// The maximum number of parallel online data transfers (storage+retrieval)
SimultaneousTransfers uint64
// Minimum start epoch buffer to give time for sealing of sector with deal.
StartEpochSealingBuffer uint64
// A command used for fine-grained evaluation of storage deals
// see https://docs.filecoin.io/mine/lotus/miner-configuration/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details

View File

@ -932,6 +932,8 @@ func ToSealingConfig(cfg *config.StorageMiner) sealiface.Config {
TerminateBatchMax: cfg.Sealing.TerminateBatchMax,
TerminateBatchMin: cfg.Sealing.TerminateBatchMin,
TerminateBatchWait: time.Duration(cfg.Sealing.TerminateBatchWait),
StartEpochSealingBuffer: abi.ChainEpoch(cfg.Dealmaking.StartEpochSealingBuffer),
}
}