Merge pull request #8982 from filecoin-project/feat/post-maxpart-config
feat: wdpost: Config for maximum partition count per message
commit b499ef0c3a
@@ -375,6 +375,24 @@
# env var: LOTUS_PROVING_DISABLEWDPOSTPRECHECKS
#DisableWDPoStPreChecks = false

# Maximum number of partitions to prove in a single SubmitWindowPoSt message. 0 = network limit (10 in nv16)
#
# A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
#
# The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16,
# which means that a single message can prove at most 10 partitions.
#
# In some cases, when submitting PoSt messages which contain recovering sectors, the default network limit may
# still be too high to fit in the block gas limit. In those cases it may be necessary to set this value lower
# than 10. Note that setting this value lower may result in less efficient gas use: more messages will be sent
# to prove each deadline, resulting in more total gas use (though each message will have a lower gas limit).
#
# Setting this value above the network limit has no effect.
#
# type: int
# env var: LOTUS_PROVING_MAXPARTITIONSPERMESSAGE
#MaxPartitionsPerMessage = 0


[Sealing]
# Upper bound on how many sectors can be waiting for more deals to be packed into them before they begin sealing, at any given time.
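For reference, a hypothetical operator override (the value 3 is purely illustrative, not a recommendation; the [Proving] table is assumed from the LOTUS_PROVING_* env prefix shown above) would look like:

[Proving]
  MaxPartitionsPerMessage = 3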
@@ -689,6 +689,24 @@ sent to the chain
After changing this option, confirm that the new value works in your setup by invoking
'lotus-miner proving compute window-post 0'`,
		},
		{
			Name: "MaxPartitionsPerMessage",
			Type: "int",

			Comment: `Maximum number of partitions to prove in a single SubmitWindowPoSt message. 0 = network limit (10 in nv16)

A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.

The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16,
which means that a single message can prove at most 10 partitions.

In some cases, when submitting PoSt messages which contain recovering sectors, the default network limit may
still be too high to fit in the block gas limit. In those cases it may be necessary to set this value lower
than 10. Note that setting this value lower may result in less efficient gas use: more messages will be sent
to prove each deadline, resulting in more total gas use (though each message will have a lower gas limit).

Setting this value above the network limit has no effect.`,
		},
	},
	"Pubsub": []DocField{
		{
@@ -270,6 +270,21 @@ type ProvingConfig struct {
	// After changing this option, confirm that the new value works in your setup by invoking
	// 'lotus-miner proving compute window-post 0'
	DisableWDPoStPreChecks bool

	// Maximum number of partitions to prove in a single SubmitWindowPoSt message. 0 = network limit (10 in nv16)
	//
	// A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
	//
	// The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16,
	// which means that a single message can prove at most 10 partitions.
	//
	// In some cases, when submitting PoSt messages which contain recovering sectors, the default network limit may
	// still be too high to fit in the block gas limit. In those cases it may be necessary to set this value lower
	// than 10. Note that setting this value lower may result in less efficient gas use: more messages will be sent
	// to prove each deadline, resulting in more total gas use (though each message will have a lower gas limit).
	//
	// Setting this value above the network limit has no effect.
	MaxPartitionsPerMessage int
}

type SealingConfig struct {
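The 10-partition figure quoted in this comment follows directly from the two limits it cites; a quick standalone check in plain Go (illustrative only, not lotus code):

package main

import "fmt"

func main() {
	const maxSectorsPerMsg = 25000   // nv16 per-message sector limit, per the comment above
	const sectorsPerPartition = 2349 // sectors in a partition of 32GiB sectors, per the comment above
	// Integer division floors, so at most 10 full partitions fit in one message.
	fmt.Println(maxSectorsPerMsg / sectorsPerPartition) // 10
}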
@@ -517,6 +517,13 @@ func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition, nv net
		partitionsPerMsg = declMax
	}

	// respect user config if set
	if s.maxPartitionsPerMessage > 0 {
		if partitionsPerMsg > s.maxPartitionsPerMessage {
			partitionsPerMsg = s.maxPartitionsPerMessage
		}
	}

	// The number of messages will be:
	// ceiling(number of partitions / partitions per message)
	batchCount := len(partitions) / partitionsPerMsg
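Condensed, the clamp above plus the ceiling step implied by the comment (the remainder adjustment presumably sits just below the visible hunk) amounts to the following sketch; networkMax and userMax are stand-in names, not lotus identifiers:

// batchCount returns how many SubmitWindowedPoSt messages are needed to cover
// the given number of partitions, under the network limit and an optional user cap.
func batchCount(partitions, networkMax, userMax int) int {
	perMsg := networkMax
	if userMax > 0 && userMax < perMsg {
		perMsg = userMax // respect user config if set
	}
	n := partitions / perMsg
	if partitions%perMsg != 0 {
		n++ // a partial final message rounds the count up
	}
	return n
}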
@@ -268,6 +268,105 @@ func TestWDPostDoPost(t *testing.T) {
	}
}

// TestWDPostDoPostPartLimitConfig verifies that doPost sends the correct number
// of window PoSt messages for a given number of partitions, based on user config
func TestWDPostDoPostPartLimitConfig(t *testing.T) {
	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
	ctx := context.Background()
	expectedMsgCount := 364

	proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
	postAct := tutils.NewIDAddr(t, 100)

	mockStgMinerAPI := newMockStorageMinerAPI()

	// Get the number of sectors allowed in a partition for this proof type
	sectorsPerPartition, err := builtin.PoStProofWindowPoStPartitionSectors(proofType)
	require.NoError(t, err)

	// Work out the number of partitions that can be included in a message
	// without exceeding the message sector limit
	//stm: @BLOCKCHAIN_POLICY_GET_MAX_POST_PARTITIONS_001
	partitionsPerMsg, err := policy.GetMaxPoStPartitions(network.Version13, proofType)
	require.NoError(t, err)
	if partitionsPerMsg > minertypes.AddressedPartitionsMax {
		partitionsPerMsg = minertypes.AddressedPartitionsMax
	}

	partitionCount := 4 * partitionsPerMsg

	// Assert that the user config is less than the network limit
	userPartLimit := 33
	lastMsgParts := 21
	require.Greater(t, partitionCount, userPartLimit)

	// Assert that the constants above are consistent with each other
	require.Equal(t, (expectedMsgCount-1)*userPartLimit+lastMsgParts, 4*partitionsPerMsg)

	var partitions []api.Partition
	for p := 0; p < partitionCount; p++ {
		sectors := bitfield.New()
		for s := uint64(0); s < sectorsPerPartition; s++ {
			sectors.Set(s)
		}
		partitions = append(partitions, api.Partition{
			AllSectors:        sectors,
			FaultySectors:     bitfield.New(),
			RecoveringSectors: bitfield.New(),
			LiveSectors:       sectors,
			ActiveSectors:     sectors,
		})
	}
	mockStgMinerAPI.setPartitions(partitions)

	// Run window PoSt
	scheduler := &WindowPoStScheduler{
		api:          mockStgMinerAPI,
		prover:       &mockProver{},
		verifier:     &mockVerif{},
		faultTracker: &mockFaultTracker{},
		proofType:    proofType,
		actor:        postAct,
		journal:      journal.NilJournal(),
		addrSel:      &ctladdr.AddressSelector{},

		maxPartitionsPerMessage: userPartLimit,
	}

	di := &dline.Info{
		WPoStPeriodDeadlines:   minertypes.WPoStPeriodDeadlines,
		WPoStProvingPeriod:     minertypes.WPoStProvingPeriod,
		WPoStChallengeWindow:   minertypes.WPoStChallengeWindow,
		WPoStChallengeLookback: minertypes.WPoStChallengeLookback,
		FaultDeclarationCutoff: minertypes.FaultDeclarationCutoff,
	}
	ts := mockTipSet(t)

	scheduler.startGeneratePoST(ctx, ts, di, func(posts []minertypes.SubmitWindowedPoStParams, err error) {
		scheduler.startSubmitPoST(ctx, ts, di, posts, func(err error) {})
	})

	// Read the window PoSt messages
	for i := 0; i < expectedMsgCount; i++ {
		msg := <-mockStgMinerAPI.pushedMessages
		require.Equal(t, builtin.MethodsMiner.SubmitWindowedPoSt, msg.Method)
		var params minertypes.SubmitWindowedPoStParams
		err := params.UnmarshalCBOR(bytes.NewReader(msg.Params))
		require.NoError(t, err)

		if i == expectedMsgCount-1 {
			// The last message should include only the remaining 21 partitions
			require.Len(t, params.Partitions, lastMsgParts)
		} else {
			// All previous messages should include the full number of partitions
			require.Len(t, params.Partitions, userPartLimit)
		}
	}
}
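The expected counts are internally consistent: the equality asserted in the test pins partitionsPerMsg at 3000 for the 2KiB proof type, so partitionCount is 12000. A standalone check of the arithmetic (illustrative, not part of the test):

package main

import "fmt"

func main() {
	partitions := 4 * 3000 // partitionCount; 3000 follows from the test's equality assertion
	limit := 33            // userPartLimit
	full, rem := partitions/limit, partitions%limit
	fmt.Println(full+1, rem) // 364 21 — matching expectedMsgCount and lastMsgParts
}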

func mockTipSet(t *testing.T) *types.TipSet {
	minerAct := tutils.NewActorAddr(t, "miner")
	c, err := cid.Decode("QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH")
@@ -62,16 +62,17 @@ type NodeAPI interface {
// WindowPoStScheduler watches the chain through the changeHandler, which in
// turn calls the scheduler when the time arrives to do work.
type WindowPoStScheduler struct {
-	api              NodeAPI
-	feeCfg           config.MinerFeeConfig
-	addrSel          *ctladdr.AddressSelector
-	prover           storiface.ProverPoSt
-	verifier         storiface.Verifier
-	faultTracker     sealer.FaultTracker
-	proofType        abi.RegisteredPoStProof
-	partitionSectors uint64
-	disablePreChecks bool
-	ch               *changeHandler
+	api                     NodeAPI
+	feeCfg                  config.MinerFeeConfig
+	addrSel                 *ctladdr.AddressSelector
+	prover                  storiface.ProverPoSt
+	verifier                storiface.Verifier
+	faultTracker            sealer.FaultTracker
+	proofType               abi.RegisteredPoStProof
+	partitionSectors        uint64
+	disablePreChecks        bool
+	maxPartitionsPerMessage int
+	ch                      *changeHandler

	actor address.Address

@@ -98,15 +99,16 @@ func NewWindowedPoStScheduler(api NodeAPI,
	}

	return &WindowPoStScheduler{
-		api:              api,
-		feeCfg:           cfg,
-		addrSel:          as,
-		prover:           sp,
-		verifier:         verif,
-		faultTracker:     ft,
-		proofType:        mi.WindowPoStProofType,
-		partitionSectors: mi.WindowPoStPartitionSectors,
-		disablePreChecks: pcfg.DisableWDPoStPreChecks,
+		api:                     api,
+		feeCfg:                  cfg,
+		addrSel:                 as,
+		prover:                  sp,
+		verifier:                verif,
+		faultTracker:            ft,
+		proofType:               mi.WindowPoStProofType,
+		partitionSectors:        mi.WindowPoStPartitionSectors,
+		disablePreChecks:        pcfg.DisableWDPoStPreChecks,
+		maxPartitionsPerMessage: pcfg.MaxPartitionsPerMessage,

		actor: actor,
		evtTypes: [...]journal.EventType{