commit e74838f024
Merge pull request #9413 from filecoin-project/sbansal/single-partition-post-for-recovery-sectors

feat: wdpost: Add ability to only have single partition per msg for partitions with…
@@ -380,11 +380,9 @@
 # A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
 #
 # The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
-# means that a single message can prove at most 10 partinions
+# means that a single message can prove at most 10 partitions
 #
-# In some cases when submitting PoSt messages which are recovering sectors, the default network limit may still be
-# too high to fit in the block gas limit; In those cases it may be necessary to set this value to something lower
-# than 10; Note that setting this value lower may result in less efficient gas use - more messages will be sent,
+# Note that setting this value lower may result in less efficient gas use - more messages will be sent,
 # to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
 #
 # Setting this value above the network limit has no effect
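
The "at most 10 partitions" figure follows directly from the two constants quoted in the hunk above; a quick sketch of the arithmetic (the 25000 and 2349 values come from the config text, the program around them is only illustration):

```go
package main

import "fmt"

func main() {
	const maxSectorsPerMsg = 25000   // nv16 PoSt message sector limit (quoted above)
	const sectorsPerPartition = 2349 // max 32GiB sectors per partition (quoted above)
	// Integer division floors the result: 25000 / 2349 = 10 partitions per message.
	fmt.Println(maxSectorsPerMsg / sectorsPerPartition)
}
```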
@@ -403,6 +401,19 @@
 # env var: LOTUS_PROVING_MAXPARTITIONSPERRECOVERYMESSAGE
 #MaxPartitionsPerRecoveryMessage = 0
 
+# Enable single partition per PoSt Message for partitions containing recovery sectors
+#
+# In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
+# too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
+# with recovering sectors in the post message
+#
+# Note that setting this value lower may result in less efficient gas use - more messages will be sent,
+# to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
+#
+# type: bool
+# env var: LOTUS_PROVING_SINGLERECOVERINGPARTITIONPERPOSTMESSAGE
+#SingleRecoveringPartitionPerPostMessage = false
+
 
 [Sealing]
 # Upper bound on how many sectors can be waiting for more deals to be packed in it before it begins sealing at any given time.
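
To opt in, a miner sets `SingleRecoveringPartitionPerPostMessage = true` under the `[Proving]` section of the config shown above, or exports the documented environment variable. A minimal sketch of the env-var route (the variable name is verbatim from the generated docs; setting it with `os.Setenv`, e.g. from a harness before the miner loads its config, is an assumption):

```go
package main

import "os"

func main() {
	// Env var generated for the new Proving option (see docs above).
	os.Setenv("LOTUS_PROVING_SINGLERECOVERINGPARTITIONPERPOSTMESSAGE", "true")
}
```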
@@ -698,11 +698,9 @@ After changing this option, confirm that the new value works in your setup by in
 A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
 
 The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
-means that a single message can prove at most 10 partinions
+means that a single message can prove at most 10 partitions
 
-In some cases when submitting PoSt messages which are recovering sectors, the default network limit may still be
-too high to fit in the block gas limit; In those cases it may be necessary to set this value to something lower
-than 10; Note that setting this value lower may result in less efficient gas use - more messages will be sent,
+Note that setting this value lower may result in less efficient gas use - more messages will be sent,
 to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
 
 Setting this value above the network limit has no effect`,
@@ -717,6 +715,19 @@ In those cases it may be necessary to set this value to something low (eg 1);
 Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
 resulting in more total gas use (but each message will have lower gas limit)`,
 		},
+		{
+			Name: "SingleRecoveringPartitionPerPostMessage",
+			Type: "bool",
+
+			Comment: `Enable single partition per PoSt Message for partitions containing recovery sectors
+
+In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
+too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
+with recovering sectors in the post message
+
+Note that setting this value lower may result in less efficient gas use - more messages will be sent,
+to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)`,
+		},
 	},
 	"Pubsub": []DocField{
 		{
@@ -276,11 +276,9 @@ type ProvingConfig struct {
 	// A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
 	//
 	// The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
-	// means that a single message can prove at most 10 partinions
+	// means that a single message can prove at most 10 partitions
 	//
-	// In some cases when submitting PoSt messages which are recovering sectors, the default network limit may still be
-	// too high to fit in the block gas limit; In those cases it may be necessary to set this value to something lower
-	// than 10; Note that setting this value lower may result in less efficient gas use - more messages will be sent,
+	// Note that setting this value lower may result in less efficient gas use - more messages will be sent,
 	// to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
 	//
 	// Setting this value above the network limit has no effect
@@ -294,6 +292,16 @@ type ProvingConfig struct {
 	// Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
 	// resulting in more total gas use (but each message will have lower gas limit)
 	MaxPartitionsPerRecoveryMessage int
+
+	// Enable single partition per PoSt Message for partitions containing recovery sectors
+	//
+	// In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
+	// too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
+	// with recovering sectors in the post message
+	//
+	// Note that setting this value lower may result in less efficient gas use - more messages will be sent,
+	// to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
+	SingleRecoveringPartitionPerPostMessage bool
 }
 
 type SealingConfig struct {
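
For orientation, a minimal sketch of setting the new field from Go (the `ProvingConfig` type and field name are taken from this hunk; `config.DefaultStorageMiner` as the entry point is an assumption about the surrounding lotus `node/config` package):

```go
package main

import "github.com/filecoin-project/lotus/node/config"

func main() {
	// Assumed default-config constructor: returns a *StorageMiner whose
	// Proving section is the ProvingConfig this diff extends.
	cfg := config.DefaultStorageMiner()
	cfg.Proving.SingleRecoveringPartitionPerPostMessage = true
	_ = cfg
}
```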
@@ -286,7 +286,7 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, manual bool, di
 
 	// Split partitions into batches, so as not to exceed the number of sectors
 	// allowed in a single message
-	partitionBatches, err := s.batchPartitions(partitions, nv)
+	partitionBatches, err := s.BatchPartitions(partitions, nv)
 	if err != nil {
 		return nil, err
 	}
@@ -492,7 +492,9 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, manual bool, di
 	return posts, nil
 }
 
-func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition, nv network.Version) ([][]api.Partition, error) {
+// Note: Partition order within batches must match original partition order in order
+// for code following the user code to work
+func (s *WindowPoStScheduler) BatchPartitions(partitions []api.Partition, nv network.Version) ([][]api.Partition, error) {
 	// We don't want to exceed the number of sectors allowed in a message.
 	// So given the number of sectors in a partition, work out the number of
 	// partitions that can be in a message without exceeding sectors per
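
The ordering caveat in the new doc comment is worth spelling out: downstream code pairs each returned batch with the deadline's partition indices positionally, so BatchPartitions may regroup partitions but must never reorder them. Reading the (terse) phrase "for code following the user code to work" this way is an inference from the surrounding scheduler, not something the diff states explicitly.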
@@ -524,21 +526,33 @@ func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition, nv net
 		}
 	}
 
-	// The number of messages will be:
-	// ceiling(number of partitions / partitions per message)
-	batchCount := len(partitions) / partitionsPerMsg
-	if len(partitions)%partitionsPerMsg != 0 {
-		batchCount++
-	}
-
-	// Split the partitions into batches
-	batches := make([][]api.Partition, 0, batchCount)
-	for i := 0; i < len(partitions); i += partitionsPerMsg {
-		end := i + partitionsPerMsg
-		if end > len(partitions) {
-			end = len(partitions)
-		}
-		batches = append(batches, partitions[i:end])
-	}
+	batches := [][]api.Partition{}
+
+	currBatch := []api.Partition{}
+	for _, partition := range partitions {
+		recSectors, err := partition.RecoveringSectors.Count()
+		if err != nil {
+			return nil, err
+		}
+
+		// Only add single partition to a batch if it contains recovery sectors
+		// and has the below user config set
+		if s.singleRecoveringPartitionPerPostMessage && recSectors > 0 {
+			if len(currBatch) > 0 {
+				batches = append(batches, currBatch)
+				currBatch = []api.Partition{}
+			}
+			batches = append(batches, []api.Partition{partition})
+		} else {
+			if len(currBatch) >= partitionsPerMsg {
+				batches = append(batches, currBatch)
+				currBatch = []api.Partition{}
+			}
+			currBatch = append(currBatch, partition)
+		}
+	}
+	if len(currBatch) > 0 {
+		batches = append(batches, currBatch)
+	}
 
 	return batches, nil
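
The rewritten loop replaces fixed-size slicing with an accumulate-and-flush scheme: healthy partitions fill `currBatch` up to `partitionsPerMsg`, while any partition carrying recovering sectors (when the new flag is on) flushes the current batch and goes out alone. A self-contained sketch that mirrors this grouping rule on plain ints, so the behaviour can be checked outside the scheduler (an illustration, not the lotus code; `limit` stands in for `partitionsPerMsg`):

```go
package main

import "fmt"

// batch mirrors the BatchPartitions grouping rule: recovering[p] == true
// means partition p carries recovering sectors and must travel alone.
func batch(recovering []bool, limit int) [][]int {
	batches := [][]int{}
	curr := []int{}
	for p, rec := range recovering {
		if rec {
			if len(curr) > 0 { // flush what we have, preserving order
				batches = append(batches, curr)
				curr = []int{}
			}
			batches = append(batches, []int{p}) // single-partition batch
		} else {
			if len(curr) >= limit { // normal size-based flush
				batches = append(batches, curr)
				curr = []int{}
			}
			curr = append(curr, p)
		}
	}
	if len(curr) > 0 {
		batches = append(batches, curr)
	}
	return batches
}

func main() {
	// Same layout as TestBatchPartitionsRecoverySectors below:
	// partitions 4, 5 and 12 are recovering, user limit is 4.
	rec := make([]bool, 13)
	rec[4], rec[5], rec[12] = true, true, true
	for _, b := range batch(rec, 4) {
		fmt.Println(len(b), b) // sizes come out 4, 1, 1, 4, 2, 1
	}
}
```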
@@ -177,6 +177,26 @@ func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPo
 	return map[abi.SectorID]string{}, nil
 }
 
+func generatePartition(sectorCount uint64, recoverySectorCount uint64) api.Partition {
+	var partition api.Partition
+	sectors := bitfield.New()
+	recoverySectors := bitfield.New()
+	for s := uint64(0); s < sectorCount; s++ {
+		sectors.Set(s)
+	}
+	for s := uint64(0); s < recoverySectorCount; s++ {
+		recoverySectors.Set(s)
+	}
+	partition = api.Partition{
+		AllSectors:        sectors,
+		FaultySectors:     bitfield.New(),
+		RecoveringSectors: recoverySectors,
+		LiveSectors:       sectors,
+		ActiveSectors:     sectors,
+	}
+	return partition
+}
+
 // TestWDPostDoPost verifies that doPost will send the correct number of window
 // PoST messages for a given number of partitions
 func TestWDPostDoPost(t *testing.T) {
@ -368,6 +388,55 @@ func TestWDPostDoPostPartLimitConfig(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestBatchPartitionsRecoverySectors tests if the batches with recovery sectors
|
||||||
|
// contain only single partitions while keeping all the partitions in order
|
||||||
|
func TestBatchPartitionsRecoverySectors(t *testing.T) {
|
||||||
|
|
||||||
|
proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
|
||||||
|
postAct := tutils.NewIDAddr(t, 100)
|
||||||
|
|
||||||
|
mockStgMinerAPI := newMockStorageMinerAPI()
|
||||||
|
|
||||||
|
userPartLimit := 4
|
||||||
|
|
||||||
|
scheduler := &WindowPoStScheduler{
|
||||||
|
api: mockStgMinerAPI,
|
||||||
|
prover: &mockProver{},
|
||||||
|
verifier: &mockVerif{},
|
||||||
|
faultTracker: &mockFaultTracker{},
|
||||||
|
proofType: proofType,
|
||||||
|
actor: postAct,
|
||||||
|
journal: journal.NilJournal(),
|
||||||
|
addrSel: &ctladdr.AddressSelector{},
|
||||||
|
|
||||||
|
maxPartitionsPerPostMessage: userPartLimit,
|
||||||
|
singleRecoveringPartitionPerPostMessage: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
var partitions []api.Partition
|
||||||
|
for p := 0; p < 4; p++ {
|
||||||
|
partitions = append(partitions, generatePartition(100, 0))
|
||||||
|
}
|
||||||
|
for p := 0; p < 2; p++ {
|
||||||
|
partitions = append(partitions, generatePartition(100, 10))
|
||||||
|
}
|
||||||
|
for p := 0; p < 6; p++ {
|
||||||
|
partitions = append(partitions, generatePartition(100, 0))
|
||||||
|
}
|
||||||
|
partitions = append(partitions, generatePartition(100, 10))
|
||||||
|
|
||||||
|
expectedBatchLens := []int{4, 1, 1, 4, 2, 1}
|
||||||
|
|
||||||
|
batches, err := scheduler.BatchPartitions(partitions, network.Version16)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, len(batches), 6)
|
||||||
|
|
||||||
|
for i, batch := range batches {
|
||||||
|
require.Equal(t, len(batch), expectedBatchLens[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// TestWDPostDeclareRecoveriesPartLimitConfig verifies that declareRecoveries will send the correct number of
|
// TestWDPostDeclareRecoveriesPartLimitConfig verifies that declareRecoveries will send the correct number of
|
||||||
// DeclareFaultsRecovered messages for a given number of partitions based on user config
|
// DeclareFaultsRecovered messages for a given number of partitions based on user config
|
||||||
func TestWDPostDeclareRecoveriesPartLimitConfig(t *testing.T) {
|
func TestWDPostDeclareRecoveriesPartLimitConfig(t *testing.T) {
|
||||||
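
Why the expected lengths come out to {4, 1, 1, 4, 2, 1}: the first four healthy partitions fill one batch at the user limit of 4; the two recovering partitions each force a single-partition batch; the next six healthy partitions yield a full batch of 4 plus a partial batch of 2, flushed early when the final recovering partition arrives; and that last recovering partition is again batched alone. The batches also preserve the original partition order, which is the other property the test name promises.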
@@ -64,18 +64,19 @@ type NodeAPI interface {
 // WindowPoStScheduler watches the chain though the changeHandler, which in turn
 // turn calls the scheduler when the time arrives to do work.
 type WindowPoStScheduler struct {
 	api                                     NodeAPI
 	feeCfg                                  config.MinerFeeConfig
 	addrSel                                 *ctladdr.AddressSelector
 	prover                                  storiface.ProverPoSt
 	verifier                                storiface.Verifier
 	faultTracker                            sealer.FaultTracker
 	proofType                               abi.RegisteredPoStProof
 	partitionSectors                        uint64
 	disablePreChecks                        bool
 	maxPartitionsPerPostMessage             int
 	maxPartitionsPerRecoveryMessage         int
+	singleRecoveringPartitionPerPostMessage bool
 
 	ch *changeHandler
 
 	actor address.Address
@ -102,18 +103,19 @@ func NewWindowedPoStScheduler(api NodeAPI,
|
|||||||
}
|
}
|
||||||
|
|
||||||
return &WindowPoStScheduler{
|
return &WindowPoStScheduler{
|
||||||
api: api,
|
api: api,
|
||||||
feeCfg: cfg,
|
feeCfg: cfg,
|
||||||
addrSel: as,
|
addrSel: as,
|
||||||
prover: sp,
|
prover: sp,
|
||||||
verifier: verif,
|
verifier: verif,
|
||||||
faultTracker: ft,
|
faultTracker: ft,
|
||||||
proofType: mi.WindowPoStProofType,
|
proofType: mi.WindowPoStProofType,
|
||||||
partitionSectors: mi.WindowPoStPartitionSectors,
|
partitionSectors: mi.WindowPoStPartitionSectors,
|
||||||
disablePreChecks: pcfg.DisableWDPoStPreChecks,
|
disablePreChecks: pcfg.DisableWDPoStPreChecks,
|
||||||
maxPartitionsPerPostMessage: pcfg.MaxPartitionsPerPoStMessage,
|
maxPartitionsPerPostMessage: pcfg.MaxPartitionsPerPoStMessage,
|
||||||
maxPartitionsPerRecoveryMessage: pcfg.MaxPartitionsPerRecoveryMessage,
|
maxPartitionsPerRecoveryMessage: pcfg.MaxPartitionsPerRecoveryMessage,
|
||||||
actor: actor,
|
singleRecoveringPartitionPerPostMessage: pcfg.SingleRecoveringPartitionPerPostMessage,
|
||||||
|
actor: actor,
|
||||||
evtTypes: [...]journal.EventType{
|
evtTypes: [...]journal.EventType{
|
||||||
evtTypeWdPoStScheduler: j.RegisterEventType("wdpost", "scheduler"),
|
evtTypeWdPoStScheduler: j.RegisterEventType("wdpost", "scheduler"),
|
||||||
evtTypeWdPoStProofs: j.RegisterEventType("wdpost", "proofs_processed"),
|
evtTypeWdPoStProofs: j.RegisterEventType("wdpost", "proofs_processed"),
|
||||||