Merge pull request #9427 from filecoin-project/sbansal/backport-9413
feat: wdpost: Add ability to only have single partition per msg for partitions with recovery sectors
This commit is contained in:
commit
fd2b827e31
@ -380,11 +380,9 @@
|
|||||||
# A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
|
# A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
|
||||||
#
|
#
|
||||||
# The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
|
# The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
|
||||||
# means that a single message can prove at most 10 partinions
|
# means that a single message can prove at most 10 partitions
|
||||||
#
|
#
|
||||||
# In some cases when submitting PoSt messages which are recovering sectors, the default network limit may still be
|
# Note that setting this value lower may result in less efficient gas use - more messages will be sent,
|
||||||
# too high to fit in the block gas limit; In those cases it may be necessary to set this value to something lower
|
|
||||||
# than 10; Note that setting this value lower may result in less efficient gas use - more messages will be sent,
|
|
||||||
# to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
|
# to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
|
||||||
#
|
#
|
||||||
# Setting this value above the network limit has no effect
|
# Setting this value above the network limit has no effect
|
||||||
@ -403,6 +401,19 @@
|
|||||||
# env var: LOTUS_PROVING_MAXPARTITIONSPERRECOVERYMESSAGE
|
# env var: LOTUS_PROVING_MAXPARTITIONSPERRECOVERYMESSAGE
|
||||||
#MaxPartitionsPerRecoveryMessage = 0
|
#MaxPartitionsPerRecoveryMessage = 0
|
||||||
|
|
||||||
|
# Enable single partition per PoSt Message for partitions containing recovery sectors
|
||||||
|
#
|
||||||
|
# In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
|
||||||
|
# too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
|
||||||
|
# with recovering sectors in the PoSt message
|
||||||
|
#
|
||||||
|
# Note that setting this value lower may result in less efficient gas use - more messages will be sent,
|
||||||
|
# to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
|
||||||
|
#
|
||||||
|
# type: bool
|
||||||
|
# env var: LOTUS_PROVING_SINGLERECOVERINGPARTITIONPERPOSTMESSAGE
|
||||||
|
#SingleRecoveringPartitionPerPostMessage = false
|
||||||
|
|
||||||
|
|
||||||
[Sealing]
|
[Sealing]
|
||||||
# Upper bound on how many sectors can be waiting for more deals to be packed in it before it begins sealing at any given time.
|
# Upper bound on how many sectors can be waiting for more deals to be packed in it before it begins sealing at any given time.
|
||||||
|
@ -698,11 +698,9 @@ After changing this option, confirm that the new value works in your setup by in
|
|||||||
A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
|
A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
|
||||||
|
|
||||||
The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
|
The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
|
||||||
means that a single message can prove at most 10 partinions
|
means that a single message can prove at most 10 partitions
|
||||||
|
|
||||||
In some cases when submitting PoSt messages which are recovering sectors, the default network limit may still be
|
Note that setting this value lower may result in less efficient gas use - more messages will be sent,
|
||||||
too high to fit in the block gas limit; In those cases it may be necessary to set this value to something lower
|
|
||||||
than 10; Note that setting this value lower may result in less efficient gas use - more messages will be sent,
|
|
||||||
to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
|
to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
|
||||||
|
|
||||||
Setting this value above the network limit has no effect`,
|
Setting this value above the network limit has no effect`,
|
||||||
@ -717,6 +715,19 @@ In those cases it may be necessary to set this value to something low (eg 1);
|
|||||||
Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
|
Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
|
||||||
resulting in more total gas use (but each message will have lower gas limit)`,
|
resulting in more total gas use (but each message will have lower gas limit)`,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Name: "SingleRecoveringPartitionPerPostMessage",
|
||||||
|
Type: "bool",
|
||||||
|
|
||||||
|
Comment: `Enable single partition per PoSt Message for partitions containing recovery sectors
|
||||||
|
|
||||||
|
In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
|
||||||
|
too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
|
||||||
|
with recovering sectors in the PoSt message
|
||||||
|
|
||||||
|
Note that setting this value lower may result in less efficient gas use - more messages will be sent,
|
||||||
|
to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)`,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
"Pubsub": []DocField{
|
"Pubsub": []DocField{
|
||||||
{
|
{
|
||||||
|
@ -276,11 +276,9 @@ type ProvingConfig struct {
|
|||||||
// A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
|
// A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
|
||||||
//
|
//
|
||||||
// The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
|
// The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
|
||||||
// means that a single message can prove at most 10 partinions
|
// means that a single message can prove at most 10 partitions
|
||||||
//
|
//
|
||||||
// In some cases when submitting PoSt messages which are recovering sectors, the default network limit may still be
|
// Note that setting this value lower may result in less efficient gas use - more messages will be sent,
|
||||||
// too high to fit in the block gas limit; In those cases it may be necessary to set this value to something lower
|
|
||||||
// than 10; Note that setting this value lower may result in less efficient gas use - more messages will be sent,
|
|
||||||
// to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
|
// to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
|
||||||
//
|
//
|
||||||
// Setting this value above the network limit has no effect
|
// Setting this value above the network limit has no effect
|
||||||
@ -294,6 +292,16 @@ type ProvingConfig struct {
|
|||||||
// Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
|
// Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
|
||||||
// resulting in more total gas use (but each message will have lower gas limit)
|
// resulting in more total gas use (but each message will have lower gas limit)
|
||||||
MaxPartitionsPerRecoveryMessage int
|
MaxPartitionsPerRecoveryMessage int
|
||||||
|
|
||||||
|
// Enable single partition per PoSt Message for partitions containing recovery sectors
|
||||||
|
//
|
||||||
|
// In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
|
||||||
|
// too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
|
||||||
|
// with recovering sectors in the PoSt message
|
||||||
|
//
|
||||||
|
// Note that setting this value lower may result in less efficient gas use - more messages will be sent,
|
||||||
|
// to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
|
||||||
|
SingleRecoveringPartitionPerPostMessage bool
|
||||||
}
|
}
|
||||||
|
|
||||||
type SealingConfig struct {
|
type SealingConfig struct {
|
||||||
|
@ -286,7 +286,7 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, manual bool, di
|
|||||||
|
|
||||||
// Split partitions into batches, so as not to exceed the number of sectors
|
// Split partitions into batches, so as not to exceed the number of sectors
|
||||||
// allowed in a single message
|
// allowed in a single message
|
||||||
partitionBatches, err := s.batchPartitions(partitions, nv)
|
partitionBatches, err := s.BatchPartitions(partitions, nv)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -492,7 +492,9 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, manual bool, di
|
|||||||
return posts, nil
|
return posts, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition, nv network.Version) ([][]api.Partition, error) {
|
// Note: Partition order within batches must match original partition order in order
|
||||||
|
// for code following the user code to work
|
||||||
|
func (s *WindowPoStScheduler) BatchPartitions(partitions []api.Partition, nv network.Version) ([][]api.Partition, error) {
|
||||||
// We don't want to exceed the number of sectors allowed in a message.
|
// We don't want to exceed the number of sectors allowed in a message.
|
||||||
// So given the number of sectors in a partition, work out the number of
|
// So given the number of sectors in a partition, work out the number of
|
||||||
// partitions that can be in a message without exceeding sectors per
|
// partitions that can be in a message without exceeding sectors per
|
||||||
@ -524,21 +526,33 @@ func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition, nv net
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// The number of messages will be:
|
batches := [][]api.Partition{}
|
||||||
// ceiling(number of partitions / partitions per message)
|
|
||||||
batchCount := len(partitions) / partitionsPerMsg
|
currBatch := []api.Partition{}
|
||||||
if len(partitions)%partitionsPerMsg != 0 {
|
for _, partition := range partitions {
|
||||||
batchCount++
|
recSectors, err := partition.RecoveringSectors.Count()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Split the partitions into batches
|
// Only add single partition to a batch if it contains recovery sectors
|
||||||
batches := make([][]api.Partition, 0, batchCount)
|
// and has the below user config set
|
||||||
for i := 0; i < len(partitions); i += partitionsPerMsg {
|
if s.singleRecoveringPartitionPerPostMessage && recSectors > 0 {
|
||||||
end := i + partitionsPerMsg
|
if len(currBatch) > 0 {
|
||||||
if end > len(partitions) {
|
batches = append(batches, currBatch)
|
||||||
end = len(partitions)
|
currBatch = []api.Partition{}
|
||||||
}
|
}
|
||||||
batches = append(batches, partitions[i:end])
|
batches = append(batches, []api.Partition{partition})
|
||||||
|
} else {
|
||||||
|
if len(currBatch) >= partitionsPerMsg {
|
||||||
|
batches = append(batches, currBatch)
|
||||||
|
currBatch = []api.Partition{}
|
||||||
|
}
|
||||||
|
currBatch = append(currBatch, partition)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(currBatch) > 0 {
|
||||||
|
batches = append(batches, currBatch)
|
||||||
}
|
}
|
||||||
|
|
||||||
return batches, nil
|
return batches, nil
|
||||||
|
@ -177,6 +177,26 @@ func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPo
|
|||||||
return map[abi.SectorID]string{}, nil
|
return map[abi.SectorID]string{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func generatePartition(sectorCount uint64, recoverySectorCount uint64) api.Partition {
|
||||||
|
var partition api.Partition
|
||||||
|
sectors := bitfield.New()
|
||||||
|
recoverySectors := bitfield.New()
|
||||||
|
for s := uint64(0); s < sectorCount; s++ {
|
||||||
|
sectors.Set(s)
|
||||||
|
}
|
||||||
|
for s := uint64(0); s < recoverySectorCount; s++ {
|
||||||
|
recoverySectors.Set(s)
|
||||||
|
}
|
||||||
|
partition = api.Partition{
|
||||||
|
AllSectors: sectors,
|
||||||
|
FaultySectors: bitfield.New(),
|
||||||
|
RecoveringSectors: recoverySectors,
|
||||||
|
LiveSectors: sectors,
|
||||||
|
ActiveSectors: sectors,
|
||||||
|
}
|
||||||
|
return partition
|
||||||
|
}
|
||||||
|
|
||||||
// TestWDPostDoPost verifies that doPost will send the correct number of window
|
// TestWDPostDoPost verifies that doPost will send the correct number of window
|
||||||
// PoST messages for a given number of partitions
|
// PoST messages for a given number of partitions
|
||||||
func TestWDPostDoPost(t *testing.T) {
|
func TestWDPostDoPost(t *testing.T) {
|
||||||
@ -368,6 +388,55 @@ func TestWDPostDoPostPartLimitConfig(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestBatchPartitionsRecoverySectors tests if the batches with recovery sectors
|
||||||
|
// contain only single partitions while keeping all the partitions in order
|
||||||
|
func TestBatchPartitionsRecoverySectors(t *testing.T) {
|
||||||
|
|
||||||
|
proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
|
||||||
|
postAct := tutils.NewIDAddr(t, 100)
|
||||||
|
|
||||||
|
mockStgMinerAPI := newMockStorageMinerAPI()
|
||||||
|
|
||||||
|
userPartLimit := 4
|
||||||
|
|
||||||
|
scheduler := &WindowPoStScheduler{
|
||||||
|
api: mockStgMinerAPI,
|
||||||
|
prover: &mockProver{},
|
||||||
|
verifier: &mockVerif{},
|
||||||
|
faultTracker: &mockFaultTracker{},
|
||||||
|
proofType: proofType,
|
||||||
|
actor: postAct,
|
||||||
|
journal: journal.NilJournal(),
|
||||||
|
addrSel: &ctladdr.AddressSelector{},
|
||||||
|
|
||||||
|
maxPartitionsPerPostMessage: userPartLimit,
|
||||||
|
singleRecoveringPartitionPerPostMessage: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
var partitions []api.Partition
|
||||||
|
for p := 0; p < 4; p++ {
|
||||||
|
partitions = append(partitions, generatePartition(100, 0))
|
||||||
|
}
|
||||||
|
for p := 0; p < 2; p++ {
|
||||||
|
partitions = append(partitions, generatePartition(100, 10))
|
||||||
|
}
|
||||||
|
for p := 0; p < 6; p++ {
|
||||||
|
partitions = append(partitions, generatePartition(100, 0))
|
||||||
|
}
|
||||||
|
partitions = append(partitions, generatePartition(100, 10))
|
||||||
|
|
||||||
|
expectedBatchLens := []int{4, 1, 1, 4, 2, 1}
|
||||||
|
|
||||||
|
batches, err := scheduler.BatchPartitions(partitions, network.Version16)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, len(batches), 6)
|
||||||
|
|
||||||
|
for i, batch := range batches {
|
||||||
|
require.Equal(t, len(batch), expectedBatchLens[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// TestWDPostDeclareRecoveriesPartLimitConfig verifies that declareRecoveries will send the correct number of
|
// TestWDPostDeclareRecoveriesPartLimitConfig verifies that declareRecoveries will send the correct number of
|
||||||
// DeclareFaultsRecovered messages for a given number of partitions based on user config
|
// DeclareFaultsRecovered messages for a given number of partitions based on user config
|
||||||
func TestWDPostDeclareRecoveriesPartLimitConfig(t *testing.T) {
|
func TestWDPostDeclareRecoveriesPartLimitConfig(t *testing.T) {
|
||||||
|
@ -75,6 +75,7 @@ type WindowPoStScheduler struct {
|
|||||||
disablePreChecks bool
|
disablePreChecks bool
|
||||||
maxPartitionsPerPostMessage int
|
maxPartitionsPerPostMessage int
|
||||||
maxPartitionsPerRecoveryMessage int
|
maxPartitionsPerRecoveryMessage int
|
||||||
|
singleRecoveringPartitionPerPostMessage bool
|
||||||
ch *changeHandler
|
ch *changeHandler
|
||||||
|
|
||||||
actor address.Address
|
actor address.Address
|
||||||
@ -113,6 +114,7 @@ func NewWindowedPoStScheduler(api NodeAPI,
|
|||||||
disablePreChecks: pcfg.DisableWDPoStPreChecks,
|
disablePreChecks: pcfg.DisableWDPoStPreChecks,
|
||||||
maxPartitionsPerPostMessage: pcfg.MaxPartitionsPerPoStMessage,
|
maxPartitionsPerPostMessage: pcfg.MaxPartitionsPerPoStMessage,
|
||||||
maxPartitionsPerRecoveryMessage: pcfg.MaxPartitionsPerRecoveryMessage,
|
maxPartitionsPerRecoveryMessage: pcfg.MaxPartitionsPerRecoveryMessage,
|
||||||
|
singleRecoveringPartitionPerPostMessage: pcfg.SingleRecoveringPartitionPerPostMessage,
|
||||||
actor: actor,
|
actor: actor,
|
||||||
evtTypes: [...]journal.EventType{
|
evtTypes: [...]journal.EventType{
|
||||||
evtTypeWdPoStScheduler: j.RegisterEventType("wdpost", "scheduler"),
|
evtTypeWdPoStScheduler: j.RegisterEventType("wdpost", "scheduler"),
|
||||||
|
Loading…
Reference in New Issue
Block a user