Add ability to have only a single partition per message for partitions with recovery sectors

Shrenuj Bansal 2022-10-04 18:33:18 +00:00
parent fb3d5e47d5
commit dc247cc57d
4 changed files with 136 additions and 43 deletions

View File

@@ -276,11 +276,9 @@ type ProvingConfig struct {
// A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
//
// The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
// means that a single message can prove at most 10 partinions
// means that a single message can prove at most 10 partitions
//
// In some cases when submitting PoSt messages which are recovering sectors, the default network limit may still be
// too high to fit in the block gas limit; In those cases it may be necessary to set this value to something lower
// than 10; Note that setting this value lower may result in less efficient gas use - more messages will be sent,
// Note that setting this value lower may result in less efficient gas use - more messages will be sent,
// to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
//
// Setting this value above the network limit has no effect
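For concreteness, the arithmetic described in the comment above can be sketched as a standalone snippet; the helper name partitionsPerMessage is invented for illustration and this is not the lotus implementation itself.

package main

import "fmt"

// partitionsPerMessage sketches the cap described above: floor(max sectors
// allowed in one PoSt message / sectors per partition), optionally reduced
// further by a user-configured partition limit.
func partitionsPerMessage(maxSectorsPerMsg, sectorsPerPartition uint64, userLimit int) int {
	ppm := int(maxSectorsPerMsg / sectorsPerPartition)
	if userLimit > 0 && ppm > userLimit {
		ppm = userLimit
	}
	return ppm
}

func main() {
	// nv16 figures quoted in the comment: 25000 sectors per message, 2349 sectors per 32GiB partition
	fmt.Println(partitionsPerMessage(25000, 2349, 0)) // 10
	fmt.Println(partitionsPerMessage(25000, 2349, 3)) // 3, capped by the user limit
}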
@@ -294,6 +292,16 @@ type ProvingConfig struct {
// Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
// resulting in more total gas use (but each message will have lower gas limit)
MaxPartitionsPerRecoveryMessage int
// Enable single partition per PoSt message for partitions containing recovery sectors
//
// In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
// too high to fit in the block gas limit. In those cases, it can be useful to have the partition with recovery
// sectors sit alone in its PoSt message
//
// Note that enabling this option may result in less efficient gas use - more messages will be sent
// to prove each deadline, resulting in more total gas use (but each message will have a lower gas limit)
SingleRecoveringPartitionPerPostMessage bool
}
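As a rough sketch of how an operator might set the new option programmatically, the snippet below populates the relevant ProvingConfig fields; the node/config import path is assumed and the literal values are illustrative, not defaults.

package main

import "github.com/filecoin-project/lotus/node/config"

func main() {
	// Illustrative values only; all other ProvingConfig fields keep their zero values here.
	pcfg := config.ProvingConfig{
		MaxPartitionsPerPoStMessage:             0,    // 0 = use the network limit
		MaxPartitionsPerRecoveryMessage:         0,
		SingleRecoveringPartitionPerPostMessage: true, // each recovering partition gets its own PoSt message
	}
	_ = pcfg
}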
type SealingConfig struct {

View File

@@ -286,7 +286,7 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, manual bool, di
// Split partitions into batches, so as not to exceed the number of sectors
// allowed in a single message
partitionBatches, err := s.batchPartitions(partitions, nv)
partitionBatches, err := s.BatchPartitions(partitions, nv)
if err != nil {
return nil, err
}
@@ -492,7 +492,9 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, manual bool, di
return posts, nil
}
func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition, nv network.Version) ([][]api.Partition, error) {
// Note: Partition order within batches must match the original partition order in order
// for the code that consumes these batches to work correctly
func (s *WindowPoStScheduler) BatchPartitions(partitions []api.Partition, nv network.Version) ([][]api.Partition, error) {
// We don't want to exceed the number of sectors allowed in a message.
// So given the number of sectors in a partition, work out the number of
// partitions that can be in a message without exceeding sectors per
@@ -524,21 +526,33 @@ func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition, nv net
}
}
// The number of messages will be:
// ceiling(number of partitions / partitions per message)
batchCount := len(partitions) / partitionsPerMsg
if len(partitions)%partitionsPerMsg != 0 {
batchCount++
}
batches := [][]api.Partition{}
// Split the partitions into batches
batches := make([][]api.Partition, 0, batchCount)
for i := 0; i < len(partitions); i += partitionsPerMsg {
end := i + partitionsPerMsg
if end > len(partitions) {
end = len(partitions)
currBatch := []api.Partition{}
for i := 0; i < len(partitions); i++ {
recSectors, err := partitions[i].RecoveringSectors.Count()
if err != nil {
return nil, err
}
batches = append(batches, partitions[i:end])
// Give a partition its own single-partition batch if it contains recovery sectors
// and the user config below is set
if s.singleRecoveringPartitionPerPostMessage && recSectors > 0 {
if len(currBatch) > 0 {
batches = append(batches, currBatch)
currBatch = []api.Partition{}
}
batches = append(batches, []api.Partition{partitions[i]})
} else {
if len(currBatch) >= partitionsPerMsg {
batches = append(batches, currBatch)
currBatch = []api.Partition{}
}
currBatch = append(currBatch, partitions[i])
}
}
if len(currBatch) > 0 {
batches = append(batches, currBatch)
}
return batches, nil
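To make the new grouping behaviour concrete, here is a self-contained sketch that mirrors the loop above using one boolean per partition (true meaning the partition has recovering sectors); the function name batchSizes is invented for the example.

package main

import "fmt"

// batchSizes mirrors the batching loop above: a partition with recovering
// sectors closes the batch currently being filled and goes into its own
// single-partition batch when singleRecovering is set; everything else is
// grouped up to partitionsPerMsg, preserving the input order throughout.
func batchSizes(hasRecovering []bool, partitionsPerMsg int, singleRecovering bool) []int {
	var batches []int
	curr := 0
	for _, rec := range hasRecovering {
		if singleRecovering && rec {
			if curr > 0 {
				batches = append(batches, curr)
				curr = 0
			}
			batches = append(batches, 1)
		} else {
			if curr >= partitionsPerMsg {
				batches = append(batches, curr)
				curr = 0
			}
			curr++
		}
	}
	if curr > 0 {
		batches = append(batches, curr)
	}
	return batches
}

func main() {
	// 4 healthy, 2 recovering, 6 healthy, 1 recovering - the same shape the test below uses
	parts := []bool{false, false, false, false, true, true, false, false, false, false, false, false, true}
	fmt.Println(batchSizes(parts, 4, true)) // [4 1 1 4 2 1]
}

Because partitions are consumed strictly in order and a recovering partition only closes whatever batch was being filled, the relative order of partitions across batches is preserved, which is what the note above the function requires.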

View File

@@ -177,6 +177,26 @@ func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPo
return map[abi.SectorID]string{}, nil
}
func generatePartition(sectorCount uint64, recoverySectorCount uint64) api.Partition {
var partition api.Partition
sectors := bitfield.New()
recoverySectors := bitfield.New()
for s := uint64(0); s < sectorCount; s++ {
sectors.Set(s)
}
for s := uint64(0); s < recoverySectorCount; s++ {
recoverySectors.Set(s)
}
partition = api.Partition{
AllSectors: sectors,
FaultySectors: bitfield.New(),
RecoveringSectors: recoverySectors,
LiveSectors: sectors,
ActiveSectors: sectors,
}
return partition
}
// TestWDPostDoPost verifies that doPost will send the correct number of window
// PoST messages for a given number of partitions
func TestWDPostDoPost(t *testing.T) {
@@ -368,6 +388,55 @@ func TestWDPostDoPostPartLimitConfig(t *testing.T) {
}
}
// TestBatchPartitionsRecoverySectors verifies that batches containing recovery sectors
// hold only a single partition each, while keeping all partitions in their original order
func TestBatchPartitionsRecoverySectors(t *testing.T) {
proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
postAct := tutils.NewIDAddr(t, 100)
mockStgMinerAPI := newMockStorageMinerAPI()
userPartLimit := 4
scheduler := &WindowPoStScheduler{
api: mockStgMinerAPI,
prover: &mockProver{},
verifier: &mockVerif{},
faultTracker: &mockFaultTracker{},
proofType: proofType,
actor: postAct,
journal: journal.NilJournal(),
addrSel: &ctladdr.AddressSelector{},
maxPartitionsPerPostMessage: userPartLimit,
singleRecoveringPartitionPerPostMessage: true,
}
var partitions []api.Partition
for p := 0; p < 4; p++ {
partitions = append(partitions, generatePartition(100, 0))
}
for p := 0; p < 2; p++ {
partitions = append(partitions, generatePartition(100, 10))
}
for p := 0; p < 6; p++ {
partitions = append(partitions, generatePartition(100, 0))
}
partitions = append(partitions, generatePartition(100, 10))
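// With maxPartitionsPerPostMessage = 4 and singleRecoveringPartitionPerPostMessage enabled, the
// 13 partitions above (4 healthy, 2 recovering, 6 healthy, 1 recovering) should be grouped as
// [4] [1] [1] [4] [2] [1], preserving the original partition order.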
expectedBatchLens := []int{4, 1, 1, 4, 2, 1}
batches, err := scheduler.BatchPartitions(partitions, network.Version16)
require.NoError(t, err)
require.Equal(t, len(batches), 6)
for i, batch := range batches {
require.Equal(t, len(batch), expectedBatchLens[i])
}
}
// TestWDPostDeclareRecoveriesPartLimitConfig verifies that declareRecoveries will send the correct number of
// DeclareFaultsRecovered messages for a given number of partitions based on user config
func TestWDPostDeclareRecoveriesPartLimitConfig(t *testing.T) {

View File

@@ -64,18 +64,19 @@ type NodeAPI interface {
// WindowPoStScheduler watches the chain through the changeHandler, which in
// turn calls the scheduler when the time arrives to do work.
type WindowPoStScheduler struct {
api NodeAPI
feeCfg config.MinerFeeConfig
addrSel *ctladdr.AddressSelector
prover storiface.ProverPoSt
verifier storiface.Verifier
faultTracker sealer.FaultTracker
proofType abi.RegisteredPoStProof
partitionSectors uint64
disablePreChecks bool
maxPartitionsPerPostMessage int
maxPartitionsPerRecoveryMessage int
ch *changeHandler
api NodeAPI
feeCfg config.MinerFeeConfig
addrSel *ctladdr.AddressSelector
prover storiface.ProverPoSt
verifier storiface.Verifier
faultTracker sealer.FaultTracker
proofType abi.RegisteredPoStProof
partitionSectors uint64
disablePreChecks bool
maxPartitionsPerPostMessage int
maxPartitionsPerRecoveryMessage int
singleRecoveringPartitionPerPostMessage bool
ch *changeHandler
actor address.Address
@@ -102,18 +103,19 @@ func NewWindowedPoStScheduler(api NodeAPI,
}
return &WindowPoStScheduler{
api: api,
feeCfg: cfg,
addrSel: as,
prover: sp,
verifier: verif,
faultTracker: ft,
proofType: mi.WindowPoStProofType,
partitionSectors: mi.WindowPoStPartitionSectors,
disablePreChecks: pcfg.DisableWDPoStPreChecks,
maxPartitionsPerPostMessage: pcfg.MaxPartitionsPerPoStMessage,
maxPartitionsPerRecoveryMessage: pcfg.MaxPartitionsPerRecoveryMessage,
actor: actor,
api: api,
feeCfg: cfg,
addrSel: as,
prover: sp,
verifier: verif,
faultTracker: ft,
proofType: mi.WindowPoStProofType,
partitionSectors: mi.WindowPoStPartitionSectors,
disablePreChecks: pcfg.DisableWDPoStPreChecks,
maxPartitionsPerPostMessage: pcfg.MaxPartitionsPerPoStMessage,
maxPartitionsPerRecoveryMessage: pcfg.MaxPartitionsPerRecoveryMessage,
singleRecoveringPartitionPerPostMessage: pcfg.SingleRecoveringPartitionPerPostMessage,
actor: actor,
evtTypes: [...]journal.EventType{
evtTypeWdPoStScheduler: j.RegisterEventType("wdpost", "scheduler"),
evtTypeWdPoStProofs: j.RegisterEventType("wdpost", "proofs_processed"),