fix: storage - move rand generation after proofs

parent f0f15f899c
commit db998e9c0f
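
Taken together, the hunks below do two things in the window PoSt path: the per-deadline proofs are generated first and the chain-commit randomness is only fetched afterwards (per the new comment, to reduce the impact of chain reorgs, which change randomness), and the submission loop in doPost now records a proofs event only when submitPost actually returned a message. Below is a minimal sketch of the proofs-then-randomness ordering, using hypothetical trimmed-down stand-ins rather than the real Lotus types, and assuming (the diff does not show it) that the trailing `for i := range posts` loop stamps the commit epoch and randomness onto each queued message:

// Sketch only: hypothetical stand-ins, not the real Lotus types or APIs.
package main

import "fmt"

// windowedPoSt is a trimmed-down stand-in for miner.SubmitWindowedPoStParams.
type windowedPoSt struct {
	Deadline         uint64
	Proofs           []byte
	ChainCommitEpoch int64
	ChainCommitRand  []byte
}

// generateProofs stands in for the slow per-batch proving loop in runPost.
func generateProofs() []windowedPoSt {
	return []windowedPoSt{{Deadline: 0, Proofs: []byte("proof-batch-0")}}
}

// commitRandomness stands in for ChainGetRandomnessFromTickets at the
// deadline's open epoch.
func commitRandomness(epoch int64) []byte {
	return []byte(fmt.Sprintf("rand@%d", epoch))
}

func main() {
	// 1. Generate all proofs first (this can take a long time).
	posts := generateProofs()

	// 2. Only then fetch the chain-commit randomness, so a reorg during the
	//    long proving step has less chance of invalidating the randomness we
	//    commit to (per the comment added in the last hunk of the diff).
	commEpoch := int64(90)
	commRand := commitRandomness(commEpoch)

	// 3. Stamp the same commit epoch and randomness onto every queued message
	//    (assumed behaviour of the trailing `for i := range posts` loop).
	for i := range posts {
		posts[i].ChainCommitEpoch = commEpoch
		posts[i].ChainCommitRand = commRand
	}
	fmt.Printf("%+v\n", posts)
}
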
@@ -90,12 +90,13 @@ func (s *WindowPoStScheduler) doPost(ctx context.Context, deadline *dline.Info,
 
 	for i := range posts {
 		post := &posts[i]
-		sm, err := s.submitPost(ctx, &posts[i])
+		sm, err := s.submitPost(ctx, post)
 		if err != nil {
 			log.Errorf("submitPost failed: %+v", err)
 			s.failPost(err, deadline)
+		} else {
+			recordProofsEvent(post.Partitions, sm.Cid())
 		}
-		recordProofsEvent(post.Partitions, sm.Cid())
 	}
 
 	journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
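
The doPost hunk above is not just a tidy-up: previously recordProofsEvent(post.Partitions, sm.Cid()) ran even when submitPost returned an error, and on that path sm is presumably nil, so sm.Cid() would panic. Moving the call into an else branch keeps the return value untouched on the failure path. Here is a minimal sketch of the corrected shape, with hypothetical stand-ins for the Lotus types:

// Sketch only: hypothetical stand-ins, not the Lotus types.
package main

import (
	"errors"
	"log"
)

type signedMsg struct{ cid string }

func (m *signedMsg) Cid() string { return m.cid }

// submitPost stands in for WindowPoStScheduler.submitPost.
func submitPost(ok bool) (*signedMsg, error) {
	if !ok {
		return nil, errors.New("push failed")
	}
	return &signedMsg{cid: "bafy-example"}, nil
}

func main() {
	for _, ok := range []bool{true, false} {
		sm, err := submitPost(ok)
		if err != nil {
			// Failure path: log and record the failure, but never touch sm,
			// which is nil here.
			log.Printf("submitPost failed: %+v", err)
		} else {
			// Success path only: safe to read the message CID.
			log.Printf("recorded proofs event for %s", sm.Cid())
		}
	}
}
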
@@ -412,6 +413,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
 		return nil, err
 	}
 
+	// Generate proofs in batches
 	posts := make([]miner.SubmitWindowedPoStParams, 0, len(partitionBatches))
 	for batchIdx, batch := range partitionBatches {
 		batchPartitionStartIdx := 0
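
The new "Generate proofs in batches" comment makes the surrounding structure explicit: the deadline's partitions are grouped into partitionBatches, and one SubmitWindowedPoStParams message is built per batch. The grouping itself happens earlier in runPost and is not part of this diff; the following is a generic sketch of that kind of fixed-size chunking, purely as an illustration of the assumed batching scheme:

// Sketch only: generic fixed-size chunking; the real batch size in Lotus is
// policy-driven, this is just an assumed illustration.
package main

import "fmt"

// chunkPartitions splits a partition index list into batches of at most size.
func chunkPartitions(partitions []int, size int) [][]int {
	var batches [][]int
	for len(partitions) > 0 {
		n := size
		if n > len(partitions) {
			n = len(partitions)
		}
		batches = append(batches, partitions[:n])
		partitions = partitions[n:]
	}
	return batches
}

func main() {
	partitionBatches := chunkPartitions([]int{0, 1, 2, 3, 4, 5, 6}, 3)
	for batchIdx, batch := range partitionBatches {
		fmt.Println("batch", batchIdx, "partitions", batch)
	}
}
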
@@ -425,18 +427,12 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
 			Proofs:     nil,
 		}
 
-		//var sinfos []proof.SectorInfo
-		//sidToPart := map[abi.SectorNumber]uint64{}
-		//skipCount := uint64(0)
-
 		skipCount := uint64(0)
 		postSkipped := bitfield.New()
 		var postOut []proof.PoStProof
-		var sinfos []proof.SectorInfo
+		somethingToProve := true
 
 		for retries := 0; retries < 5; retries++ {
-			sinfos = make([]proof.SectorInfo, 0)
-
+			var sinfos []proof.SectorInfo
 			for partIdx, partition := range batch {
 				// TODO: Can do this in parallel
 				toProve, err := partition.ActiveSectors()
@@ -488,11 +484,12 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
 			}
 
 			if len(sinfos) == 0 {
-				// nothing to prove..
-				//return nil, errNoPartitions
+				// nothing to prove for this batch
+				somethingToProve = false
 				break
 			}
 
+			// Generate proof
 			log.Infow("running windowPost",
 				"chain-random", rand,
 				"deadline", di,
@@ -513,9 +510,12 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
 			log.Infow("computing window PoSt", "batch", batchIdx, "elapsed", elapsed)
 
 			if err == nil {
+				// Proof generation successful, stop retrying
 				break
 			}
 
+			// Proof generation failed, so retry
+
 			if len(ps) == 0 {
 				return nil, xerrors.Errorf("running post failed: %w", err)
 			}
@@ -528,7 +528,8 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
 			}
 		}
 
-		if len(sinfos) == 0 {
+		// Nothing to prove for this batch, try the next batch
+		if !somethingToProve {
 			continue
 		}
 
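
The hunks above rework the retry loop around proof generation: sinfos is now rebuilt inside every retry (so it no longer outlives the loop), and the somethingToProve flag replaces the later len(sinfos) == 0 check when deciding whether to skip the batch entirely. A condensed sketch of that control flow, with hypothetical stand-ins for the prover, sectors, and skip tracking:

// Sketch only: condensed control flow of the retry loop, with hypothetical
// stand-ins for the prover, sectors, and skip tracking.
package main

import (
	"errors"
	"fmt"
)

type sector struct{ id int }

// prove stands in for the window PoSt prover; it reports which sectors it
// had to skip so the next attempt can leave them out.
func prove(sectors []sector) (proof []byte, skipped []sector, err error) {
	if len(sectors) > 2 {
		return nil, sectors[:1], errors.New("bad sector")
	}
	return []byte("proof"), nil, nil
}

func main() {
	batch := []sector{{0}, {1}, {2}}
	skip := map[int]bool{}

	var postOut []byte
	somethingToProve := true

	for retries := 0; retries < 5; retries++ {
		// Rebuild the sector list on every attempt, dropping skipped sectors.
		var sinfos []sector
		for _, s := range batch {
			if !skip[s.id] {
				sinfos = append(sinfos, s)
			}
		}
		if len(sinfos) == 0 {
			// Nothing left to prove for this batch.
			somethingToProve = false
			break
		}

		out, skipped, err := prove(sinfos)
		if err == nil {
			// Proof generation successful, stop retrying.
			postOut = out
			break
		}
		if len(skipped) == 0 {
			fmt.Println("running post failed:", err)
			return
		}
		// Proof generation failed: mark the skipped sectors and retry.
		for _, s := range skipped {
			skip[s.id] = true
		}
	}

	if !somethingToProve {
		fmt.Println("nothing to prove for this batch, try the next batch")
		return
	}
	fmt.Printf("proof: %s\n", postOut)
}
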
@@ -541,10 +542,12 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
 		posts = append(posts, params)
 	}
 
+	// Compute randomness after generating proofs so as to reduce the impact
+	// of chain reorgs (which change randomness)
 	commEpoch := di.Open
 	commRand, err := s.api.ChainGetRandomnessFromTickets(ctx, ts.Key(), crypto.DomainSeparationTag_PoStChainCommit, commEpoch, nil)
 	if err != nil {
-		return nil, xerrors.Errorf("failed to get chain randomness for windowPost (ts=%d; deadline=%d): %w", ts.Height(), di, err)
+		return nil, xerrors.Errorf("failed to get chain randomness for windowPost (ts=%d; deadline=%d): %w", ts.Height(), commEpoch, err)
 	}
 
 	for i := range posts {