stores: Fix single post proof reading with updated sectors

Łukasz Magiera 2022-01-20 14:59:57 +01:00
parent e476cf7968
commit 33dfcf2663
6 changed files with 37 additions and 15 deletions

View File

@@ -4,6 +4,7 @@ import (
     "context"
     "crypto/rand"
     "fmt"
+    "time"
 
     "golang.org/x/xerrors"
@@ -14,6 +15,8 @@ import (
     "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
 )
 
+var PostCheckTimeout = 160 * time.Second
+
 // FaultTracker TODO: Track things more actively
 type FaultTracker interface {
     CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error)
@@ -68,7 +71,10 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof,
         return nil
     }
 
-    _, err = m.storage.GenerateSingleVanillaProof(ctx, sector.ID.Miner, storiface.PostSectorChallenge{
+    vctx, cancel2 := context.WithTimeout(ctx, PostCheckTimeout)
+    defer cancel2()
+
+    _, err = m.storage.GenerateSingleVanillaProof(vctx, sector.ID.Miner, storiface.PostSectorChallenge{
         SealProof:    sector.ProofType,
         SectorNumber: sector.ID.Number,
         SealedCID:    commr,
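
The timeout added around GenerateSingleVanillaProof follows the usual Go pattern of deriving a bounded child context for a single call, so a sector whose storage has gone unresponsive is reported as bad instead of stalling the whole provable check. A minimal sketch of that pattern; slowProofRead and the 1-second deadline are illustrative stand-ins, not lotus code:

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

// slowProofRead is a hypothetical stand-in for a call like
// GenerateSingleVanillaProof that can block on unresponsive storage.
func slowProofRead(ctx context.Context) ([]byte, error) {
    select {
    case <-time.After(5 * time.Second): // pretend the read takes 5s
        return []byte("proof"), nil
    case <-ctx.Done():
        return nil, ctx.Err()
    }
}

func main() {
    // Mirror the pattern in the diff: derive a child context with a
    // deadline (PostCheckTimeout is 160s there; 1s here so the sketch
    // actually times out) and cancel it once the check is done.
    vctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
    defer cancel()

    if _, err := slowProofRead(vctx); errors.Is(err, context.DeadlineExceeded) {
        fmt.Println("marking sector as bad: proof read timed out")
    }
}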

View File

@@ -1,10 +1,12 @@
 package stores
 
 import (
+    "bytes"
     "encoding/json"
     "net/http"
     "os"
     "strconv"
+    "time"
 
     "github.com/gorilla/mux"
     logging "github.com/ipfs/go-log/v2"
@@ -306,11 +308,8 @@ func (handler *FetchHandler) generateSingleVanillaProof(w http.ResponseWriter, r
         return
     }
 
-    w.WriteHeader(200)
-    _, err = w.Write(vanilla)
-    if err != nil {
-        log.Error("response writer: ", err)
-    }
+    w.Header().Set("Content-Type", "application/octet-stream")
+    http.ServeContent(w, r, "", time.Time{}, bytes.NewReader(vanilla))
 }
 
 func ftFromString(t string) (storiface.SectorFileType, error) {
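
The handler change above swaps a bare w.Write for http.ServeContent from the standard library, which takes an io.ReadSeeker (bytes.NewReader provides one), sets Content-Length, and handles Range and conditional requests itself. A self-contained sketch of the same pattern; the /proof route and payload are made up for illustration:

package main

import (
    "bytes"
    "net/http"
    "time"
)

func main() {
    // Serve an in-memory byte slice the same way the updated handler
    // serves the vanilla proof: bytes.NewReader satisfies io.ReadSeeker,
    // and ServeContent fills in Content-Length and honors Range requests.
    payload := []byte("example vanilla proof bytes")

    http.HandleFunc("/proof", func(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Content-Type", "application/octet-stream")
        // Empty name and zero modtime: no content type is guessed from a
        // file name and no Last-Modified header is sent, as in the diff.
        http.ServeContent(w, r, "", time.Time{}, bytes.NewReader(payload))
    })

    _ = http.ListenAndServe(":8080", nil)
}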

View File

@@ -722,11 +722,11 @@ func (st *Local) GenerateSingleVanillaProof(ctx context.Context, minerID abi.Act
     var cache string
     var sealed string
     if si.Update {
-        src, _, err := st.AcquireSector(ctx, sr, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
+        src, _, err := st.AcquireSector(ctx, sr, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
         if err != nil {
             return nil, xerrors.Errorf("acquire sector: %w", err)
         }
-        cache, sealed = src.Update, src.UpdateCache
+        cache, sealed = src.UpdateCache, src.Update
     } else {
         src, _, err := st.AcquireSector(ctx, sr, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)
         if err != nil {

View File

@@ -750,7 +750,12 @@ func (r *Remote) GenerateSingleVanillaProof(ctx context.Context, minerID abi.Act
         Number: sinfo.SectorNumber,
     }
 
-    si, err := r.index.StorageFindSector(ctx, sid, storiface.FTSealed|storiface.FTCache, 0, false)
+    ft := storiface.FTSealed | storiface.FTCache
+    if sinfo.Update {
+        ft = storiface.FTUpdate | storiface.FTUpdateCache
+    }
+
+    si, err := r.index.StorageFindSector(ctx, sid, ft, 0, false)
     if err != nil {
         return nil, xerrors.Errorf("finding sector %d failed: %w", sid, err)
     }
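
Both the Local and Remote implementations of GenerateSingleVanillaProof now choose which files to read based on whether the sector was upgraded: updated sectors live in the update/update-cache file types, ordinary sectors in sealed/cache. A standalone sketch of that selection; the SectorFileType constants below are simplified stand-ins for the real storiface flags:

package main

import "fmt"

// SectorFileType is a hypothetical stand-in for the storiface bitmask
// used in the diff; the real constants live in the lotus storiface package.
type SectorFileType int

const (
    FTSealed SectorFileType = 1 << iota
    FTCache
    FTUpdate
    FTUpdateCache
)

// fileTypesFor mirrors the selection added in both Local and Remote:
// updated (snap-deal) sectors read the replica-update files, everything
// else reads the sealed files.
func fileTypesFor(update bool) SectorFileType {
    if update {
        return FTUpdate | FTUpdateCache
    }
    return FTSealed | FTCache
}

func main() {
    fmt.Printf("regular sector: %04b\n", fileTypesFor(false))
    fmt.Printf("updated sector: %04b\n", fileTypesFor(true))
}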

View File

@@ -63,11 +63,10 @@ func (p *partitionTracker) done(t *testing.T) bool {
     return uint64(len(p.partitions)) == p.count(t)
 }
 
-func (p *partitionTracker) recordIfPost(t *testing.T, bm *BlockMiner, smsg *types.SignedMessage) (ret bool) {
+func (p *partitionTracker) recordIfPost(t *testing.T, bm *BlockMiner, msg *types.Message) (ret bool) {
     defer func() {
         ret = p.done(t)
     }()
-    msg := smsg.Message
     if !(msg.To == bm.miner.ActorAddr) {
         return
     }
@@ -124,7 +123,7 @@ func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Dur
     tracker := newPartitionTracker(ctx, dlinfo.Index, bm)
     if !tracker.done(bm.t) { // need to wait for post
-        bm.t.Logf("expect %d partitions proved but only see %d", len(tracker.partitions), tracker.count(bm.t))
+        bm.t.Logf("expect %d partitions proved but only see %d (dl:%d, epoch:%d)", len(tracker.partitions), tracker.count(bm.t), dlinfo.Index, ts.Height())
         poolEvts, err := bm.miner.FullNode.MpoolSub(ctx)
         require.NoError(bm.t, err)
@@ -132,7 +131,19 @@ func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Dur
         msgs, err := bm.miner.FullNode.MpoolPending(ctx, types.EmptyTSK)
         require.NoError(bm.t, err)
         for _, msg := range msgs {
-            tracker.recordIfPost(bm.t, bm, msg)
+            tracker.recordIfPost(bm.t, bm, &msg.Message)
+        }
+
+        // Account for included but not yet executed messages
+        for _, bc := range ts.Cids() {
+            msgs, err := bm.miner.FullNode.ChainGetBlockMessages(ctx, bc)
+            require.NoError(bm.t, err)
+            for _, msg := range msgs.BlsMessages {
+                tracker.recordIfPost(bm.t, bm, msg)
+            }
+            for _, msg := range msgs.SecpkMessages {
+                tracker.recordIfPost(bm.t, bm, &msg.Message)
+            }
         }
 
         // post not yet in mpool, wait for it
@@ -148,7 +159,7 @@ func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Dur
     bm.t.Logf("pool event: %d", evt.Type)
     if evt.Type == api.MpoolAdd {
         bm.t.Logf("incoming message %v", evt.Message)
-        if tracker.recordIfPost(bm.t, bm, evt.Message) {
+        if tracker.recordIfPost(bm.t, bm, &evt.Message.Message) {
             break POOL
         }
     }
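
The test helper now takes *types.Message directly, so the same recordIfPost check can be run against pending mempool messages, BLS messages already included in a block, and secp messages, which wrap an unsigned message. A rough sketch of that normalization; the Message and SignedMessage structs below are simplified stand-ins for the lotus chain types:

package main

import "fmt"

// Simplified stand-ins for the lotus types used by the test helper.
type Message struct{ To string }
type SignedMessage struct{ Message Message }

type BlockMessages struct {
    BlsMessages   []*Message       // already unsigned
    SecpkMessages []*SignedMessage // wrap an unsigned Message
}

// recordIfPost is a stand-in for the tracker method: it now receives the
// unsigned message regardless of where the message came from.
func recordIfPost(msg *Message) {
    fmt.Println("checked message to", msg.To)
}

func main() {
    bm := BlockMessages{
        BlsMessages:   []*Message{{To: "f01000"}},
        SecpkMessages: []*SignedMessage{{Message: Message{To: "f01000"}}},
    }

    // Mirror the loops added in the diff: BLS messages are passed directly,
    // secp messages are unwrapped to their inner Message first.
    for _, m := range bm.BlsMessages {
        recordIfPost(m)
    }
    for _, sm := range bm.SecpkMessages {
        recordIfPost(&sm.Message)
    }
}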

View File

@@ -868,7 +868,7 @@ func (s *WindowPoStScheduler) submitPoStMessage(ctx context.Context, proof *mine
     return nil, xerrors.Errorf("pushing message to mpool: %w", err)
 }
 
-log.Infof("Submitted window post: %s", sm.Cid())
+log.Infof("Submitted window post: %s (deadline %d)", sm.Cid(), proof.Deadline)
 
 go func() {
     rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
@@ -878,6 +878,7 @@
     }
 
     if rec.Receipt.ExitCode == 0 {
+        log.Infow("Window post submission successful", "cid", sm.Cid(), "deadline", proof.Deadline, "epoch", rec.Height, "ts", rec.TipSet.Cids())
         return
     }