// stm: #unit
package sealer

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/google/uuid"
	"github.com/ipfs/go-datastore"
	syncds "github.com/ipfs/go-datastore/sync"
	logging "github.com/ipfs/go-log/v2"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/xerrors"

	ffi "github.com/filecoin-project/filecoin-ffi"
	"github.com/filecoin-project/go-paramfetch"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/proof"
	"github.com/filecoin-project/go-statestore"
	proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/storage/paths"
	"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
	"github.com/filecoin-project/lotus/storage/sealer/fsutil"
	"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

func init() {
	logging.SetAllLoggers(logging.LevelDebug)
}

type testStorage storiface.StorageConfig

func (t testStorage) DiskUsage(path string) (int64, error) {
	return 1, nil // close enough
}

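// newTestStorage creates a temporary on-disk storage path and writes a
// sectorstore.json metadata file marking it as usable for both sealing and
// long-term storage.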
func newTestStorage(t *testing.T) *testStorage {
	tp, err := os.MkdirTemp(os.TempDir(), "sealer-test-")
	require.NoError(t, err)

	{
		b, err := json.MarshalIndent(&storiface.LocalStorageMeta{
			ID:       storiface.ID(uuid.New().String()),
			Weight:   1,
			CanSeal:  true,
			CanStore: true,
		}, "", " ")
		require.NoError(t, err)

		err = os.WriteFile(filepath.Join(tp, "sectorstore.json"), b, 0644)
		require.NoError(t, err)
	}

	return &testStorage{
		StoragePaths: []storiface.LocalPath{
			{Path: tp},
		},
	}
}

func (t testStorage) cleanup() {
	noCleanup := os.Getenv("LOTUS_TEST_NO_CLEANUP") != ""
	for _, path := range t.StoragePaths {
		if noCleanup {
			fmt.Printf("Not cleaning up test storage at %s\n", path)
			continue
		}
		if err := os.RemoveAll(path.Path); err != nil {
			fmt.Println("Cleanup error:", err)
		}
	}
}

func (t testStorage) GetStorage() (storiface.StorageConfig, error) {
	return storiface.StorageConfig(t), nil
}

func (t *testStorage) SetStorage(f func(*storiface.StorageConfig)) error {
	f((*storiface.StorageConfig)(t))
	return nil
}

func (t *testStorage) Stat(path string) (fsutil.FsStat, error) {
	return fsutil.Statfs(path)
}

var _ paths.LocalStorage = &testStorage{}

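// newTestMgr wires up a Manager backed by a temporary local store, a sector
// index, a remote store and a fresh scheduler, and returns it together with
// the stores, the index and a cleanup func for the test storage path.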
func newTestMgr(ctx context.Context, t *testing.T, ds datastore.Datastore) (*Manager, *paths.Local, *paths.Remote, *paths.Index, func()) {
	st := newTestStorage(t)

	si := paths.NewIndex(nil)

	lstor, err := paths.NewLocal(ctx, st, si, nil)
	require.NoError(t, err)

	prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si})
	require.NoError(t, err)

	stor := paths.NewRemote(lstor, si, nil, 6000, &paths.DefaultPartialFileHandler{})

	sh, err := newScheduler(ctx, "")
	require.NoError(t, err)

	m := &Manager{
		ls:         st,
		storage:    stor,
		localStore: lstor,
		remoteHnd:  &paths.FetchHandler{Local: lstor},
		index:      si,

		sched:            sh,
		windowPoStSched:  newPoStScheduler(sealtasks.TTGenerateWindowPoSt),
		winningPoStSched: newPoStScheduler(sealtasks.TTGenerateWinningPoSt),

		localProver: prover,

		work:       statestore.New(ds),
		callToWork: map[storiface.CallID]WorkID{},
		callRes:    map[storiface.CallID]chan result{},
		results:    map[WorkID]result{},
		waitRes:    map[WorkID]chan struct{}{},
	}

	m.setupWorkTracker()

	go m.sched.runSched()

	return m, lstor, stor, si, st.cleanup
}

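// TestSimple adds two pieces to a 2KiB sector and runs PreCommit1 against a
// single test worker.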
func TestSimple(t *testing.T) {
	logging.SetAllLoggers(logging.LevelDebug)

	ctx := context.Background()
	m, lstor, _, _, cleanup := newTestMgr(ctx, t, syncds.MutexWrap(datastore.NewMapDatastore()))
	defer cleanup()

	localTasks := []sealtasks.TaskType{
		sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFinalizeUnsealed, sealtasks.TTFetch,
	}

	err := m.AddWorker(ctx, newTestWorker(WorkerConfig{
		TaskTypes: localTasks,
	}, lstor, m))
	require.NoError(t, err)

	sid := storiface.SectorRef{
		ID:        abi.SectorID{Miner: 1000, Number: 1},
		ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
	}

	pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127)))
	require.NoError(t, err)
	require.Equal(t, abi.PaddedPieceSize(1024), pi.Size)

	piz, err := m.AddPiece(ctx, sid, nil, 1016, bytes.NewReader(make([]byte, 1016)[:]))
	require.NoError(t, err)
	require.Equal(t, abi.PaddedPieceSize(1024), piz.Size)

	pieces := []abi.PieceInfo{pi, piz}

	ticket := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9}

	_, err = m.SealPreCommit1(ctx, sid, ticket, pieces)
	require.NoError(t, err)
}

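// Reader yields an endless stream of zero bytes; NullReader bounds it to a
// fixed unpadded piece size so the tests below can fill CC sectors with
// zeroed data.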
type Reader struct{}

func (Reader) Read(out []byte) (int, error) {
	for i := range out {
		out[i] = 0
	}
	return len(out), nil
}

type NullReader struct {
	*io.LimitedReader
}

func NewNullReader(size abi.UnpaddedPieceSize) io.Reader {
	return &NullReader{(io.LimitReader(&Reader{}, int64(size))).(*io.LimitedReader)}
}

func (m NullReader) NullBytes() int64 {
	return m.N
}

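// TestMain fetches the Groth parameters for 2KiB sectors before running the
// tests that exercise real proofs.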
func TestMain(m *testing.M) {
	err := paramfetch.GetParams(context.TODO(), build.ParametersJSON(), build.SrsJSON(), uint64(2048))
	if err != nil {
		panic(xerrors.Errorf("failed to acquire Groth parameters for 2KiB sectors: %w", err))
	}

	code := m.Run()
	os.Exit(code)
}

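// TestSnapDeals seals a CC sector, performs a snap-deals replica update with
// two pieces, proves and verifies the update, and then exercises unsealing
// and sector key regeneration.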
func TestSnapDeals(t *testing.T) {
	logging.SetAllLoggers(logging.LevelWarn)
	ctx := context.Background()
	m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, syncds.MutexWrap(datastore.NewMapDatastore()))
	defer cleanup()

	localTasks := []sealtasks.TaskType{
		sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit1, sealtasks.TTCommit2, sealtasks.TTFinalize,
		sealtasks.TTFetch, sealtasks.TTReplicaUpdate, sealtasks.TTProveReplicaUpdate1, sealtasks.TTProveReplicaUpdate2, sealtasks.TTUnseal,
		sealtasks.TTRegenSectorKey, sealtasks.TTFinalizeUnsealed,
	}
	wds := syncds.MutexWrap(datastore.NewMapDatastore())

	w := NewLocalWorker(WorkerConfig{TaskTypes: localTasks}, stor, lstor, idx, m, statestore.New(wds))
	err := m.AddWorker(ctx, w)
	require.NoError(t, err)

	proofType := abi.RegisteredSealProof_StackedDrg2KiBV1
	ptStr := os.Getenv("LOTUS_TEST_SNAP_DEALS_PROOF_TYPE")
	switch ptStr {
	case "2k":
	case "8M":
		proofType = abi.RegisteredSealProof_StackedDrg8MiBV1
	case "512M":
		proofType = abi.RegisteredSealProof_StackedDrg512MiBV1
	case "32G":
		proofType = abi.RegisteredSealProof_StackedDrg32GiBV1
	case "64G":
		proofType = abi.RegisteredSealProof_StackedDrg64GiBV1
	default:
		log.Warn("Unspecified proof type, make sure to set LOTUS_TEST_SNAP_DEALS_PROOF_TYPE to '2k', '8M', '512M', '32G' or '64G'")
		log.Warn("Continuing test with 2k sectors")
	}

	sid := storiface.SectorRef{
		ID:        abi.SectorID{Miner: 1000, Number: 1},
		ProofType: proofType,
	}
	ss, err := proofType.SectorSize()
	require.NoError(t, err)

	unpaddedSectorSize := abi.PaddedPieceSize(ss).Unpadded()

	// Pack sector with no pieces
	p0, err := m.AddPiece(ctx, sid, nil, unpaddedSectorSize, NewNullReader(unpaddedSectorSize))
	require.NoError(t, err)
	ccPieces := []abi.PieceInfo{p0}

	// Precommit and Seal a CC sector
	fmt.Printf("PC1\n")
	ticket := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}
	pc1Out, err := m.SealPreCommit1(ctx, sid, ticket, ccPieces)
	require.NoError(t, err)
	fmt.Printf("PC2\n")
	pc2Out, err := m.SealPreCommit2(ctx, sid, pc1Out)
	require.NoError(t, err)

	// Now do a snap deals replica update
	sectorKey := pc2Out.Sealed

	// Two pieces each half the size of the sector
	unpaddedPieceSize := unpaddedSectorSize / 2
	p1, err := m.AddPiece(ctx, sid, nil, unpaddedPieceSize, strings.NewReader(strings.Repeat("k", int(unpaddedPieceSize))))
	require.NoError(t, err)
	require.Equal(t, unpaddedPieceSize.Padded(), p1.Size)

	p2, err := m.AddPiece(ctx, sid, []abi.UnpaddedPieceSize{p1.Size.Unpadded()}, unpaddedPieceSize, strings.NewReader(strings.Repeat("j", int(unpaddedPieceSize))))
	require.NoError(t, err)
	require.Equal(t, unpaddedPieceSize.Padded(), p1.Size)

	pieces := []abi.PieceInfo{p1, p2}
	fmt.Printf("RU\n")
	startRU := time.Now()
	out, err := m.ReplicaUpdate(ctx, sid, pieces)
	require.NoError(t, err)
	fmt.Printf("RU duration (%s): %s\n", ss.ShortString(), time.Since(startRU))

	updateProofType, err := sid.ProofType.RegisteredUpdateProof()
	require.NoError(t, err)
	require.NotNil(t, out)
	fmt.Printf("PR1\n")
	startPR1 := time.Now()
	vanillaProofs, err := m.ProveReplicaUpdate1(ctx, sid, sectorKey, out.NewSealed, out.NewUnsealed)
	require.NoError(t, err)
	require.NotNil(t, vanillaProofs)
	fmt.Printf("PR1 duration (%s): %s\n", ss.ShortString(), time.Since(startPR1))
	fmt.Printf("PR2\n")
	startPR2 := time.Now()
	proof, err := m.ProveReplicaUpdate2(ctx, sid, sectorKey, out.NewSealed, out.NewUnsealed, vanillaProofs)
	require.NoError(t, err)
	require.NotNil(t, proof)
	fmt.Printf("PR2 duration (%s): %s\n", ss.ShortString(), time.Since(startPR2))

	vInfo := proof7.ReplicaUpdateInfo{
		Proof:                proof,
		UpdateProofType:      updateProofType,
		OldSealedSectorCID:   sectorKey,
		NewSealedSectorCID:   out.NewSealed,
		NewUnsealedSectorCID: out.NewUnsealed,
	}
	pass, err := ffiwrapper.ProofVerifier.VerifyReplicaUpdate(vInfo)
	require.NoError(t, err)
	assert.True(t, pass)

	fmt.Printf("Decode\n")
	// Remove unsealed data and decode for retrieval
	require.NoError(t, m.ReleaseUnsealed(ctx, sid, nil))
	startDecode := time.Now()
	require.NoError(t, m.SectorsUnsealPiece(ctx, sid, 0, p1.Size.Unpadded(), ticket, &out.NewUnsealed))
	fmt.Printf("Decode duration (%s): %s\n", ss.ShortString(), time.Since(startDecode))

	// Remove just the first piece and decode for retrieval
	require.NoError(t, m.ReleaseUnsealed(ctx, sid, []storiface.Range{{Offset: p1.Size.Unpadded(), Size: p2.Size.Unpadded()}}))
	require.NoError(t, m.SectorsUnsealPiece(ctx, sid, 0, p1.Size.Unpadded(), ticket, &out.NewUnsealed))

	fmt.Printf("GSK\n")
	require.NoError(t, m.ReleaseSectorKey(ctx, sid))
	startGSK := time.Now()
	require.NoError(t, m.GenerateSectorKeyFromData(ctx, sid, out.NewUnsealed))
	fmt.Printf("GSK duration (%s): %s\n", ss.ShortString(), time.Since(startGSK))

	fmt.Printf("Remove data\n")
	require.NoError(t, m.ReleaseUnsealed(ctx, sid, nil))
	fmt.Printf("Release Sector Key\n")
	require.NoError(t, m.ReleaseSectorKey(ctx, sid))
	fmt.Printf("Unseal Replica\n")
	require.NoError(t, m.SectorsUnsealPiece(ctx, sid, 0, p1.Size.Unpadded(), ticket, &out.NewUnsealed))
}

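// TestSnarkPackV2 seals and commits two CC sectors, aggregates their seal
// proofs with SnarkPackV2 and verifies the aggregate.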
func TestSnarkPackV2(t *testing.T) {
	logging.SetAllLoggers(logging.LevelWarn)
	ctx := context.Background()
	m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, syncds.MutexWrap(datastore.NewMapDatastore()))
	defer cleanup()

	localTasks := []sealtasks.TaskType{
		sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit1, sealtasks.TTCommit2, sealtasks.TTFinalize,
		sealtasks.TTFetch, sealtasks.TTReplicaUpdate, sealtasks.TTProveReplicaUpdate1, sealtasks.TTProveReplicaUpdate2, sealtasks.TTUnseal,
		sealtasks.TTRegenSectorKey, sealtasks.TTFinalizeUnsealed,
	}
	wds := syncds.MutexWrap(datastore.NewMapDatastore())

	w := NewLocalWorker(WorkerConfig{TaskTypes: localTasks}, stor, lstor, idx, m, statestore.New(wds))
	err := m.AddWorker(ctx, w)
	require.NoError(t, err)

	proofType := abi.RegisteredSealProof_StackedDrg2KiBV1
	ptStr := os.Getenv("LOTUS_TEST_SNAP_DEALS_PROOF_TYPE")
	switch ptStr {
	case "2k":
	case "8M":
		proofType = abi.RegisteredSealProof_StackedDrg8MiBV1
	case "512M":
		proofType = abi.RegisteredSealProof_StackedDrg512MiBV1
	case "32G":
		proofType = abi.RegisteredSealProof_StackedDrg32GiBV1
	case "64G":
		proofType = abi.RegisteredSealProof_StackedDrg64GiBV1
	default:
		log.Warn("Unspecified proof type, make sure to set LOTUS_TEST_SNAP_DEALS_PROOF_TYPE to '2k', '8M', '512M', '32G' or '64G'")
		log.Warn("Continuing test with 2k sectors")
	}

	mid := abi.ActorID(1000)

	sid1 := storiface.SectorRef{
		ID:        abi.SectorID{Miner: mid, Number: 1},
		ProofType: proofType,
	}

	sid2 := storiface.SectorRef{
		ID:        abi.SectorID{Miner: mid, Number: 2},
		ProofType: proofType,
	}

	ss, err := proofType.SectorSize()
	require.NoError(t, err)

	unpaddedSectorSize := abi.PaddedPieceSize(ss).Unpadded()

	// Pack sector with no pieces
	p1, err := m.AddPiece(ctx, sid1, nil, unpaddedSectorSize, NewNullReader(unpaddedSectorSize))
	require.NoError(t, err)
	ccPieces1 := []abi.PieceInfo{p1}

	p2, err := m.AddPiece(ctx, sid2, nil, unpaddedSectorSize, NewNullReader(unpaddedSectorSize))
	require.NoError(t, err)
	ccPieces2 := []abi.PieceInfo{p2}

	// Precommit and Seal 2 CC sectors
	fmt.Printf("PC1\n")

	ticket1 := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9}
	ticket2 := abi.SealRandomness{1, 9, 8, 9, 1, 9, 8, 9}
	interactiveRandomness1 := abi.InteractiveSealRandomness{1, 9, 2, 1, 2, 5, 3, 0}
	interactiveRandomness2 := abi.InteractiveSealRandomness{1, 5, 2, 2, 1, 5, 2, 2}

	pc1Out1, err := m.SealPreCommit1(ctx, sid1, ticket1, ccPieces1)
	require.NoError(t, err)
	pc1Out2, err := m.SealPreCommit1(ctx, sid2, ticket2, ccPieces2)
	require.NoError(t, err)

	fmt.Printf("PC2\n")

	pc2Out1, err := m.SealPreCommit2(ctx, sid1, pc1Out1)
	require.NoError(t, err)
	pc2Out2, err := m.SealPreCommit2(ctx, sid2, pc1Out2)
	require.NoError(t, err)

	// Commit the sector

	fmt.Printf("C1\n")

	c1Out1, err := m.SealCommit1(ctx, sid1, ticket1, interactiveRandomness1, ccPieces1, pc2Out1)
	require.NoError(t, err)
	c1Out2, err := m.SealCommit1(ctx, sid2, ticket2, interactiveRandomness2, ccPieces2, pc2Out2)
	require.NoError(t, err)

	fmt.Printf("C2\n")

	c2Out1, err := m.SealCommit2(ctx, sid1, c1Out1)
	require.NoError(t, err)
	c2Out2, err := m.SealCommit2(ctx, sid2, c1Out2)
	require.NoError(t, err)

	fmt.Println("Aggregate")
	agg, err := ffi.AggregateSealProofs(proof.AggregateSealVerifyProofAndInfos{
		Miner:          mid,
		SealProof:      proofType,
		AggregateProof: abi.RegisteredAggregationProof_SnarkPackV2,
		Infos: []proof.AggregateSealVerifyInfo{{
			Number:                sid1.ID.Number,
			Randomness:            ticket1,
			InteractiveRandomness: interactiveRandomness1,
			SealedCID:             pc2Out1.Sealed,
			UnsealedCID:           pc2Out1.Unsealed,
		}, {
			Number:                sid2.ID.Number,
			Randomness:            ticket2,
			InteractiveRandomness: interactiveRandomness2,
			SealedCID:             pc2Out2.Sealed,
			UnsealedCID:           pc2Out2.Unsealed,
		}},
	}, [][]byte{c2Out1, c2Out2})
	require.NoError(t, err)

	fmt.Println("Verifying aggregate")
	ret, err := ffi.VerifyAggregateSeals(proof.AggregateSealVerifyProofAndInfos{
		Miner:          mid,
		SealProof:      proofType,
		AggregateProof: abi.RegisteredAggregationProof_SnarkPackV2,
		Proof:          agg,
		Infos: []proof.AggregateSealVerifyInfo{{
			Number:                sid1.ID.Number,
			Randomness:            ticket1,
			InteractiveRandomness: interactiveRandomness1,
			SealedCID:             pc2Out1.Sealed,
			UnsealedCID:           pc2Out1.Unsealed,
		}, {
			Number:                sid2.ID.Number,
			Randomness:            ticket2,
			InteractiveRandomness: interactiveRandomness2,
			SealedCID:             pc2Out2.Sealed,
			UnsealedCID:           pc2Out2.Unsealed,
		}},
	})
	require.NoError(t, err)
	require.True(t, ret, "proof should be good")
}

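// TestRedoPC1 runs PreCommit1, forces the mock sealer back to the packing
// state, and checks that PreCommit1 can be run again on the same sector.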
func TestRedoPC1(t *testing.T) {
	logging.SetAllLoggers(logging.LevelDebug)

	ctx := context.Background()
	m, lstor, _, _, cleanup := newTestMgr(ctx, t, syncds.MutexWrap(datastore.NewMapDatastore()))
	defer cleanup()

	localTasks := []sealtasks.TaskType{
		sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFinalizeUnsealed, sealtasks.TTFetch,
	}

	tw := newTestWorker(WorkerConfig{
		TaskTypes: localTasks,
	}, lstor, m)

	err := m.AddWorker(ctx, tw)
	require.NoError(t, err)

	sid := storiface.SectorRef{
		ID:        abi.SectorID{Miner: 1000, Number: 1},
		ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
	}

	pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127)))
	require.NoError(t, err)
	require.Equal(t, abi.PaddedPieceSize(1024), pi.Size)

	piz, err := m.AddPiece(ctx, sid, nil, 1016, bytes.NewReader(make([]byte, 1016)[:]))
	require.NoError(t, err)
	require.Equal(t, abi.PaddedPieceSize(1024), piz.Size)

	pieces := []abi.PieceInfo{pi, piz}

	ticket := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9}

	_, err = m.SealPreCommit1(ctx, sid, ticket, pieces)
	require.NoError(t, err)

	// tell mock ffi that we expect PC1 again
	require.NoError(t, tw.mockSeal.ForceState(sid, 0)) // sectorPacking

	_, err = m.SealPreCommit1(ctx, sid, ticket, pieces)
	require.NoError(t, err)

	require.Equal(t, 2, tw.pc1s)
}

// Manager restarts in the middle of a task, restarts it, it completes
func TestRestartManager(t *testing.T) {
	//stm: @WORKER_JOBS_001
	test := func(returnBeforeCall bool) func(*testing.T) {
		return func(t *testing.T) {
			logging.SetAllLoggers(logging.LevelDebug)

			ctx, done := context.WithCancel(context.Background())
			defer done()

			ds := syncds.MutexWrap(datastore.NewMapDatastore())

			m, lstor, _, _, cleanup := newTestMgr(ctx, t, ds)
			defer cleanup()

			localTasks := []sealtasks.TaskType{
				sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFinalizeUnsealed, sealtasks.TTFetch,
			}

			tw := newTestWorker(WorkerConfig{
				TaskTypes: localTasks,
			}, lstor, m)

			err := m.AddWorker(ctx, tw)
			require.NoError(t, err)

			sid := storiface.SectorRef{
				ID:        abi.SectorID{Miner: 1000, Number: 1},
				ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
			}

			pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127)))
			require.NoError(t, err)
			require.Equal(t, abi.PaddedPieceSize(1024), pi.Size)

			piz, err := m.AddPiece(ctx, sid, nil, 1016, bytes.NewReader(make([]byte, 1016)[:]))
			require.NoError(t, err)
			require.Equal(t, abi.PaddedPieceSize(1024), piz.Size)

			pieces := []abi.PieceInfo{pi, piz}

			ticket := abi.SealRandomness{0, 9, 9, 9, 9, 9, 9, 9}

			tw.pc1lk.Lock()
			tw.pc1wait = &sync.WaitGroup{}
			tw.pc1wait.Add(1)

			var cwg sync.WaitGroup
			cwg.Add(1)

			var perr error
			go func() {
				defer cwg.Done()
				_, perr = m.SealPreCommit1(ctx, sid, ticket, pieces)
			}()

			tw.pc1wait.Wait()

			require.NoError(t, m.Close(ctx))
			tw.ret = nil

			cwg.Wait()
			require.Error(t, perr)

			m, _, _, _, cleanup2 := newTestMgr(ctx, t, ds)
			defer cleanup2()

			tw.ret = m // simulate jsonrpc auto-reconnect
			err = m.AddWorker(ctx, tw)
			require.NoError(t, err)

			if returnBeforeCall {
				tw.pc1lk.Unlock()
				time.Sleep(100 * time.Millisecond)

				_, err = m.SealPreCommit1(ctx, sid, ticket, pieces)
			} else {
				done := make(chan struct{})
				go func() {
					defer close(done)
					_, err = m.SealPreCommit1(ctx, sid, ticket, pieces)
				}()

				time.Sleep(100 * time.Millisecond)
				tw.pc1lk.Unlock()
				<-done
			}

			require.NoError(t, err)

			require.Equal(t, 1, tw.pc1s)

			ws := m.WorkerJobs()
			require.Empty(t, ws)
		}
	}

	t.Run("callThenReturn", test(false))
	t.Run("returnThenCall", test(true))
}

// Worker restarts in the middle of a task, task fails after restart
func TestRestartWorker(t *testing.T) {
	logging.SetAllLoggers(logging.LevelDebug)

	ctx, done := context.WithCancel(context.Background())
	defer done()

	ds := syncds.MutexWrap(datastore.NewMapDatastore())

	m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, ds)
	defer cleanup()

	localTasks := []sealtasks.TaskType{
		sealtasks.TTAddPiece, sealtasks.TTFetch,
	}

	wds := syncds.MutexWrap(datastore.NewMapDatastore())

	arch := make(chan chan apres)
	w := newLocalWorker(func() (storiface.Storage, error) {
		return &testExec{apch: arch}, nil
	}, WorkerConfig{
		TaskTypes: localTasks,
	}, os.LookupEnv, stor, lstor, idx, m, statestore.New(wds))

	err := m.AddWorker(ctx, w)
	require.NoError(t, err)

	sid := storiface.SectorRef{
		ID:        abi.SectorID{Miner: 1000, Number: 1},
		ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
	}

	apDone := make(chan struct{})

	go func() {
		defer close(apDone)

		_, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127)))
		require.Error(t, err)
	}()

	// kill the worker
	<-arch
	require.NoError(t, w.Close())

	//stm: @WORKER_STATS_001
	for {
		if len(m.WorkerStats(ctx)) == 0 {
			break
		}

		time.Sleep(time.Millisecond * 3)
	}

	// restart the worker
	w = newLocalWorker(func() (storiface.Storage, error) {
		return &testExec{apch: arch}, nil
	}, WorkerConfig{
		TaskTypes: localTasks,
	}, os.LookupEnv, stor, lstor, idx, m, statestore.New(wds))

	err = m.AddWorker(ctx, w)
	require.NoError(t, err)

	<-apDone

	time.Sleep(12 * time.Millisecond)
	uf, err := w.ct.unfinished()
	require.NoError(t, err)
	require.Empty(t, uf)
}

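// TestReenableWorker disables a worker via its testDisable flag, waits for
// the scheduler to drop its open windows, then re-enables it and waits for
// the windows to come back.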
func TestReenableWorker(t *testing.T) {
	logging.SetAllLoggers(logging.LevelDebug)
	paths.HeartbeatInterval = 5 * time.Millisecond

	ctx, done := context.WithCancel(context.Background())
	defer done()

	ds := syncds.MutexWrap(datastore.NewMapDatastore())

	m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, ds)
	defer cleanup()

	localTasks := []sealtasks.TaskType{
		sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFinalizeUnsealed, sealtasks.TTFetch,
	}

	wds := datastore.NewMapDatastore()

	arch := make(chan chan apres)
	w := newLocalWorker(func() (storiface.Storage, error) {
		return &testExec{apch: arch}, nil
	}, WorkerConfig{
		TaskTypes: localTasks,
	}, os.LookupEnv, stor, lstor, idx, m, statestore.New(wds))

	err := m.AddWorker(ctx, w)
	require.NoError(t, err)

	time.Sleep(time.Millisecond * 100)

	i, _ := m.sched.Info(ctx)
	require.Len(t, i.(SchedDiagInfo).OpenWindows, 2)

	// disable
	atomic.StoreInt64(&w.testDisable, 1)

	//stm: @WORKER_STATS_001
	for i := 0; i < 100; i++ {
		if !m.WorkerStats(ctx)[w.session].Enabled {
			break
		}

		time.Sleep(time.Millisecond * 3)
	}
	require.False(t, m.WorkerStats(ctx)[w.session].Enabled)

	i, _ = m.sched.Info(ctx)
	require.Len(t, i.(SchedDiagInfo).OpenWindows, 0)

	// reenable
	atomic.StoreInt64(&w.testDisable, 0)

	for i := 0; i < 100; i++ {
		if m.WorkerStats(ctx)[w.session].Enabled {
			break
		}

		time.Sleep(time.Millisecond * 3)
	}
	require.True(t, m.WorkerStats(ctx)[w.session].Enabled)

	for i := 0; i < 100; i++ {
		info, _ := m.sched.Info(ctx)
		if len(info.(SchedDiagInfo).OpenWindows) != 0 {
			break
		}

		time.Sleep(time.Millisecond * 3)
	}

	i, _ = m.sched.Info(ctx)
	require.Len(t, i.(SchedDiagInfo).OpenWindows, 2)
}

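// TestResUse starts an AddPiece call that never returns and checks that the
// worker reports the default MaxMemory from the resource table as MemUsedMax.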
func TestResUse(t *testing.T) {
	logging.SetAllLoggers(logging.LevelDebug)

	ctx, done := context.WithCancel(context.Background())
	defer done()

	ds := syncds.MutexWrap(datastore.NewMapDatastore())

	m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, ds)
	defer cleanup()

	localTasks := []sealtasks.TaskType{
		sealtasks.TTAddPiece, sealtasks.TTFetch,
	}

	wds := syncds.MutexWrap(datastore.NewMapDatastore())

	arch := make(chan chan apres)
	w := newLocalWorker(func() (storiface.Storage, error) {
		return &testExec{apch: arch}, nil
	}, WorkerConfig{
		TaskTypes: localTasks,
	}, func(s string) (string, bool) {
		return "", false
	}, stor, lstor, idx, m, statestore.New(wds))

	err := m.AddWorker(ctx, w)
	require.NoError(t, err)

	sid := storiface.SectorRef{
		ID:        abi.SectorID{Miner: 1000, Number: 1},
		ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
	}

	go func() {
		_, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127)))
		require.Error(t, err)
	}()

l:
	for {
		st := m.WorkerStats(ctx)
		require.Len(t, st, 1)
		for _, w := range st {
			if w.MemUsedMax > 0 {
				break l
			}
			time.Sleep(time.Millisecond)
		}
	}

	st := m.WorkerStats(ctx)
	require.Len(t, st, 1)
	for _, w := range st {
		require.Equal(t, storiface.ResourceTable[sealtasks.TTAddPiece][abi.RegisteredSealProof_StackedDrg2KiBV1].MaxMemory, w.MemUsedMax)
	}
}

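// TestResOverride is the same as TestResUse, but overrides the AddPiece
// memory limit through the AP_2K_MAX_MEMORY environment lookup and expects
// the overridden value to be reported.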
func TestResOverride(t *testing.T) {
	logging.SetAllLoggers(logging.LevelDebug)

	ctx, done := context.WithCancel(context.Background())
	defer done()

	ds := syncds.MutexWrap(datastore.NewMapDatastore())

	m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, ds)
	defer cleanup()

	localTasks := []sealtasks.TaskType{
		sealtasks.TTAddPiece, sealtasks.TTFetch,
	}

	wds := syncds.MutexWrap(datastore.NewMapDatastore())

	arch := make(chan chan apres)
	w := newLocalWorker(func() (storiface.Storage, error) {
		return &testExec{apch: arch}, nil
	}, WorkerConfig{
		TaskTypes: localTasks,
	}, func(s string) (string, bool) {
		if s == "AP_2K_MAX_MEMORY" {
			return "99999", true
		}

		return "", false
	}, stor, lstor, idx, m, statestore.New(wds))

	err := m.AddWorker(ctx, w)
	require.NoError(t, err)

	sid := storiface.SectorRef{
		ID:        abi.SectorID{Miner: 1000, Number: 1},
		ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
	}

	go func() {
		_, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127)))
		require.Error(t, err)
	}()

l:
	for {
		st := m.WorkerStats(ctx)
		require.Len(t, st, 1)
		for _, w := range st {
			if w.MemUsedMax > 0 {
				break l
			}
			time.Sleep(time.Millisecond)
		}
	}

	st := m.WorkerStats(ctx)
	require.Len(t, st, 1)
	for _, w := range st {
		require.Equal(t, uint64(99999), w.MemUsedMax)
	}
}