package itests

import (
	"bytes"
	"context"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	logging "github.com/ipfs/go-log/v2"
	"github.com/stretchr/testify/require"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	miner11 "github.com/filecoin-project/go-state-types/builtin/v11/miner"
	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/filecoin-project/lotus/node"
	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/node/impl"
	"github.com/filecoin-project/lotus/node/modules"
	"github.com/filecoin-project/lotus/node/repo"
	"github.com/filecoin-project/lotus/storage/paths"
	sealing "github.com/filecoin-project/lotus/storage/pipeline"
	"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
	"github.com/filecoin-project/lotus/storage/wdpost"
)

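// TestWorkerPledge pledges a sector against a miner with local sealing disabled,
// checking that the attached seal worker reports as enabled and handles the work.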
func TestWorkerPledge(t *testing.T) {
	ctx := context.Background()
	_, miner, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.WithNoLocalSealing(true),
		kit.WithSealWorkerTasks) // no mock proofs

	ens.InterconnectAll().BeginMining(50 * time.Millisecond)

	e, err := worker.Enabled(ctx)
	require.NoError(t, err)
	require.True(t, e)

	miner.PledgeSectors(ctx, 1, 0, nil)
}

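// TestWorkerPledgeSpread runs the same pledge flow with the "spread" task assigner selected.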
func TestWorkerPledgeSpread(t *testing.T) {
	ctx := context.Background()
	_, miner, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(),
		kit.WithSealWorkerTasks,
		kit.WithAssigner("spread"),
	) // no mock proofs

	ens.InterconnectAll().BeginMining(50 * time.Millisecond)

	e, err := worker.Enabled(ctx)
	require.NoError(t, err)
	require.True(t, e)

	miner.PledgeSectors(ctx, 1, 0, nil)
}

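// TestWorkerPledgeLocalFin runs the pledge flow with kit.WithDisallowRemoteFinalize(true)
// set on the ensemble, so sector finalization is not handed off to a remote worker.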
func TestWorkerPledgeLocalFin(t *testing.T) {
	ctx := context.Background()
	_, miner, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(),
		kit.WithSealWorkerTasks,
		kit.WithDisallowRemoteFinalize(true),
	) // no mock proofs

	ens.InterconnectAll().BeginMining(50 * time.Millisecond)

	e, err := worker.Enabled(ctx)
	require.NoError(t, err)
	require.True(t, e)

	miner.PledgeSectors(ctx, 1, 0, nil)
}

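// TestWorkerDataCid exercises ComputeDataCid on the worker for a small piece, a full
// 16MiB piece, and a piece that only partially fills its padded size, checking the
// returned piece sizes and piece CIDs.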
func TestWorkerDataCid(t *testing.T) {
	ctx := context.Background()
	_, miner, worker, _ := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.WithNoLocalSealing(true),
		kit.WithSealWorkerTasks) // no mock proofs

	e, err := worker.Enabled(ctx)
	require.NoError(t, err)
	require.True(t, e)

	pi, err := miner.ComputeDataCid(ctx, 1016, strings.NewReader(strings.Repeat("a", 1016)))
	require.NoError(t, err)
	require.Equal(t, abi.PaddedPieceSize(1024), pi.Size)
	require.Equal(t, "baga6ea4seaqlhznlutptgfwhffupyer6txswamerq5fc2jlwf2lys2mm5jtiaeq", pi.PieceCID.String())

	bigPiece := abi.PaddedPieceSize(16 << 20).Unpadded()
	pi, err = miner.ComputeDataCid(ctx, bigPiece, strings.NewReader(strings.Repeat("a", int(bigPiece))))
	require.NoError(t, err)
	require.Equal(t, bigPiece.Padded(), pi.Size)
	require.Equal(t, "baga6ea4seaqmhoxl2ybw5m2wyd3pt3h4zmp7j52yumzu2rar26twns3uocq7yfa", pi.PieceCID.String())

	nonFullPiece := abi.PaddedPieceSize(10 << 20).Unpadded()
	pi, err = miner.ComputeDataCid(ctx, bigPiece, strings.NewReader(strings.Repeat("a", int(nonFullPiece))))
	require.NoError(t, err)
	require.Equal(t, bigPiece.Padded(), pi.Size)
	require.Equal(t, "baga6ea4seaqbxib4pdxs5cqdn3fmtj4rcxk6rx6ztiqmrx7fcpo3ymuxbp2rodi", pi.PieceCID.String())
}

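// TestWinningPostWorker runs a worker that only handles winning PoSt, with insecure
// PoSt validation turned off, and verifies that the chain still advances.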
func TestWinningPostWorker(t *testing.T) {
	prevIns := build.InsecurePoStValidation
	build.InsecurePoStValidation = false
	defer func() {
		build.InsecurePoStValidation = prevIns
	}()

	ctx := context.Background()
	client, _, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(),
		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWinningPoSt})) // no mock proofs

	ens.InterconnectAll().BeginMining(50 * time.Millisecond)

	e, err := worker.Enabled(ctx)
	require.NoError(t, err)
	require.True(t, e)

	client.WaitTillChain(ctx, kit.HeightAtLeast(6))
}

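// TestWindowPostWorker runs window PoSt on a dedicated PoSt worker for a full proving
// period, checks that the PoSt message lands and that all power is proven, then removes
// one sector and checks that the proven power drops by exactly one sector.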
func TestWindowPostWorker(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_ = logging.SetLogLevel("storageminer", "INFO")

	sectors := 2 * 48 * 2

	client, miner, _, ens := kit.EnsembleWorker(t,
		kit.PresealSectors(sectors), // 2 sectors per partition, 2 partitions in all 48 deadlines
		kit.LatestActorsAt(-1),
		kit.ThroughRPC(),
		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt}))

	maddr, err := miner.ActorAddress(ctx)
	require.NoError(t, err)

	di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	bm := ens.InterconnectAll().BeginMining(2 * time.Millisecond)[0]

	di = di.NextNotElapsed()

	t.Log("Running one proving period")
	waitUntil := di.Open + di.WPoStChallengeWindow*2 + wdpost.SubmitConfidence
	client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))

	t.Log("Waiting for post message")
	bm.Stop()

	var lastPending []*types.SignedMessage
	for i := 0; i < 500; i++ {
		lastPending, err = client.MpoolPending(ctx, types.EmptyTSK)
		require.NoError(t, err)

		if len(lastPending) > 0 {
			break
		}
		time.Sleep(40 * time.Millisecond)
	}

	require.Greater(t, len(lastPending), 0)

	t.Log("post message landed")

	bm.MineBlocks(ctx, 2*time.Millisecond)

	waitUntil = di.Open + di.WPoStChallengeWindow*3
	t.Logf("End for head.Height > %d", waitUntil)

	ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
	t.Logf("Now head.Height = %d", ts.Height())

	p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	ssz, err := miner.ActorSectorSize(ctx, maddr)
	require.NoError(t, err)

	require.Equal(t, p.MinerPower, p.TotalPower)
	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(sectors)))

	mid, err := address.IDFromAddress(maddr)
	require.NoError(t, err)

	di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	// Remove one sector in the next deadline (so it's skipped)
	{
		parts, err := client.StateMinerPartitions(ctx, maddr, di.Index+1, types.EmptyTSK)
		require.NoError(t, err)
		require.Greater(t, len(parts), 0)

		secs := parts[0].AllSectors
		n, err := secs.Count()
		require.NoError(t, err)
		require.Equal(t, uint64(2), n)

		// Drop the sector
		sid, err := secs.First()
		require.NoError(t, err)

		t.Logf("Drop sector %d; dl %d part %d", sid, di.Index+1, 0)

		err = miner.BaseAPI.(*impl.StorageMinerAPI).IStorageMgr.Remove(ctx, storiface.SectorRef{
			ID: abi.SectorID{
				Miner:  abi.ActorID(mid),
				Number: abi.SectorNumber(sid),
			},
		})
		require.NoError(t, err)
	}

	waitUntil = di.Close + di.WPoStChallengeWindow
	ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
	t.Logf("Now head.Height = %d", ts.Height())

	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	require.Equal(t, p.MinerPower, p.TotalPower)
	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(sectors-1)))
}

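// badWorkerStorage wraps a paths.Store and fails GenerateSingleVanillaProof for the
// sector number stored in badsector, once notBadCount successful calls are used up.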
type badWorkerStorage struct {
	paths.Store

	badsector   *uint64
	notBadCount int
}

func (bs *badWorkerStorage) GenerateSingleVanillaProof(ctx context.Context, minerID abi.ActorID, si storiface.PostSectorChallenge, ppt abi.RegisteredPoStProof) ([]byte, error) {
	if atomic.LoadUint64(bs.badsector) == uint64(si.SectorNumber) {
		bs.notBadCount--
		if bs.notBadCount < 0 {
			return nil, xerrors.New("no proof for you")
		}
	}
	return bs.Store.GenerateSingleVanillaProof(ctx, minerID, si, ppt)
}

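// TestWindowPostWorkerSkipBadSector is like TestWindowPostWorker, but instead of removing
// a sector it makes vanilla proof generation fail for it via badWorkerStorage and expects
// the sector to be skipped in the following PoSt.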
func TestWindowPostWorkerSkipBadSector(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_ = logging.SetLogLevel("storageminer", "INFO")

	sectors := 2 * 48 * 2

	var badsector uint64 = 100000

	client, miner, _, ens := kit.EnsembleWorker(t,
		kit.PresealSectors(sectors), // 2 sectors per partition, 2 partitions in all 48 deadlines
		kit.LatestActorsAt(-1),
		kit.ThroughRPC(),
		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt}),
		kit.WithWorkerStorage(func(store paths.Store) paths.Store {
			return &badWorkerStorage{
				Store:     store,
				badsector: &badsector,
			}
		}),
		kit.ConstructorOpts(node.ApplyIf(node.IsType(repo.StorageMiner),
			node.Override(new(paths.Store), func(store *paths.Remote) paths.Store {
				return &badWorkerStorage{
					Store:       store,
					badsector:   &badsector,
					notBadCount: 1,
				}
			}))))

	maddr, err := miner.ActorAddress(ctx)
	require.NoError(t, err)

	di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	bm := ens.InterconnectAll().BeginMiningMustPost(2 * time.Millisecond)[0]

	di = di.NextNotElapsed()

	t.Log("Running one proving period")
	waitUntil := di.Open + di.WPoStChallengeWindow*2 + wdpost.SubmitConfidence
	client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))

	t.Log("Waiting for post message")
	bm.Stop()

	var lastPending []*types.SignedMessage
	for i := 0; i < 500; i++ {
		lastPending, err = client.MpoolPending(ctx, types.EmptyTSK)
		require.NoError(t, err)

		if len(lastPending) > 0 {
			break
		}
		time.Sleep(40 * time.Millisecond)
	}

	require.Greater(t, len(lastPending), 0)

	t.Log("post message landed")

	bm.MineBlocksMustPost(ctx, 2*time.Millisecond)

	waitUntil = di.Open + di.WPoStChallengeWindow*3
	t.Logf("End for head.Height > %d", waitUntil)

	ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
	t.Logf("Now head.Height = %d", ts.Height())

	p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	ssz, err := miner.ActorSectorSize(ctx, maddr)
	require.NoError(t, err)

	require.Equal(t, p.MinerPower, p.TotalPower)
	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(sectors)))

	di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	// Remove one sector in the next deadline (so it's skipped)
	{
		parts, err := client.StateMinerPartitions(ctx, maddr, di.Index+1, types.EmptyTSK)
		require.NoError(t, err)
		require.Greater(t, len(parts), 0)

		secs := parts[0].AllSectors
		n, err := secs.Count()
		require.NoError(t, err)
		require.Equal(t, uint64(2), n)

		// Drop the sector
		sid, err := secs.First()
		require.NoError(t, err)

		t.Logf("Drop sector %d; dl %d part %d", sid, di.Index+1, 0)

		atomic.StoreUint64(&badsector, sid)
		require.NoError(t, err)
	}

	waitUntil = di.Close + di.WPoStChallengeWindow
	ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
	t.Logf("Now head.Height = %d", ts.Height())

	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	require.Equal(t, p.MinerPower, p.TotalPower)
	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(sectors-1)))
}

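// TestWindowPostWorkerManualPoSt computes window PoSts for individual deadlines on demand
// via ComputeWindowPoSt and checks that the returned proof targets the requested deadline.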
func TestWindowPostWorkerManualPoSt(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_ = logging.SetLogLevel("storageminer", "INFO")

	sectors := 2 * 48 * 2

	client, miner, _, _ := kit.EnsembleWorker(t,
		kit.PresealSectors(sectors), // 2 sectors per partition, 2 partitions in all 48 deadlines
		kit.LatestActorsAt(-1),
		kit.ThroughRPC(),
		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt}))

	maddr, err := miner.ActorAddress(ctx)
	require.NoError(t, err)

	di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	di = di.NextNotElapsed()

	tryDl := func(dl uint64) {
		p, err := miner.ComputeWindowPoSt(ctx, dl, types.EmptyTSK)
		require.NoError(t, err)
		require.Len(t, p, 1)
		require.Equal(t, dl, p[0].Deadline)
	}
	tryDl(0)
	tryDl(40)
	tryDl(di.Index + 4)
}

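// TestWindowPostWorkerDisconnected starts two PoSt workers, stops one of them, and checks
// that ComputeWindowPoSt still succeeds by falling back to the remaining worker.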
func TestWindowPostWorkerDisconnected(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_ = logging.SetLogLevel("storageminer", "INFO")

	sectors := 2 * 48 * 2

	_, miner, badWorker, ens := kit.EnsembleWorker(t,
		kit.PresealSectors(sectors), // 2 sectors per partition, 2 partitions in all 48 deadlines
		kit.LatestActorsAt(-1),
		kit.ThroughRPC(),
		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt}))

	var goodWorker kit.TestWorker
	ens.Worker(miner, &goodWorker, kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt}), kit.ThroughRPC()).Start()

	// wait for all workers
	require.Eventually(t, func() bool {
		w, err := miner.WorkerStats(ctx)
		require.NoError(t, err)
		return len(w) == 3 // 2 post + 1 miner-builtin
	}, 10*time.Second, 100*time.Millisecond)

	tryDl := func(dl uint64) {
		p, err := miner.ComputeWindowPoSt(ctx, dl, types.EmptyTSK)
		require.NoError(t, err)
		require.Len(t, p, 1)
		require.Equal(t, dl, p[0].Deadline)
	}
	tryDl(0) // this will run on the not-yet-bad badWorker

	err := badWorker.Stop(ctx)
	require.NoError(t, err)

	tryDl(10) // will fail on the badWorker, then should retry on the goodWorker

	time.Sleep(15 * time.Second)

	tryDl(40) // after HeartbeatInterval, the badWorker should be marked as disabled
}

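// TestSchedulerRemoveRequest pledges a sector on a worker that can only run AddPiece and
// PreCommit1, waits for the request to get stuck at PreCommit2, then removes it from the
// scheduler via SealingRemoveRequest and verifies the request queue is empty.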
func TestSchedulerRemoveRequest(t *testing.T) {
	ctx := context.Background()
	_, miner, worker, _ := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.WithNoLocalSealing(true),
		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTAddPiece, sealtasks.TTPreCommit1})) // no mock proofs

	//ens.InterconnectAll().BeginMining(50 * time.Millisecond)

	e, err := worker.Enabled(ctx)
	require.NoError(t, err)
	require.True(t, e)

	tocheck := miner.StartPledge(ctx, 1, 0, nil)
	var sn abi.SectorNumber
	for n := range tocheck {
		sn = n
	}
	// Keep checking until the sector state is PC2; the request should get stuck there, as the worker cannot process PC2
	for {
		st, err := miner.SectorsStatus(ctx, sn, false)
		require.NoError(t, err)
		if st.State == api.SectorState(sealing.PreCommit2) {
			break
		}
		time.Sleep(time.Second)
	}

	// Dump current scheduler info
	b := miner.SchedInfo(ctx)

	// Get the stuck request's UUID from the scheduler info and call SealingRemoveRequest()
	require.Len(t, b.SchedInfo.Requests, 1)
	require.Equal(t, "seal/v0/precommit/2", b.SchedInfo.Requests[0].TaskType)

	err = miner.SealingRemoveRequest(ctx, b.SchedInfo.Requests[0].SchedId)
	require.NoError(t, err)

	// Dump the scheduler again; the request should no longer be present
	a := miner.SchedInfo(ctx)

	require.Len(t, a.SchedInfo.Requests, 0)
}

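// TestWorkerName checks that a worker name set with kit.WithWorkerName is reported both
// by the worker's Info call and in the miner's WorkerStats.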
func TestWorkerName(t *testing.T) {
	name := "thisstringisprobablynotahostnameihope"

	ctx := context.Background()
	_, miner, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.WithWorkerName(name))

	ens.InterconnectAll().BeginMining(50 * time.Millisecond)

	e, err := worker.Info(ctx)
	require.NoError(t, err)
	require.Equal(t, name, e.Hostname)

	ws, err := miner.WorkerStats(ctx)
	require.NoError(t, err)

	var found bool
	for _, stats := range ws {
		if stats.Info.Hostname == name {
			found = true
		}
	}

	require.True(t, found)
}

// Tests that V1_1 proofs work on post workers when a sector is faulty
func TestWindowPostV1P1NV20WorkerFault(t *testing.T) {
	kit.QuietMiningLogs()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	blocktime := 2 * time.Millisecond

	sectors := 2 * 48 * 2
	var badsector uint64 = 100000

	client, miner, _, ens := kit.EnsembleWorker(t,
		kit.PresealSectors(sectors), // 2 sectors per partition, 2 partitions in all 48 deadlines
		kit.GenesisNetworkVersion(network.Version20),
		kit.ConstructorOpts(
			node.Override(new(config.ProvingConfig), func() config.ProvingConfig {
				c := config.DefaultStorageMiner()
				c.Proving.DisableBuiltinWindowPoSt = true
				return c.Proving
			}),
			node.Override(new(*wdpost.WindowPoStScheduler), modules.WindowPostScheduler(
				config.DefaultStorageMiner().Fees,
				config.ProvingConfig{
					DisableBuiltinWindowPoSt:  true,
					DisableBuiltinWinningPoSt: false,
					DisableWDPoStPreChecks:    false,
				},
			)),
			node.Override(new(paths.Store), func(store *paths.Remote) paths.Store {
				return &badWorkerStorage{
					Store:       store,
					badsector:   &badsector,
					notBadCount: 1,
				}
			})),
		kit.ThroughRPC(),
		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt}),
		kit.WithWorkerStorage(func(store paths.Store) paths.Store {
			return &badWorkerStorage{
				Store:     store,
				badsector: &badsector,
			}
		}))

	bm := ens.InterconnectAll().BeginMining(blocktime)[0]

	maddr, err := miner.ActorAddress(ctx)
	require.NoError(t, err)

	// wait for sectors to be committed
	require.Eventually(t, func() bool {
		di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
		require.NoError(t, err)

		parts, err := client.StateMinerPartitions(ctx, maddr, di.Index, types.EmptyTSK)
		require.NoError(t, err)

		return len(parts) > 1
	}, 30*time.Second, 100*time.Millisecond)

	// Wait until just before a deadline opens
	{
		di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
		require.NoError(t, err)

		di = di.NextNotElapsed()

		t.Log("Running one proving period")
		waitUntil := di.Open + di.WPoStChallengeWindow - di.WPoStChallengeLookback - 1
		client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))

		t.Log("Waiting for post message")
		bm.Stop()
	}

	// Remove one sector in the next deadline (so it's skipped)
	{
		di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
		require.NoError(t, err)

		parts, err := client.StateMinerPartitions(ctx, maddr, di.Index+1, types.EmptyTSK)
		require.NoError(t, err)
		require.Greater(t, len(parts), 0)

		secs := parts[0].AllSectors
		n, err := secs.Count()
		require.NoError(t, err)
		require.Equal(t, uint64(2), n)

		// Drop the sector in first partition
		sid, err := secs.First()
		require.NoError(t, err)

		t.Logf("Drop sector %d; dl %d part %d", sid, di.Index, 0)

		atomic.StoreUint64(&badsector, sid)
		require.NoError(t, err)
	}

	bm.MineBlocksMustPost(ctx, 2*time.Millisecond)

	mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
	require.NoError(t, err)
	en := wact.Nonce

	// wait for a new message to be sent from worker address, it will be a PoSt

waitForProof:
	for {
		//stm: @CHAIN_STATE_GET_ACTOR_001
		wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
		require.NoError(t, err)
		if wact.Nonce > en {
			break waitForProof
		}

		build.Clock.Sleep(blocktime)
	}

	slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
	require.NoError(t, err)

	pmr, err := client.StateSearchMsg(ctx, types.EmptyTSK, slm[0], -1, false)
	require.NoError(t, err)

	nv, err := client.StateNetworkVersion(ctx, pmr.TipSet)
	require.NoError(t, err)
	require.Equal(t, network.Version20, nv)

	require.True(t, pmr.Receipt.ExitCode.IsSuccess())

	slmsg, err := client.ChainGetMessage(ctx, slm[0])
	require.NoError(t, err)

	var params miner11.SubmitWindowedPoStParams
	require.NoError(t, params.UnmarshalCBOR(bytes.NewBuffer(slmsg.Params)))
	require.Equal(t, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1, params.Proofs[0].PoStProof)

	require.Len(t, params.Partitions, 2)
	sc0, err := params.Partitions[0].Skipped.Count()
	require.NoError(t, err)
	require.Equal(t, uint64(1), sc0)
	sc1, err := params.Partitions[1].Skipped.Count()
	require.NoError(t, err)
	require.Equal(t, uint64(0), sc1)
}

// Tests that V1_1 proofs work on a post worker
func TestWindowPostV1P1NV20Worker(t *testing.T) {
	kit.QuietMiningLogs()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	blocktime := 2 * time.Millisecond

	client, miner, _, ens := kit.EnsembleWorker(t,
		kit.GenesisNetworkVersion(network.Version20),
		kit.ConstructorOpts(
			node.Override(new(config.ProvingConfig), func() config.ProvingConfig {
				c := config.DefaultStorageMiner()
				c.Proving.DisableBuiltinWindowPoSt = true
				return c.Proving
			}),
			node.Override(new(*wdpost.WindowPoStScheduler), modules.WindowPostScheduler(
				config.DefaultStorageMiner().Fees,
				config.ProvingConfig{
					DisableBuiltinWindowPoSt:  true,
					DisableBuiltinWinningPoSt: false,
					DisableWDPoStPreChecks:    false,
				},
			))),
		kit.ThroughRPC(),
		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt}))

	ens.InterconnectAll().BeginMining(blocktime)

	maddr, err := miner.ActorAddress(ctx)
	require.NoError(t, err)

	mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
	require.NoError(t, err)
	en := wact.Nonce

	// wait for a new message to be sent from worker address, it will be a PoSt

waitForProof:
	for {
		//stm: @CHAIN_STATE_GET_ACTOR_001
		wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
		require.NoError(t, err)
		if wact.Nonce > en {
			break waitForProof
		}

		build.Clock.Sleep(blocktime)
	}

	slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
	require.NoError(t, err)

	pmr, err := client.StateSearchMsg(ctx, types.EmptyTSK, slm[0], -1, false)
	require.NoError(t, err)

	nv, err := client.StateNetworkVersion(ctx, pmr.TipSet)
	require.NoError(t, err)
	require.Equal(t, network.Version20, nv)

	require.True(t, pmr.Receipt.ExitCode.IsSuccess())

	slmsg, err := client.ChainGetMessage(ctx, slm[0])
	require.NoError(t, err)

	var params miner11.SubmitWindowedPoStParams
	require.NoError(t, params.UnmarshalCBOR(bytes.NewBuffer(slmsg.Params)))
	require.Equal(t, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1, params.Proofs[0].PoStProof)
}