diff --git a/itests/deadlines_test.go b/itests/deadlines_test.go
index 9551465a5..3c8303ac0 100644
--- a/itests/deadlines_test.go
+++ b/itests/deadlines_test.go
@@ -3,7 +3,6 @@ package itests
 import (
 	"bytes"
 	"context"
-	"fmt"
 	"os"
 	"testing"
 	"time"
@@ -22,12 +21,11 @@ import (
 	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/extern/sector-storage/mock"
-	"github.com/filecoin-project/lotus/itests/kit"
+	"github.com/filecoin-project/lotus/itests/kit2"
 	"github.com/filecoin-project/lotus/node/impl"
 
 	miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
 	"github.com/ipfs/go-cid"
 	cbor "github.com/ipfs/go-ipld-cbor"
-	logging "github.com/ipfs/go-log/v2"
 	"github.com/stretchr/testify/require"
 )
@@ -58,11 +56,8 @@ func TestDeadlineToggling(t *testing.T) {
 	if os.Getenv("LOTUS_TEST_DEADLINE_TOGGLING") != "1" {
 		t.Skip("this takes a few minutes, set LOTUS_TEST_DEADLINE_TOGGLING=1 to run")
 	}
-	_ = logging.SetLogLevel("miner", "ERROR")
-	_ = logging.SetLogLevel("chainstore", "ERROR")
-	_ = logging.SetLogLevel("chain", "ERROR")
-	_ = logging.SetLogLevel("sub", "ERROR")
-	_ = logging.SetLogLevel("storageminer", "FATAL")
+
+	kit2.QuietMiningLogs()
 
 	const sectorsC, sectorsD, sectorsB = 10, 9, 8
 
@@ -75,21 +70,26 @@ func TestDeadlineToggling(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	n, sn := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithNetworkUpgradeAt(network.Version12, upgradeH)}, kit.OneMiner)
+	var (
+		client kit2.TestFullNode
+		minerA kit2.TestMiner
+		minerB kit2.TestMiner
+		minerC kit2.TestMiner
+		minerD kit2.TestMiner
+		minerE kit2.TestMiner
+	)
+	opts := []kit2.NodeOpt{kit2.ConstructorOpts(kit2.NetworkUpgradeAt(network.Version12, upgradeH))}
+	ens := kit2.NewEnsemble(t, kit2.MockProofs()).
+		FullNode(&client, opts...).
+		Miner(&minerA, &client, opts...).
+		Start().
+		InterconnectAll()
+	ens.BeginMining(blocktime)
 
-	client := n[0].FullNode.(*impl.FullNodeAPI)
-	minerA := sn[0]
-
-	{
-		addrinfo, err := client.NetAddrsListen(ctx)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if err := minerA.NetConnect(ctx, addrinfo); err != nil {
-			t.Fatal(err)
-		}
-	}
+	opts = append(opts, kit2.OwnerAddr(client.DefaultKey))
+	ens.Miner(&minerB, &client, opts...).
+		Miner(&minerC, &client, opts...).
+		Start()
 
 	defaultFrom, err := client.WalletDefaultAddress(ctx)
 	require.NoError(t, err)
@@ -99,28 +99,6 @@ func TestDeadlineToggling(t *testing.T) {
 	build.Clock.Sleep(time.Second)
 
-	done := make(chan struct{})
-	go func() {
-		defer close(done)
-		for ctx.Err() == nil {
-			build.Clock.Sleep(blocktime)
-			if err := minerA.MineOne(ctx, kit.MineNext); err != nil {
-				if ctx.Err() != nil {
-					// context was canceled, ignore the error.
-					return
-				}
-				t.Error(err)
-			}
-		}
-	}()
-	defer func() {
-		cancel()
-		<-done
-	}()
-
-	minerB := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
-	minerC := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
-
 	maddrB, err := minerB.ActorAddress(ctx)
 	require.NoError(t, err)
 	maddrC, err := minerC.ActorAddress(ctx)
 	require.NoError(t, err)
@@ -131,20 +109,20 @@ func TestDeadlineToggling(t *testing.T) {
 
 	// pledge sectors on C, go through a PP, check for power
 	{
-		kit.PledgeSectors(t, ctx, minerC, sectorsC, 0, nil)
+		minerC.PledgeSectors(ctx, sectorsC, 0, nil)
 
 		di, err := client.StateMinerProvingDeadline(ctx, maddrC, types.EmptyTSK)
 		require.NoError(t, err)
 
-		fmt.Printf("Running one proving period (miner C)\n")
-		fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2)
+		t.Log("Running one proving period (miner C)")
+		t.Logf("End for head.Height > %d", di.PeriodStart+di.WPoStProvingPeriod*2)
 
 		for {
 			head, err := client.ChainHead(ctx)
 			require.NoError(t, err)
 
 			if head.Height() > di.PeriodStart+provingPeriod*2 {
-				fmt.Printf("Now head.Height = %d\n", head.Height())
+				t.Logf("Now head.Height = %d", head.Height())
 				break
 			}
 			build.Clock.Sleep(blocktime)
@@ -165,7 +143,7 @@ func TestDeadlineToggling(t *testing.T) {
 		require.NoError(t, err)
 
 		if head.Height() > upgradeH+provingPeriod {
-			fmt.Printf("Now head.Height = %d\n", head.Height())
+			t.Logf("Now head.Height = %d", head.Height())
 			break
 		}
 		build.Clock.Sleep(blocktime)
@@ -216,8 +194,9 @@ func TestDeadlineToggling(t *testing.T) {
 	require.NoError(t, err)
 	require.GreaterOrEqual(t, nv, network.Version12)
 
-	minerD := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
-	minerE := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
+	ens.Miner(&minerD, &client, opts...).
+		Miner(&minerE, &client, opts...).
+		Start()
 
 	maddrD, err := minerD.ActorAddress(ctx)
 	require.NoError(t, err)
@@ -225,7 +204,7 @@ func TestDeadlineToggling(t *testing.T) {
 	require.NoError(t, err)
 
 	// first round of miner checks
-	checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.GenesisPreseals), true, true, types.EmptyTSK)
+	checkMiner(maddrA, types.NewInt(uint64(ssz)*kit2.DefaultPresealsPerBootstrapMiner), true, true, types.EmptyTSK)
 	checkMiner(maddrC, types.NewInt(uint64(ssz)*sectorsC), true, true, types.EmptyTSK)
 
 	checkMiner(maddrB, types.NewInt(0), false, false, types.EmptyTSK)
@@ -233,10 +212,10 @@ func TestDeadlineToggling(t *testing.T) {
 	checkMiner(maddrD, types.NewInt(0), false, false, types.EmptyTSK)
 	checkMiner(maddrE, types.NewInt(0), false, false, types.EmptyTSK)
 
 	// pledge sectors on minerB/minerD, stop post on minerC
-	kit.PledgeSectors(t, ctx, minerB, sectorsB, 0, nil)
+	minerB.PledgeSectors(ctx, sectorsB, 0, nil)
 	checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK)
-	kit.PledgeSectors(t, ctx, minerD, sectorsD, 0, nil)
+	minerD.PledgeSectors(ctx, sectorsD, 0, nil)
 	checkMiner(maddrD, types.NewInt(0), true, true, types.EmptyTSK)
 
 	minerC.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).Fail()
@@ -252,7 +231,7 @@ func TestDeadlineToggling(t *testing.T) {
 	params := &miner.SectorPreCommitInfo{
 		Expiration:   2880 * 300,
 		SectorNumber: 22,
-		SealProof:    kit.TestSpt,
+		SealProof:    kit2.TestSpt,
 		SealedCID:    cr,
 
 		SealRandEpoch: head.Height() - 200,
@@ -281,7 +260,7 @@ func TestDeadlineToggling(t *testing.T) {
 		require.NoError(t, err)
 
 		if head.Height() > upgradeH+provingPeriod+(provingPeriod/2) {
-			fmt.Printf("Now head.Height = %d\n", head.Height())
+			t.Logf("Now head.Height = %d", head.Height())
 			break
 		}
 		build.Clock.Sleep(blocktime)
@@ -295,14 +274,14 @@ func TestDeadlineToggling(t *testing.T) {
 		require.NoError(t, err)
 
 		if head.Height() > upgradeH+(provingPeriod*3) {
-			fmt.Printf("Now head.Height = %d\n", head.Height())
+			t.Logf("Now head.Height = %d", head.Height())
 			break
 		}
 		build.Clock.Sleep(blocktime)
 	}
 
 	// second round of miner checks
-	checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.GenesisPreseals), true, true, types.EmptyTSK)
+	checkMiner(maddrA, types.NewInt(uint64(ssz)*kit2.DefaultPresealsPerBootstrapMiner), true, true, types.EmptyTSK)
 	checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK)
 	checkMiner(maddrB, types.NewInt(uint64(ssz)*sectorsB), true, true, types.EmptyTSK)
 	checkMiner(maddrD, types.NewInt(uint64(ssz)*sectorsD), true, true, types.EmptyTSK)
@@ -351,7 +330,7 @@ func TestDeadlineToggling(t *testing.T) {
 	}, nil)
 	require.NoError(t, err)
 
-	fmt.Println("sent termination message:", smsg.Cid())
+	t.Log("sent termination message:", smsg.Cid())
 
 	r, err := client.StateWaitMsg(ctx, smsg.Cid(), 2, api.LookbackNoLimit, true)
 	require.NoError(t, err)
@@ -367,13 +346,13 @@ func TestDeadlineToggling(t *testing.T) {
 		require.NoError(t, err)
 
 		if head.Height() > upgradeH+(provingPeriod*5) {
-			fmt.Printf("Now head.Height = %d\n", head.Height())
+			t.Logf("Now head.Height = %d", head.Height())
 			break
 		}
 		build.Clock.Sleep(blocktime)
 	}
 
-	checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.GenesisPreseals), true, true, types.EmptyTSK)
+	checkMiner(maddrA, types.NewInt(uint64(ssz)*kit2.DefaultPresealsPerBootstrapMiner), true, true, types.EmptyTSK)
 	checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK)
 	checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK)
 	checkMiner(maddrD, types.NewInt(0), false, false, types.EmptyTSK)
diff --git a/itests/deals_test.go b/itests/deals_test.go
index 3a6d9c868..af0ef68c4 100644
--- a/itests/deals_test.go
+++ b/itests/deals_test.go
@@ -239,23 +239,10 @@ func TestFirstDealEnablesMining(t *testing.T) {
 	// once the provider has mined a block, thanks to the power acquired from the deal,
 	// we pass the test.
 	providerMined := make(chan struct{})
-	heads, err := client.ChainNotify(ctx)
-	require.NoError(t, err)
 
 	go func() {
-		for chg := range heads {
-			for _, c := range chg {
-				if c.Type != "apply" {
-					continue
-				}
-				for _, b := range c.Val.Blocks() {
-					if b.Miner == provider.ActorAddr {
-						close(providerMined)
-						return
-					}
-				}
-			}
-		}
+		_ = client.WaitTillChain(ctx, kit2.BlockMinedBy(provider.ActorAddr))
+		close(providerMined)
 	}()
 
 	// now perform the deal.
diff --git a/itests/kit2/ensemble_opts.go b/itests/kit2/ensemble_opts.go
index c7edb99a6..8c6d66d9e 100644
--- a/itests/kit2/ensemble_opts.go
+++ b/itests/kit2/ensemble_opts.go
@@ -23,8 +23,8 @@ type ensembleOpts struct {
 }
 
 var DefaultEnsembleOpts = ensembleOpts{
-	pastOffset: 100000 * time.Second, // time sufficiently in the past to trigger catch-up mining.
-	proofType:  abi.RegisteredSealProof_StackedDrg2KiBV1,
+	pastOffset: 10000000 * time.Second,                     // time sufficiently in the past to trigger catch-up mining.
+	proofType:  abi.RegisteredSealProof_StackedDrg2KiBV1_1, // default _concrete_ proof type for non-genesis miners (notice the _1).
 }
 
 func ProofType(proofType abi.RegisteredSealProof) EnsembleOpt {
diff --git a/itests/kit2/node_full.go b/itests/kit2/node_full.go
index b0b39b471..3dadb4d8d 100644
--- a/itests/kit2/node_full.go
+++ b/itests/kit2/node_full.go
@@ -4,8 +4,11 @@ import (
 	"context"
 	"testing"
 
+	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/api/v1api"
+	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/wallet"
 	"github.com/multiformats/go-multiaddr"
 	"github.com/stretchr/testify/require"
@@ -33,3 +36,50 @@ func (f *TestFullNode) CreateImportFile(ctx context.Context, rseed int, size int
 	require.NoError(f.t, err)
 	return res, path
 }
+
+// WaitTillChain waits until a specified chain condition is met, and returns
+// the first tipset that satisfies it.
+func (f *TestFullNode) WaitTillChain(ctx context.Context, pred ChainPredicate) *types.TipSet {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	heads, err := f.ChainNotify(ctx)
+	require.NoError(f.t, err)
+
+	for chg := range heads {
+		for _, c := range chg {
+			if c.Type != "apply" {
+				continue
+			}
+			if ts := c.Val; pred(ts) {
+				return ts
+			}
+		}
+	}
+	require.Fail(f.t, "chain condition not met")
+	return nil
+}
+
+// ChainPredicate encapsulates a chain condition.
+type ChainPredicate func(set *types.TipSet) bool
+
+// HeightAtLeast returns a ChainPredicate that is satisfied when the chain
+// height is equal to or greater than the target.
+func HeightAtLeast(target abi.ChainEpoch) ChainPredicate {
+	return func(ts *types.TipSet) bool {
+		return ts.Height() >= target
+	}
+}
+
+// BlockMinedBy returns a ChainPredicate that is satisfied when we observe the
+// first block mined by the specified miner.
+func BlockMinedBy(miner address.Address) ChainPredicate {
+	return func(ts *types.TipSet) bool {
+		for _, b := range ts.Blocks() {
+			if b.Miner == miner {
+				return true
+			}
+		}
+		return false
+	}
+}
diff --git a/itests/kit2/node_opts.go b/itests/kit2/node_opts.go
index bba78262a..b2dacd3cc 100644
--- a/itests/kit2/node_opts.go
+++ b/itests/kit2/node_opts.go
@@ -14,6 +14,8 @@ import (
 // PresealSectors option.
 const DefaultPresealsPerBootstrapMiner = 2
 
+const TestSpt = abi.RegisteredSealProof_StackedDrg2KiBV1_1
+
 // nodeOpts is an options accumulating struct, where functional options are
 // merged into.
 type nodeOpts struct {
diff --git a/itests/kit2/node_opts_nv.go b/itests/kit2/node_opts_nv.go
index 6f682bd3a..5ffd94f5e 100644
--- a/itests/kit2/node_opts_nv.go
+++ b/itests/kit2/node_opts_nv.go
@@ -58,10 +58,8 @@ func InstantaneousNetworkVersion(version network.Version) node.Option {
 }
 
 func NetworkUpgradeAt(version network.Version, upgradeHeight abi.ChainEpoch) node.Option {
-	fullSchedule := stmgr.UpgradeSchedule{}
-
 	schedule := stmgr.UpgradeSchedule{}
-	for _, upgrade := range fullSchedule {
+	for _, upgrade := range DefaultTestUpgradeSchedule {
 		if upgrade.Network > version {
 			break
 		}
@@ -89,5 +87,4 @@ func SDRUpgradeAt(calico, persian abi.ChainEpoch) node.Option {
 		Network: network.Version8,
 		Height:  persian,
 	}})
-
 }
diff --git a/itests/sector_pledge_test.go b/itests/sector_pledge_test.go
index e3d2a843c..8e87f2658 100644
--- a/itests/sector_pledge_test.go
+++ b/itests/sector_pledge_test.go
@@ -4,70 +4,38 @@ import (
 	"context"
 	"fmt"
 	"strings"
-	"sync/atomic"
 	"testing"
 	"time"
 
-	"github.com/filecoin-project/go-state-types/abi"
-	"github.com/filecoin-project/go-state-types/network"
+	"github.com/stretchr/testify/require"
+
 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
-	"github.com/filecoin-project/lotus/chain/stmgr"
 	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
-	"github.com/filecoin-project/lotus/itests/kit"
-	bminer "github.com/filecoin-project/lotus/miner"
-	"github.com/filecoin-project/lotus/node"
-	"github.com/filecoin-project/lotus/node/impl"
-	"github.com/stretchr/testify/require"
+	"github.com/filecoin-project/lotus/itests/kit2"
 )
 
 func TestPledgeSectors(t *testing.T) {
-	kit.QuietMiningLogs()
+	kit2.QuietMiningLogs()
 
-	runTest := func(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int) {
+	blockTime := 50 * time.Millisecond
+
+	runTest := func(t *testing.T, nSectors int) {
 		ctx, cancel := context.WithCancel(context.Background())
 		defer cancel()
 
-		n, sn := b(t, kit.OneFull, kit.OneMiner)
-		client := n[0].FullNode.(*impl.FullNodeAPI)
-		miner := sn[0]
+		_, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs())
+		ens.InterconnectAll().BeginMining(blockTime)
 
-		addrinfo, err := client.NetAddrsListen(ctx)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if err := miner.NetConnect(ctx, addrinfo); err != nil {
-			t.Fatal(err)
-		}
-		build.Clock.Sleep(time.Second)
-
-		mine := int64(1)
-		done := make(chan struct{})
-		go func() {
-			defer close(done)
-			for atomic.LoadInt64(&mine) != 0 {
-				build.Clock.Sleep(blocktime)
-				if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
-
-				}}); err != nil {
-					t.Error(err)
-				}
-			}
-		}()
-
-		kit.PledgeSectors(t, ctx, miner, nSectors, 0, nil)
-
-		atomic.StoreInt64(&mine, 0)
-		<-done
+		miner.PledgeSectors(ctx, nSectors, 0, nil)
 	}
 
 	t.Run("1", func(t *testing.T) {
-		runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 1)
+		runTest(t, 1)
 	})
 
 	t.Run("100", func(t *testing.T) {
-		runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 100)
+		runTest(t, 100)
 	})
 
 	t.Run("1000", func(t *testing.T) {
@@ -75,52 +43,24 @@ func TestPledgeSectors(t *testing.T) {
 			t.Skip("skipping test in short mode")
 		}
 
-		runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 1000)
+		runTest(t, 1000)
 	})
 }
 
 func TestPledgeBatching(t *testing.T) {
-	runTest := func(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int) {
+	blockTime := 50 * time.Millisecond
+
+	runTest := func(t *testing.T, nSectors int) {
 		ctx, cancel := context.WithCancel(context.Background())
 		defer cancel()
 
-		n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)}, kit.OneMiner)
-		client := n[0].FullNode.(*impl.FullNodeAPI)
-		miner := sn[0]
+		opts := kit2.ConstructorOpts(kit2.LatestActorsAt(-1))
+		client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(), opts)
+		ens.InterconnectAll().BeginMining(blockTime)
 
-		addrinfo, err := client.NetAddrsListen(ctx)
-		if err != nil {
-			t.Fatal(err)
-		}
+		client.WaitTillChain(ctx, kit2.HeightAtLeast(10))
 
-		if err := miner.NetConnect(ctx, addrinfo); err != nil {
-			t.Fatal(err)
-		}
-		build.Clock.Sleep(time.Second)
-
-		mine := int64(1)
-		done := make(chan struct{})
-		go func() {
-			defer close(done)
-			for atomic.LoadInt64(&mine) != 0 {
-				build.Clock.Sleep(blocktime)
-				if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
-
-				}}); err != nil {
-					t.Error(err)
-				}
-			}
-		}()
-
-		for {
-			h, err := client.ChainHead(ctx)
-			require.NoError(t, err)
-			if h.Height() > 10 {
-				break
-			}
-		}
-
-		toCheck := kit.StartPledge(t, ctx, miner, nSectors, 0, nil)
+		toCheck := miner.StartPledge(ctx, nSectors, 0, nil)
 
 		for len(toCheck) > 0 {
 			states := map[api.SectorState]int{}
@@ -157,80 +97,27 @@ func TestPledgeBatching(t *testing.T) {
 			build.Clock.Sleep(100 * time.Millisecond)
 			fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
 		}
-
-		atomic.StoreInt64(&mine, 0)
-		<-done
 	}
 
 	t.Run("100", func(t *testing.T) {
-		runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 100)
+		runTest(t, 100)
 	})
 }
 
 func TestPledgeBeforeNv13(t *testing.T) {
-	runTest := func(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int) {
+	blocktime := 50 * time.Millisecond
+
+	runTest := func(t *testing.T, nSectors int) {
 		ctx, cancel := context.WithCancel(context.Background())
 		defer cancel()
 
-		n, sn := b(t, []kit.FullNodeOpts{
-			{
-				Opts: func(nodes []kit.TestFullNode) node.Option {
-					return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
-						Network:   network.Version9,
-						Height:    1,
-						Migration: stmgr.UpgradeActorsV2,
-					}, {
-						Network:   network.Version10,
-						Height:    2,
-						Migration: stmgr.UpgradeActorsV3,
-					}, {
-						Network:   network.Version12,
-						Height:    3,
-						Migration: stmgr.UpgradeActorsV4,
-					}, {
-						Network:   network.Version13,
-						Height:    1000000000,
-						Migration: stmgr.UpgradeActorsV5,
-					}})
-				},
-			},
-		}, kit.OneMiner)
-		client := n[0].FullNode.(*impl.FullNodeAPI)
-		miner := sn[0]
+		opts := kit2.ConstructorOpts(kit2.LatestActorsAt(1000000000))
+		client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(), opts)
+		ens.InterconnectAll().BeginMining(blocktime)
 
-		addrinfo, err := client.NetAddrsListen(ctx)
-		if err != nil {
-			t.Fatal(err)
-		}
+		client.WaitTillChain(ctx, kit2.HeightAtLeast(10))
 
-		if err := miner.NetConnect(ctx, addrinfo); err != nil {
-			t.Fatal(err)
-		}
-		build.Clock.Sleep(time.Second)
-
-		mine := int64(1)
-		done := make(chan struct{})
-		go func() {
-			defer close(done)
-			for atomic.LoadInt64(&mine) != 0 {
-				build.Clock.Sleep(blocktime)
-				if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
-
-				}}); err != nil {
-					t.Error(err)
-				}
-			}
-		}()
-
-		for {
-			h, err := client.ChainHead(ctx)
-			require.NoError(t, err)
-			if h.Height() > 10 {
-				break
-			}
-		}
-
-		toCheck := kit.StartPledge(t, ctx, miner, nSectors, 0, nil)
+		toCheck := miner.StartPledge(ctx, nSectors, 0, nil)
 
 		for len(toCheck) > 0 {
 			states := map[api.SectorState]int{}
@@ -250,12 +137,9 @@ func TestPledgeBeforeNv13(t *testing.T) {
 			build.Clock.Sleep(100 * time.Millisecond)
 			fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
 		}
-
-		atomic.StoreInt64(&mine, 0)
-		<-done
 	}
 
 	t.Run("100-before-nv13", func(t *testing.T) {
-		runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 100)
+		runTest(t, 100)
 	})
 }
diff --git a/itests/sector_terminate_test.go b/itests/sector_terminate_test.go
index b00337c7e..faf12228c 100644
--- a/itests/sector_terminate_test.go
+++ b/itests/sector_terminate_test.go
@@ -2,18 +2,15 @@ package itests
 
 import (
 	"context"
-	"fmt"
 	"os"
 	"testing"
 	"time"
 
 	"github.com/filecoin-project/go-bitfield"
 	"github.com/filecoin-project/go-state-types/abi"
-	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/types"
 	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
-	"github.com/filecoin-project/lotus/itests/kit"
-	"github.com/filecoin-project/lotus/node/impl"
+	"github.com/filecoin-project/lotus/itests/kit2"
 	"github.com/stretchr/testify/require"
 )
@@ -22,7 +19,7 @@ func TestTerminate(t *testing.T) {
 		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
 	}
 
-	kit.QuietMiningLogs()
+	kit2.QuietMiningLogs()
 
 	const blocktime = 2 * time.Millisecond
 
@@ -31,42 +28,9 @@ func TestTerminate(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	n, sn := kit.MockMinerBuilder(t,
-		[]kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)},
-		[]kit.StorageMiner{{Full: 0, Preseal: int(nSectors)}},
-	)
-
-	client := n[0].FullNode.(*impl.FullNodeAPI)
-	miner := sn[0]
-
-	addrinfo, err := client.NetAddrsListen(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := miner.NetConnect(ctx, addrinfo); err != nil {
-		t.Fatal(err)
-	}
-	build.Clock.Sleep(time.Second)
-
-	done := make(chan struct{})
-	go func() {
-		defer close(done)
-		for ctx.Err() == nil {
-			build.Clock.Sleep(blocktime)
-			if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
-				if ctx.Err() != nil {
-					// context was canceled, ignore the error.
-					return
-				}
-				t.Error(err)
-			}
-		}
-	}()
-	defer func() {
-		cancel()
-		<-done
-	}()
+	opts := kit2.ConstructorOpts(kit2.LatestActorsAt(-1))
+	client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(), opts)
+	ens.InterconnectAll().BeginMining(blocktime)
 
 	maddr, err := miner.ActorAddress(ctx)
 	require.NoError(t, err)
@@ -79,11 +43,11 @@ func TestTerminate(t *testing.T) {
 	require.Equal(t, p.MinerPower, p.TotalPower)
 	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*nSectors))
 
-	fmt.Printf("Seal a sector\n")
+	t.Log("Seal a sector")
 
-	kit.PledgeSectors(t, ctx, miner, 1, 0, nil)
+	miner.PledgeSectors(ctx, 1, 0, nil)
 
-	fmt.Printf("wait for power\n")
+	t.Log("wait for power")
 
 	{
 		// Wait until proven.
@@ -91,17 +55,10 @@
 		require.NoError(t, err)
 
 		waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
-		fmt.Printf("End for head.Height > %d\n", waitUntil)
+		t.Logf("End for head.Height > %d", waitUntil)
 
-		for {
-			head, err := client.ChainHead(ctx)
-			require.NoError(t, err)
-
-			if head.Height() > waitUntil {
-				fmt.Printf("Now head.Height = %d\n", head.Height())
-				break
-			}
-		}
+		ts := client.WaitTillChain(ctx, kit2.HeightAtLeast(waitUntil))
+		t.Logf("Now head.Height = %d", ts.Height())
 	}
 
 	nSectors++
@@ -111,7 +68,7 @@ func TestTerminate(t *testing.T) {
 	require.Equal(t, p.MinerPower, p.TotalPower)
 	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*nSectors))
 
-	fmt.Println("Terminate a sector")
+	t.Log("Terminate a sector")
 
 	toTerminate := abi.SectorNumber(3)
 
@@ -124,7 +81,7 @@ loop:
 		si, err := miner.SectorsStatus(ctx, toTerminate, false)
 		require.NoError(t, err)
 
-		fmt.Println("state: ", si.State, msgTriggerred)
+		t.Log("state: ", si.State, msgTriggerred)
 
 		switch sealing.SectorState(si.State) {
 		case sealing.Terminating:
@@ -140,7 +97,7 @@ loop:
 			require.NoError(t, err)
 			if c != nil {
 				msgTriggerred = true
-				fmt.Println("terminate message:", c)
+				t.Log("terminate message:", c)
 
 				{
 					p, err := miner.SectorTerminatePending(ctx)
@@ -180,18 +137,11 @@ loop:
 	di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
 	require.NoError(t, err)
 
-	for {
-		head, err := client.ChainHead(ctx)
-		require.NoError(t, err)
-		if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
-			fmt.Printf("Now head.Height = %d\n", head.Height())
-			break
-		}
-		build.Clock.Sleep(blocktime)
-	}
-	require.NoError(t, err)
-	fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
+	waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
+	t.Logf("End for head.Height > %d", waitUntil)
+	ts := client.WaitTillChain(ctx, kit2.HeightAtLeast(waitUntil))
+	t.Logf("Now head.Height = %d", ts.Height())
 
 	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
 	require.NoError(t, err)
diff --git a/itests/wdpost_test.go b/itests/wdpost_test.go
index f59465f05..608c377ca 100644
--- a/itests/wdpost_test.go
+++ b/itests/wdpost_test.go
@@ -7,18 +7,18 @@ import (
 	"testing"
 	"time"
 
-	"github.com/filecoin-project/go-state-types/big"
-	"github.com/filecoin-project/lotus/api"
-	"github.com/filecoin-project/lotus/itests/kit"
 	"github.com/stretchr/testify/require"
 
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
-	"github.com/filecoin-project/lotus/extern/sector-storage/mock"
+	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/specs-storage/storage"
 
+	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/extern/sector-storage/mock"
+	"github.com/filecoin-project/lotus/itests/kit2"
 	"github.com/filecoin-project/lotus/node/impl"
 )
@@ -27,7 +27,7 @@ func TestWindowedPost(t *testing.T) {
 		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
 	}
 
-	kit.QuietMiningLogs()
+	kit2.QuietMiningLogs()
 
 	var (
 		blocktime = 2 * time.Millisecond
@@ -41,50 +41,20 @@ func TestWindowedPost(t *testing.T) {
 	} {
 		height := height // copy to satisfy lints
 		t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
-			testWindowPostUpgrade(t, kit.MockMinerBuilder, blocktime, nSectors, height)
+			testWindowPostUpgrade(t, blocktime, nSectors, height)
 		})
 	}
 }
 
-func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int, upgradeHeight abi.ChainEpoch) {
+func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, upgradeHeight abi.ChainEpoch) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(upgradeHeight)}, kit.OneMiner)
+	opts := kit2.ConstructorOpts(kit2.LatestActorsAt(upgradeHeight))
+	client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(), opts)
+	ens.InterconnectAll().BeginMining(blocktime)
 
-	client := n[0].FullNode.(*impl.FullNodeAPI)
-	miner := sn[0]
-
-	addrinfo, err := client.NetAddrsListen(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := miner.NetConnect(ctx, addrinfo); err != nil {
-		t.Fatal(err)
-	}
-	build.Clock.Sleep(time.Second)
-
-	done := make(chan struct{})
-	go func() {
-		defer close(done)
-		for ctx.Err() == nil {
-			build.Clock.Sleep(blocktime)
-			if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
-				if ctx.Err() != nil {
-					// context was canceled, ignore the error.
-					return
-				}
-				t.Error(err)
-			}
-		}
-	}()
-	defer func() {
-		cancel()
-		<-done
-	}()
-
-	kit.PledgeSectors(t, ctx, miner, nSectors, 0, nil)
+	miner.PledgeSectors(ctx, nSectors, 0, nil)
 
 	maddr, err := miner.ActorAddress(ctx)
 	require.NoError(t, err)
@@ -95,19 +65,12 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
 	mid, err := address.IDFromAddress(maddr)
 	require.NoError(t, err)
 
-	fmt.Printf("Running one proving period\n")
-	fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
+	t.Log("Running one proving period")
+	waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
+	t.Logf("End for head.Height > %d", waitUntil)
 
-	for {
-		head, err := client.ChainHead(ctx)
-		require.NoError(t, err)
-
-		if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
-			fmt.Printf("Now head.Height = %d\n", head.Height())
-			break
-		}
-		build.Clock.Sleep(blocktime)
-	}
+	ts := client.WaitTillChain(ctx, kit2.HeightAtLeast(waitUntil))
+	t.Logf("Now head.Height = %d", ts.Height())
 
 	p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
 	require.NoError(t, err)
@@ -116,9 +79,9 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
 	require.NoError(t, err)
 
 	require.Equal(t, p.MinerPower, p.TotalPower)
-	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+kit.GenesisPreseals)))
+	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+kit2.DefaultPresealsPerBootstrapMiner)))
 
-	fmt.Printf("Drop some sectors\n")
+	t.Log("Drop some sectors")
 
 	// Drop 2 sectors from deadline 2 partition 0 (full partition / deadline)
 	{
@@ -162,7 +125,7 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
 		all, err := secs.All(2)
 		require.NoError(t, err)
 
-		fmt.Println("the sectors", all)
+		t.Log("the sectors", all)
 
 		s = storage.SectorRef{
 			ID: abi.SectorID{
@@ -178,20 +141,12 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
 	di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
 	require.NoError(t, err)
 
-	fmt.Printf("Go through another PP, wait for sectors to become faulty\n")
-	fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
+	t.Log("Go through another PP, wait for sectors to become faulty")
+	waitUntil = di.PeriodStart + di.WPoStProvingPeriod + 2
+	t.Logf("End for head.Height > %d", waitUntil)
 
-	for {
-		head, err := client.ChainHead(ctx)
-		require.NoError(t, err)
-
-		if head.Height() > di.PeriodStart+(di.WPoStProvingPeriod)+2 {
-			fmt.Printf("Now head.Height = %d\n", head.Height())
-			break
-		}
-
-		build.Clock.Sleep(blocktime)
-	}
+	ts = client.WaitTillChain(ctx, kit2.HeightAtLeast(waitUntil))
+	t.Logf("Now head.Height = %d", ts.Height())
 
 	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
 	require.NoError(t, err)
@@ -199,9 +154,9 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
 	require.Equal(t, p.MinerPower, p.TotalPower)
 
 	sectors := p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
-	require.Equal(t, nSectors+kit.GenesisPreseals-3, int(sectors)) // -3 just removed sectors
+	require.Equal(t, nSectors+kit2.DefaultPresealsPerBootstrapMiner-3, int(sectors)) // -3 just removed sectors
 
-	fmt.Printf("Recover one sector\n")
+	t.Log("Recover one sector")
 
 	err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, false)
 	require.NoError(t, err)
@@ -209,19 +164,11 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
 	di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
 	require.NoError(t, err)
 
-	fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
+	waitUntil = di.PeriodStart + di.WPoStProvingPeriod + 2
+	t.Logf("End for head.Height > %d", waitUntil)
 
-	for {
-		head, err := client.ChainHead(ctx)
-		require.NoError(t, err)
-
-		if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
-			fmt.Printf("Now head.Height = %d\n", head.Height())
-			break
-		}
-
-		build.Clock.Sleep(blocktime)
-	}
+	ts = client.WaitTillChain(ctx, kit2.HeightAtLeast(waitUntil))
+	t.Logf("Now head.Height = %d", ts.Height())
 
 	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
 	require.NoError(t, err)
@@ -229,11 +176,11 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
 	require.Equal(t, p.MinerPower, p.TotalPower)
 
 	sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
-	require.Equal(t, nSectors+kit.GenesisPreseals-2, int(sectors)) // -2 not recovered sectors
+	require.Equal(t, nSectors+kit2.DefaultPresealsPerBootstrapMiner-2, int(sectors)) // -2 not recovered sectors
 
 	// pledge a sector after recovery
-	kit.PledgeSectors(t, ctx, miner, 1, nSectors, nil)
+	miner.PledgeSectors(ctx, 1, nSectors, nil)
 
 	{
 		// Wait until proven.
@@ -241,17 +188,10 @@
 		require.NoError(t, err)
 
 		waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
-		fmt.Printf("End for head.Height > %d\n", waitUntil)
+		t.Logf("End for head.Height > %d", waitUntil)
 
-		for {
-			head, err := client.ChainHead(ctx)
-			require.NoError(t, err)
-
-			if head.Height() > waitUntil {
-				fmt.Printf("Now head.Height = %d\n", head.Height())
-				break
-			}
-		}
+		ts := client.WaitTillChain(ctx, kit2.HeightAtLeast(waitUntil))
+		t.Logf("Now head.Height = %d", ts.Height())
 	}
 
 	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
@@ -260,7 +200,7 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
 	require.Equal(t, p.MinerPower, p.TotalPower)
 
 	sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
-	require.Equal(t, nSectors+kit.GenesisPreseals-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged
+	require.Equal(t, nSectors+kit2.DefaultPresealsPerBootstrapMiner-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged
 }
 
 func TestWindowPostBaseFeeNoBurn(t *testing.T) {
@@ -268,7 +208,7 @@ func TestWindowPostBaseFeeNoBurn(t *testing.T) {
 		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
 	}
 
-	kit.QuietMiningLogs()
+	kit2.QuietMiningLogs()
 
 	var (
 		blocktime = 2 * time.Millisecond
@@ -281,11 +221,8 @@ func TestWindowPostBaseFeeNoBurn(t *testing.T) {
 	och := build.UpgradeClausHeight
 	build.UpgradeClausHeight = 10
 
-	n, sn := kit.MockMinerBuilder(t, kit.DefaultFullOpts(1), kit.OneMiner)
-	client := n[0].FullNode.(*impl.FullNodeAPI)
-	miner := sn[0]
-	bm := kit.ConnectAndStartMining(t, blocktime, miner, client)
-	t.Cleanup(bm.Stop)
+	client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs())
+	ens.InterconnectAll().BeginMining(blocktime)
 
 	maddr, err := miner.ActorAddress(ctx)
 	require.NoError(t, err)
@@ -293,7 +230,7 @@ func TestWindowPostBaseFeeNoBurn(t *testing.T) {
 	mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
 	require.NoError(t, err)
 
-	kit.PledgeSectors(t, ctx, miner, nSectors, 0, nil)
+	miner.PledgeSectors(ctx, nSectors, 0, nil)
 	wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
 	require.NoError(t, err)
 	en := wact.Nonce
@@ -327,18 +264,16 @@ func TestWindowPostBaseFeeBurn(t *testing.T) {
 		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
 	}
 
-	kit.QuietMiningLogs()
+	kit2.QuietMiningLogs()
 
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
 	blocktime := 2 * time.Millisecond
 
-	n, sn := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)}, kit.OneMiner)
-	client := n[0].FullNode.(*impl.FullNodeAPI)
-	miner := sn[0]
-	bm := kit.ConnectAndStartMining(t, blocktime, miner, client)
-	t.Cleanup(bm.Stop)
+	opts := kit2.ConstructorOpts(kit2.LatestActorsAt(-1))
+	client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(), opts)
+	ens.InterconnectAll().BeginMining(blocktime)
 
 	maddr, err := miner.ActorAddress(ctx)
 	require.NoError(t, err)
@@ -346,7 +281,7 @@ func TestWindowPostBaseFeeBurn(t *testing.T) {
 	mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
 	require.NoError(t, err)
 
-	kit.PledgeSectors(t, ctx, miner, 10, 0, nil)
+	miner.PledgeSectors(ctx, 10, 0, nil)
 	wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
 	require.NoError(t, err)
 	en := wact.Nonce