Merge branch 'raulk/itests-refactor-kit' into nonsense/split-market-miner-processes

Anton Evangelatov 2021-06-17 18:12:48 +02:00
commit 338bd042b9
27 changed files with 2466 additions and 846 deletions

View File

@@ -8,33 +8,30 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node/impl"
"github.com/stretchr/testify/assert"
"github.com/filecoin-project/lotus/itests/kit2"
"github.com/stretchr/testify/require"
)
func TestAPI(t *testing.T) {
t.Run("direct", func(t *testing.T) {
runAPITest(t, kit.Builder)
runAPITest(t)
})
t.Run("rpc", func(t *testing.T) {
runAPITest(t, kit.RPCBuilder)
runAPITest(t, kit2.ThroughRPC())
})
}
type apiSuite struct {
makeNodes kit.APIBuilder
opts []interface{}
}
// runAPITest is the entry point to the API test suite
func runAPITest(t *testing.T, b kit.APIBuilder) {
ts := apiSuite{
makeNodes: b,
}
func runAPITest(t *testing.T, opts ...interface{}) {
ts := apiSuite{opts: opts}
t.Run("version", ts.testVersion)
t.Run("id", ts.testID)
@@ -51,145 +48,114 @@ func (ts *apiSuite) testVersion(t *testing.T) {
lapi.RunningNodeType = lapi.NodeUnknown
})
ctx := context.Background()
apis, _ := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
napi := apis[0]
full, _, _ := kit2.EnsembleMinimal(t, ts.opts...)
v, err := full.Version(context.Background())
require.NoError(t, err)
v, err := napi.Version(ctx)
if err != nil {
t.Fatal(err)
}
versions := strings.Split(v.Version, "+")
if len(versions) <= 0 {
t.Fatal("empty version")
}
require.NotZero(t, len(versions), "empty version")
require.Equal(t, versions[0], build.BuildVersion)
}
func (ts *apiSuite) testSearchMsg(t *testing.T) {
apis, miners := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
func (ts *apiSuite) testID(t *testing.T) {
ctx := context.Background()
api := apis[0]
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
senderAddr, err := api.WalletDefaultAddress(ctx)
full, _, _ := kit2.EnsembleMinimal(t, ts.opts...)
id, err := full.ID(ctx)
if err != nil {
t.Fatal(err)
}
require.Regexp(t, "^12", id.Pretty())
}
func (ts *apiSuite) testConnectTwo(t *testing.T) {
ctx := context.Background()
one, two, _, ens := kit2.EnsembleTwoOne(t, ts.opts...)
p, err := one.NetPeers(ctx)
require.NoError(t, err)
require.Empty(t, p, "node one has peers")
p, err = two.NetPeers(ctx)
require.NoError(t, err)
require.Empty(t, p, "node two has peers")
ens.InterconnectAll()
peers, err := one.NetPeers(ctx)
require.NoError(t, err)
require.Lenf(t, peers, 2, "node one doesn't have 2 peers")
peers, err = two.NetPeers(ctx)
require.NoError(t, err)
require.Lenf(t, peers, 2, "node two doesn't have 2 peers")
}
func (ts *apiSuite) testSearchMsg(t *testing.T) {
ctx := context.Background()
full, _, ens := kit2.EnsembleMinimal(t, ts.opts...)
senderAddr, err := full.WalletDefaultAddress(ctx)
require.NoError(t, err)
msg := &types.Message{
From: senderAddr,
To: senderAddr,
Value: big.Zero(),
}
bm := kit.NewBlockMiner(t, miners[0])
bm.MineBlocks(ctx, 100*time.Millisecond)
defer bm.Stop()
sm, err := api.MpoolPushMessage(ctx, msg, nil)
if err != nil {
t.Fatal(err)
}
res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send message")
}
ens.BeginMining(100 * time.Millisecond)
searchRes, err := api.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
sm, err := full.MpoolPushMessage(ctx, msg, nil)
require.NoError(t, err)
if searchRes.TipSet != res.TipSet {
t.Fatalf("search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet)
}
res, err := full.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
require.NoError(t, err)
}
require.Equal(t, exitcode.Ok, res.Receipt.ExitCode, "message not successful")
func (ts *apiSuite) testID(t *testing.T) {
ctx := context.Background()
apis, _ := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
api := apis[0]
searchRes, err := full.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true)
require.NoError(t, err)
id, err := api.ID(ctx)
if err != nil {
t.Fatal(err)
}
assert.Regexp(t, "^12", id.Pretty())
}
func (ts *apiSuite) testConnectTwo(t *testing.T) {
ctx := context.Background()
apis, _ := ts.makeNodes(t, kit.TwoFull, kit.OneMiner)
p, err := apis[0].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 0 {
t.Error("Node 0 has a peer")
}
p, err = apis[1].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 0 {
t.Error("Node 1 has a peer")
}
addrs, err := apis[1].NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := apis[0].NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
p, err = apis[0].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 1 {
t.Error("Node 0 doesn't have 1 peer")
}
p, err = apis[1].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 1 {
t.Error("Node 0 doesn't have 1 peer")
}
require.Equalf(t, res.TipSet, searchRes.TipSet, "search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet)
}
func (ts *apiSuite) testMining(t *testing.T) {
ctx := context.Background()
fulls, miners := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
api := fulls[0]
newHeads, err := api.ChainNotify(ctx)
full, miner, _ := kit2.EnsembleMinimal(t, ts.opts...)
newHeads, err := full.ChainNotify(ctx)
require.NoError(t, err)
initHead := (<-newHeads)[0]
baseHeight := initHead.Val.Height()
h1, err := api.ChainHead(ctx)
h1, err := full.ChainHead(ctx)
require.NoError(t, err)
require.Equal(t, int64(h1.Height()), int64(baseHeight))
bm := kit.NewBlockMiner(t, miners[0])
bm.MineUntilBlock(ctx, fulls[0], nil)
bm := kit2.NewBlockMiner(t, miner)
bm.MineUntilBlock(ctx, full, nil)
require.NoError(t, err)
<-newHeads
h2, err := api.ChainHead(ctx)
h2, err := full.ChainHead(ctx)
require.NoError(t, err)
require.Greater(t, int64(h2.Height()), int64(h1.Height()))
bm.MineUntilBlock(ctx, full, nil)
require.NoError(t, err)
<-newHeads
h3, err := full.ChainHead(ctx)
require.NoError(t, err)
require.Greater(t, int64(h3.Height()), int64(h2.Height()))
}
func (ts *apiSuite) testMiningReal(t *testing.T) {
@@ -198,66 +164,26 @@ func (ts *apiSuite) testMiningReal(t *testing.T) {
build.InsecurePoStValidation = true
}()
ctx := context.Background()
fulls, miners := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
api := fulls[0]
newHeads, err := api.ChainNotify(ctx)
require.NoError(t, err)
at := (<-newHeads)[0].Val.Height()
h1, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Equal(t, int64(at), int64(h1.Height()))
bm := kit.NewBlockMiner(t, miners[0])
bm.MineUntilBlock(ctx, fulls[0], nil)
require.NoError(t, err)
<-newHeads
h2, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Greater(t, int64(h2.Height()), int64(h1.Height()))
bm.MineUntilBlock(ctx, fulls[0], nil)
require.NoError(t, err)
<-newHeads
h3, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Greater(t, int64(h3.Height()), int64(h2.Height()))
ts.testMining(t)
}
func (ts *apiSuite) testNonGenesisMiner(t *testing.T) {
ctx := context.Background()
n, sn := ts.makeNodes(t,
[]kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)},
[]kit.StorageMiner{{Full: 0, Preseal: kit.PresealGenesis}},
)
full, ok := n[0].FullNode.(*impl.FullNodeAPI)
if !ok {
t.Skip("not testing with a full node")
return
}
genesisMiner := sn[0]
full, genesisMiner, ens := kit2.EnsembleMinimal(t, ts.opts...)
bm := kit.NewBlockMiner(t, genesisMiner)
bm.MineBlocks(ctx, 4*time.Millisecond)
t.Cleanup(bm.Stop)
ens.BeginMining(4 * time.Millisecond)
gaa, err := genesisMiner.ActorAddress(ctx)
require.NoError(t, err)
gmi, err := full.StateMinerInfo(ctx, gaa, types.EmptyTSK)
_, err = full.StateMinerInfo(ctx, gaa, types.EmptyTSK)
require.NoError(t, err)
testm := n[0].Stb(ctx, t, kit.TestSpt, gmi.Owner)
var newMiner kit2.TestMiner
ens.Miner(&newMiner, full, kit2.OwnerAddr(full.DefaultKey)).Start()
ta, err := testm.ActorAddress(ctx)
ta, err := newMiner.ActorAddress(ctx)
require.NoError(t, err)
tid, err := address.IDFromAddress(ta)

View File

@@ -10,16 +10,15 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/itests/kit2"
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/stretchr/testify/require"
)
func TestBatchDealInput(t *testing.T) {
kit.QuietMiningLogs()
kit2.QuietMiningLogs()
var (
blockTime = 10 * time.Millisecond
@@ -32,50 +31,40 @@ func TestBatchDealInput(t *testing.T) {
run := func(piece, deals, expectSectors int) func(t *testing.T) {
return func(t *testing.T) {
ctx := context.Background()
publishPeriod := 10 * time.Second
maxDealsPerMsg := uint64(deals)
// Set max deals per publish deals message to maxDealsPerMsg
minerDef := []kit.StorageMiner{{
Full: 0,
Opts: node.Options(
node.Override(
new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
})),
node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
return func() (sealiface.Config, error) {
return sealiface.Config{
MaxWaitDealsSectors: 2,
MaxSealingSectors: 1,
MaxSealingSectorsForDeals: 3,
AlwaysKeepUnsealedCopy: true,
WaitDealsDelay: time.Hour,
}, nil
opts := kit2.ConstructorOpts(node.Options(
node.Override(
new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
})),
node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
return func() (sealiface.Config, error) {
return sealiface.Config{
MaxWaitDealsSectors: 2,
MaxSealingSectors: 1,
MaxSealingSectorsForDeals: 3,
AlwaysKeepUnsealedCopy: true,
WaitDealsDelay: time.Hour,
}, nil
}),
),
Preseal: kit.PresealGenesis,
}}
// Create a connect client and miner node
n, sn := kit.MockMinerBuilder(t, kit.OneFull, minerDef)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
blockMiner := kit.ConnectAndStartMining(t, blockTime, miner, client)
t.Cleanup(blockMiner.Stop)
dh := kit.NewDealHarness(t, client, miner)
ctx := context.Background()
}, nil
}),
))
client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(), opts)
ens.InterconnectAll().BeginMining(blockTime)
dh := kit2.NewDealHarness(t, client, miner)
err := miner.MarketSetAsk(ctx, big.Zero(), big.Zero(), 200, 128, 32<<30)
require.NoError(t, err)
checkNoPadding := func() {
sl, err := sn[0].SectorsList(ctx)
sl, err := miner.SectorsList(ctx)
require.NoError(t, err)
sort.Slice(sl, func(i, j int) bool {
@@ -83,7 +72,7 @@ func TestBatchDealInput(t *testing.T) {
})
for _, snum := range sl {
si, err := sn[0].SectorsStatus(ctx, snum, false)
si, err := miner.SectorsStatus(ctx, snum, false)
require.NoError(t, err)
// fmt.Printf("S %d: %+v %s\n", snum, si.Deals, si.State)
@@ -98,7 +87,7 @@ func TestBatchDealInput(t *testing.T) {
// Starts a deal and waits until it's published
runDealTillSeal := func(rseed int) {
res, _, _, err := kit.CreateImportFile(ctx, client, rseed, piece)
res, _, _, err := kit2.CreateImportFile(ctx, client, rseed, piece)
require.NoError(t, err)
deal := dh.StartDeal(ctx, res.Root, false, dealStartEpoch)
@@ -122,7 +111,7 @@ func TestBatchDealInput(t *testing.T) {
checkNoPadding()
sl, err := sn[0].SectorsList(ctx)
sl, err := miner.SectorsList(ctx)
require.NoError(t, err)
require.Equal(t, len(sl), expectSectors)
}

View File

@@ -3,21 +3,19 @@ package itests
import (
"context"
"fmt"
"sync/atomic"
"testing"
"time"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/itests/kit2"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/impl"
)
func TestCCUpgrade(t *testing.T) {
kit.QuietMiningLogs()
kit2.QuietMiningLogs()
for _, height := range []abi.ChainEpoch{
-1, // before
@@ -27,60 +25,33 @@ func TestCCUpgrade(t *testing.T) {
} {
height := height // make linters happy by copying
t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
runTestCCUpgrade(t, kit.MockMinerBuilder, 5*time.Millisecond, height)
runTestCCUpgrade(t, height)
})
}
}
func runTestCCUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) {
func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) {
ctx := context.Background()
n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(upgradeHeight)}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
blockTime := 5 * time.Millisecond
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) == 1 {
time.Sleep(blocktime)
if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
t.Error(err)
}
}
}()
opts := kit2.ConstructorOpts(kit2.LatestActorsAt(upgradeHeight))
client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(), opts)
ens.InterconnectAll().BeginMining(blockTime)
maddr, err := miner.ActorAddress(ctx)
if err != nil {
t.Fatal(err)
}
CC := abi.SectorNumber(kit.GenesisPreseals + 1)
CC := abi.SectorNumber(kit2.DefaultPresealsPerBootstrapMiner + 1)
Upgraded := CC + 1
kit.PledgeSectors(t, ctx, miner, 1, 0, nil)
miner.PledgeSectors(ctx, 1, 0, nil)
sl, err := miner.SectorsList(ctx)
if err != nil {
t.Fatal(err)
}
if len(sl) != 1 {
t.Fatal("expected 1 sector")
}
if sl[0] != CC {
t.Fatal("bad")
}
require.NoError(t, err)
require.Len(t, sl, 1, "expected 1 sector")
require.Equal(t, CC, sl[0], "unexpected sector number")
{
si, err := client.StateSectorGetInfo(ctx, maddr, CC, types.EmptyTSK)
@@ -88,13 +59,12 @@ func runTestCCUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, u
require.Less(t, 50000, int(si.Expiration))
}
if err := miner.SectorMarkForUpgrade(ctx, sl[0]); err != nil {
t.Fatal(err)
}
err = miner.SectorMarkForUpgrade(ctx, sl[0])
require.NoError(t, err)
dh := kit.NewDealHarness(t, client, miner)
dh := kit2.NewDealHarness(t, client, miner)
dh.MakeFullDeal(context.Background(), 6, false, false, 0)
dh.MakeOnlineDeal(context.Background(), 6, false, 0)
// Validate upgrade
@@ -123,10 +93,6 @@ func runTestCCUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, u
}
t.Log("waiting for sector to expire")
// wait one deadline per loop.
time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blocktime)
time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blockTime)
}
fmt.Println("shutting down mining")
atomic.AddInt64(&mine, -1)
<-done
}

View File

@@ -1,22 +1,21 @@
package itests
import (
"context"
"os"
"testing"
"time"
"github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/itests/kit2"
)
// TestClient does a basic test to exercise the client CLI commands.
func TestClient(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
kit.QuietMiningLogs()
kit2.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
clientNode, _ := kit.StartOneNodeOneMiner(ctx, t, blocktime)
kit.RunClientTest(t, cli.Commands, clientNode)
blockTime := 5 * time.Millisecond
client, _, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(), kit2.ThroughRPC())
ens.InterconnectAll().BeginMining(blockTime)
kit2.RunClientTest(t, cli.Commands, *client)
}

View File

@@ -4,11 +4,7 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"sync/atomic"
"testing"
"time"
@@ -17,108 +13,133 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/itests/kit2"
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
func TestDealCycle(t *testing.T) {
kit.QuietMiningLogs()
func TestDealCyclesConcurrent(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
kit2.QuietMiningLogs()
blockTime := 10 * time.Millisecond
// For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
dealStartEpoch := abi.ChainEpoch(2 << 12)
startEpoch := abi.ChainEpoch(2 << 12)
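// (2<<12 = 8192 epochs; at the 10ms block time above, that is roughly 80 seconds of wall-clock time.)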
t.Run("TestFullDealCycle_Single", func(t *testing.T) {
runFullDealCycles(t, 1, kit.MockMinerBuilder, blockTime, false, false, dealStartEpoch)
})
t.Run("TestFullDealCycle_Two", func(t *testing.T) {
runFullDealCycles(t, 2, kit.MockMinerBuilder, blockTime, false, false, dealStartEpoch)
})
t.Run("WithExportedCAR", func(t *testing.T) {
runFullDealCycles(t, 1, kit.MockMinerBuilder, blockTime, true, false, dealStartEpoch)
})
t.Run("TestFastRetrievalDealCycle", func(t *testing.T) {
runFastRetrievalDealFlowT(t, kit.MockMinerBuilder, blockTime, dealStartEpoch)
})
t.Run("TestZeroPricePerByteRetrievalDealFlow", func(t *testing.T) {
runZeroPricePerByteRetrievalDealFlow(t, kit.MockMinerBuilder, blockTime, dealStartEpoch)
})
runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) {
client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs())
ens.InterconnectAll().BeginMining(blockTime)
dh := kit2.NewDealHarness(t, client, miner)
runConcurrentDeals(t, dh, fullDealCyclesOpts{
n: n,
fastRetrieval: fastRetrieval,
carExport: carExport,
startEpoch: startEpoch,
})
}
cycles := []int{1, 2, 4, 8}
for _, n := range cycles {
n := n
ns := fmt.Sprintf("%d", n)
t.Run(ns+"-fastretrieval-CAR", func(t *testing.T) { runTest(t, n, true, true) })
t.Run(ns+"-fastretrieval-NoCAR", func(t *testing.T) { runTest(t, n, true, false) })
t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, false, true) })
t.Run(ns+"-stdretrieval-NoCAR", func(t *testing.T) { runTest(t, n, false, false) })
}
}
func TestAPIDealFlowReal(t *testing.T) {
type fullDealCyclesOpts struct {
n int
fastRetrieval bool
carExport bool
startEpoch abi.ChainEpoch
}
func runConcurrentDeals(t *testing.T, dh *kit2.DealHarness, opts fullDealCyclesOpts) {
errgrp, _ := errgroup.WithContext(context.Background())
for i := 0; i < opts.n; i++ {
i := i
errgrp.Go(func() (err error) {
defer func() {
// This is necessary because golang can't deal with test
// failures being reported from children goroutines ¯\_(ツ)_/¯
if r := recover(); r != nil {
err = fmt.Errorf("deal failed: %s", r)
}
}()
deal, res, inPath := dh.MakeOnlineDeal(context.Background(), 5+i, opts.fastRetrieval, opts.startEpoch)
outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, opts.carExport)
kit2.AssertFilesEqual(t, inPath, outPath)
return nil
})
}
require.NoError(t, errgrp.Wait())
}
func TestDealsWithSealingAndRPC(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
kit.QuietMiningLogs()
kit2.QuietMiningLogs()
// TODO: just set this globally?
oldDelay := policy.GetPreCommitChallengeDelay()
policy.SetPreCommitChallengeDelay(5)
t.Cleanup(func() {
policy.SetPreCommitChallengeDelay(oldDelay)
var blockTime = 1 * time.Second
client, miner, ens := kit2.EnsembleMinimal(t, kit2.ThroughRPC()) // no mock proofs.
ens.InterconnectAll().BeginMining(blockTime)
dh := kit2.NewDealHarness(t, client, miner)
t.Run("stdretrieval", func(t *testing.T) {
runConcurrentDeals(t, dh, fullDealCyclesOpts{n: 1})
})
t.Run("basic", func(t *testing.T) {
runFullDealCycles(t, 1, kit.Builder, time.Second, false, false, 0)
t.Run("fastretrieval", func(t *testing.T) {
runConcurrentDeals(t, dh, fullDealCyclesOpts{n: 1, fastRetrieval: true})
})
t.Run("fast-retrieval", func(t *testing.T) {
runFullDealCycles(t, 1, kit.Builder, time.Second, false, true, 0)
})
t.Run("retrieval-second", func(t *testing.T) {
runSecondDealRetrievalTest(t, kit.Builder, time.Second)
t.Run("fastretrieval-twodeals-sequential", func(t *testing.T) {
runConcurrentDeals(t, dh, fullDealCyclesOpts{n: 1, fastRetrieval: true})
runConcurrentDeals(t, dh, fullDealCyclesOpts{n: 1, fastRetrieval: true})
})
}
func TestPublishDealsBatching(t *testing.T) {
ctx := context.Background()
var (
ctx = context.Background()
publishPeriod = 10 * time.Second
maxDealsPerMsg = uint64(2) // Set max deals per publish deals message to 2
startEpoch = abi.ChainEpoch(2 << 12)
)
kit.QuietMiningLogs()
kit2.QuietMiningLogs()
b := kit.MockMinerBuilder
blocktime := 10 * time.Millisecond
startEpoch := abi.ChainEpoch(2 << 12)
opts := node.Override(new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
}),
)
publishPeriod := 10 * time.Second
maxDealsPerMsg := uint64(2)
client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(), kit2.ConstructorOpts(opts))
ens.InterconnectAll().BeginMining(10 * time.Millisecond)
// Set max deals per publish deals message to 2
minerDef := []kit.StorageMiner{{
Full: 0,
Opts: node.Override(
new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
})),
Preseal: kit.PresealGenesis,
}}
// Create a connect client and miner node
n, sn := b(t, kit.OneFull, minerDef)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
dh := kit.NewDealHarness(t, client, miner)
dh := kit2.NewDealHarness(t, client, miner)
// Starts a deal and waits until it's published
runDealTillPublish := func(rseed int) {
res, _, _, err := kit.CreateImportFile(ctx, client, rseed, 0)
require.NoError(t, err)
res, _ := client.CreateImportFile(ctx, rseed, 0)
upds, err := client.ClientGetDealUpdates(ctx)
require.NoError(t, err)
@@ -183,133 +204,69 @@ func TestPublishDealsBatching(t *testing.T) {
}
}
func TestDealMining(t *testing.T) {
func TestFirstDealEnablesMining(t *testing.T) {
// test making a deal with a fresh miner, and see if it starts to mine.
if testing.Short() {
t.Skip("skipping test in short mode")
}
kit.QuietMiningLogs()
kit2.QuietMiningLogs()
b := kit.MockMinerBuilder
blocktime := 50 * time.Millisecond
var (
client kit2.TestFullNode
genMiner kit2.TestMiner // bootstrap
provider kit2.TestMiner // no sectors, will need to create one
)
ens := kit2.NewEnsemble(t, kit2.MockProofs())
ens.FullNode(&client)
ens.Miner(&genMiner, &client)
ens.Miner(&provider, &client, kit2.PresealSectors(0))
ens.Start().InterconnectAll().BeginMining(50 * time.Millisecond)
ctx := context.Background()
fulls, miners := b(t,
kit.OneFull,
[]kit.StorageMiner{
{Full: 0, Preseal: kit.PresealGenesis},
{Full: 0, Preseal: 0}, // TODO: Add support for miners on non-first full node
})
client := fulls[0].FullNode.(*impl.FullNodeAPI)
genesisMiner := miners[0]
provider := miners[1]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
dh := kit2.NewDealHarness(t, &client, &provider)
if err := provider.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
ref, _ := client.CreateImportFile(ctx, 5, 0)
if err := genesisMiner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
t.Log("FILE CID:", ref.Root)
time.Sleep(time.Second)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
data := make([]byte, 600)
rand.New(rand.NewSource(5)).Read(data)
r := bytes.NewReader(data)
fcid, err := client.ClientImportLocal(ctx, r)
if err != nil {
t.Fatal(err)
}
fmt.Println("FILE CID: ", fcid)
var mine int32 = 1
done := make(chan struct{})
minedTwo := make(chan struct{})
m2addr, err := miners[1].ActorAddress(context.TODO())
if err != nil {
t.Fatal(err)
}
// start a goroutine to monitor head changes from the client
// once the provider has mined a block, thanks to the power acquired from the deal,
// we pass the test.
providerMined := make(chan struct{})
heads, err := client.ChainNotify(ctx)
require.NoError(t, err)
go func() {
defer close(done)
complChan := minedTwo
for atomic.LoadInt32(&mine) != 0 {
wait := make(chan int)
mdone := func(mined bool, _ abi.ChainEpoch, err error) {
n := 0
if mined {
n = 1
for chg := range heads {
for _, c := range chg {
if c.Type != "apply" {
continue
}
wait <- n
}
if err := miners[0].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
t.Error(err)
}
if err := miners[1].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
t.Error(err)
}
expect := <-wait
expect += <-wait
time.Sleep(blocktime)
if expect == 0 {
// null block
continue
}
var nodeOneMined bool
for _, node := range miners {
mb, err := node.MiningBase(ctx)
if err != nil {
t.Error(err)
return
}
for _, b := range mb.Blocks() {
if b.Miner == m2addr {
nodeOneMined = true
break
for _, b := range c.Val.Blocks() {
if b.Miner == provider.ActorAddr {
close(providerMined)
return
}
}
}
if nodeOneMined && complChan != nil {
close(complChan)
complChan = nil
}
}
}()
dh := kit.NewDealHarness(t, client, provider)
deal := dh.StartDeal(ctx, fcid, false, 0)
// now perform the deal.
deal := dh.StartDeal(ctx, ref.Root, false, 0)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
dh.WaitDealSealed(ctx, deal, false, false, nil)
<-minedTwo
atomic.StoreInt32(&mine, 0)
fmt.Println("shutting down mining")
<-done
<-providerMined
}
func TestOfflineDealFlow(t *testing.T) {
@@ -322,22 +279,19 @@ func TestOfflineDealFlow(t *testing.T) {
runTest := func(t *testing.T, fastRet bool) {
ctx := context.Background()
fulls, miners := kit.MockMinerBuilder(t, kit.OneFull, kit.OneMiner)
client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs())
ens.InterconnectAll().BeginMining(blocktime)
kit.ConnectAndStartMining(t, blocktime, miner, client)
dh := kit.NewDealHarness(t, client, miner)
dh := kit2.NewDealHarness(t, client, miner)
// Create a random file and import on the client.
res, path, data, err := kit.CreateImportFile(ctx, client, 1, 0)
require.NoError(t, err)
res, inFile := client.CreateImportFile(ctx, 1, 0)
// Get the piece size and commP
fcid := res.Root
pieceInfo, err := client.ClientDealPieceCID(ctx, fcid)
rootCid := res.Root
pieceInfo, err := client.ClientDealPieceCID(ctx, rootCid)
require.NoError(t, err)
fmt.Println("FILE CID: ", fcid)
t.Log("FILE CID:", rootCid)
// Create a storage deal with the miner
maddr, err := miner.ActorAddress(ctx)
@@ -349,7 +303,7 @@ func TestOfflineDealFlow(t *testing.T) {
// Manual storage deal (offline deal)
dataRef := &storagemarket.DataRef{
TransferType: storagemarket.TTManual,
Root: fcid,
Root: rootCid,
PieceCid: &pieceInfo.PieceCID,
PieceSize: pieceInfo.PieceSize.Unpadded(),
}
@@ -374,10 +328,9 @@ func TestOfflineDealFlow(t *testing.T) {
}, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State])
// Create a CAR file from the raw file
carFileDir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-car")
require.NoError(t, err)
carFileDir := t.TempDir()
carFilePath := filepath.Join(carFileDir, "out.car")
err = client.ClientGenCar(ctx, api.FileRef{Path: path}, carFilePath)
err = client.ClientGenCar(ctx, api.FileRef{Path: inFile}, carFilePath)
require.NoError(t, err)
// Import the CAR file on the miner - this is the equivalent to
@@ -391,127 +344,32 @@ func TestOfflineDealFlow(t *testing.T) {
t.Logf("deal published, retrieving")
// Retrieve the deal
dh.TestRetrieval(ctx, fcid, &pieceInfo.PieceCID, false, data)
outFile := dh.PerformRetrieval(ctx, proposalCid, rootCid, false)
kit2.AssertFilesEqual(t, inFile, outFile)
}
t.Run("NormalRetrieval", func(t *testing.T) {
runTest(t, false)
})
t.Run("FastRetrieval", func(t *testing.T) {
runTest(t, true)
})
t.Run("stdretrieval", func(t *testing.T) { runTest(t, false) })
t.Run("fastretrieval", func(t *testing.T) { runTest(t, true) })
}
func runFullDealCycles(t *testing.T, n int, b kit.APIBuilder, blocktime time.Duration, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
fulls, miners := b(t, kit.OneFull, kit.OneMiner)
client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
dh := kit.NewDealHarness(t, client, miner)
baseseed := 6
for i := 0; i < n; i++ {
dh.MakeFullDeal(context.Background(), baseseed+i, carExport, fastRet, startEpoch)
func TestZeroPricePerByteRetrieval(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
}
func runFastRetrievalDealFlowT(t *testing.T, b kit.APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
kit2.QuietMiningLogs()
var (
blockTime = 10 * time.Millisecond
startEpoch = abi.ChainEpoch(2 << 12)
)
client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs())
ens.InterconnectAll().BeginMining(blockTime)
ctx := context.Background()
fulls, miners := b(t, kit.OneFull, kit.OneMiner)
client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
dh := kit.NewDealHarness(t, client, miner)
data := make([]byte, 1600)
rand.New(rand.NewSource(int64(8))).Read(data)
r := bytes.NewReader(data)
fcid, err := client.ClientImportLocal(ctx, r)
if err != nil {
t.Fatal(err)
}
fmt.Println("FILE CID: ", fcid)
deal := dh.StartDeal(ctx, fcid, true, startEpoch)
dh.WaitDealPublished(ctx, deal)
fmt.Println("deal published, retrieving")
// Retrieval
info, err := client.ClientGetDealInfo(ctx, *deal)
require.NoError(t, err)
dh.TestRetrieval(ctx, fcid, &info.PieceCID, false, data)
}
func runSecondDealRetrievalTest(t *testing.T, b kit.APIBuilder, blocktime time.Duration) {
ctx := context.Background()
fulls, miners := b(t, kit.OneFull, kit.OneMiner)
client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
dh := kit.NewDealHarness(t, client, miner)
{
data1 := make([]byte, 800)
rand.New(rand.NewSource(int64(3))).Read(data1)
r := bytes.NewReader(data1)
fcid1, err := client.ClientImportLocal(ctx, r)
if err != nil {
t.Fatal(err)
}
data2 := make([]byte, 800)
rand.New(rand.NewSource(int64(9))).Read(data2)
r2 := bytes.NewReader(data2)
fcid2, err := client.ClientImportLocal(ctx, r2)
if err != nil {
t.Fatal(err)
}
deal1 := dh.StartDeal(ctx, fcid1, true, 0)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
dh.WaitDealSealed(ctx, deal1, true, false, nil)
deal2 := dh.StartDeal(ctx, fcid2, true, 0)
time.Sleep(time.Second)
dh.WaitDealSealed(ctx, deal2, false, false, nil)
// Retrieval
info, err := client.ClientGetDealInfo(ctx, *deal2)
require.NoError(t, err)
rf, _ := miner.SectorsRefs(ctx)
fmt.Printf("refs: %+v\n", rf)
dh.TestRetrieval(ctx, fcid2, &info.PieceCID, false, data2)
}
}
func runZeroPricePerByteRetrievalDealFlow(t *testing.T, b kit.APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
ctx := context.Background()
fulls, miners := b(t, kit.OneFull, kit.OneMiner)
client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
dh := kit.NewDealHarness(t, client, miner)
// Set price-per-byte to zero
ask, err := miner.MarketGetRetrievalAsk(ctx)
require.NoError(t, err)
@@ -519,5 +377,9 @@ func runZeroPricePerByteRetrievalDealFlow(t *testing.T, b kit.APIBuilder, blockt
err = miner.MarketSetRetrievalAsk(ctx, ask)
require.NoError(t, err)
dh.MakeFullDeal(ctx, 6, false, false, startEpoch)
dh := kit2.NewDealHarness(t, client, miner)
runConcurrentDeals(t, dh, fullDealCyclesOpts{
n: 1,
startEpoch: startEpoch,
})
}

itests/kit2/blockminer.go Normal file (124 additions)
View File

@@ -0,0 +1,124 @@
package kit2
import (
"context"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/miner"
"github.com/stretchr/testify/require"
)
// BlockMiner is a utility that makes a test miner mine blocks on a timer.
type BlockMiner struct {
t *testing.T
miner *TestMiner
nextNulls int64
wg sync.WaitGroup
cancel context.CancelFunc
}
func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner {
return &BlockMiner{
t: t,
miner: miner,
cancel: func() {},
}
}
func (bm *BlockMiner) MineBlocks(ctx context.Context, blocktime time.Duration) {
time.Sleep(time.Second)
// wrap context in a cancellable context.
ctx, bm.cancel = context.WithCancel(ctx)
bm.wg.Add(1)
go func() {
defer bm.wg.Done()
for {
select {
case <-time.After(blocktime):
case <-ctx.Done():
return
}
nulls := atomic.SwapInt64(&bm.nextNulls, 0)
err := bm.miner.MineOne(ctx, miner.MineReq{
InjectNulls: abi.ChainEpoch(nulls),
Done: func(bool, abi.ChainEpoch, error) {},
})
switch {
case err == nil: // wrap around
case ctx.Err() != nil: // context fired.
return
default: // log error
bm.t.Error(err)
}
}
}()
}
// InjectNulls injects the specified number of null rounds into the next
// mining rounds.
func (bm *BlockMiner) InjectNulls(rounds abi.ChainEpoch) {
atomic.AddInt64(&bm.nextNulls, int64(rounds))
}
func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn *TestFullNode, cb func(abi.ChainEpoch)) {
for i := 0; i < 1000; i++ {
var (
success bool
err error
epoch abi.ChainEpoch
wait = make(chan struct{})
)
doneFn := func(win bool, ep abi.ChainEpoch, e error) {
success = win
err = e
epoch = ep
wait <- struct{}{}
}
mineErr := bm.miner.MineOne(ctx, miner.MineReq{Done: doneFn})
require.NoError(bm.t, mineErr)
<-wait
require.NoError(bm.t, err)
if success {
// Wait until it shows up on the given full node's ChainHead
nloops := 200
for i := 0; i < nloops; i++ {
ts, err := fn.ChainHead(ctx)
require.NoError(bm.t, err)
if ts.Height() == epoch {
break
}
require.NotEqual(bm.t, i, nloops-1, "block never managed to sync to node")
time.Sleep(time.Millisecond * 10)
}
if cb != nil {
cb(epoch)
}
return
}
bm.t.Log("did not mine block, trying again", i)
}
bm.t.Fatal("failed to mine 1000 times in a row...")
}
// Stop stops the block miner.
func (bm *BlockMiner) Stop() {
bm.t.Log("shutting down mining")
bm.cancel()
bm.wg.Wait()
}
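A minimal caller-side sketch of driving BlockMiner directly instead of through ens.BeginMining; the test name is hypothetical, and the kit2 calls are the ones introduced in this commit:

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit2"
	"github.com/stretchr/testify/require"
)

func TestBlockMinerSketch(t *testing.T) {
	ctx := context.Background()

	// one full node and one miner, with mock proofs; EnsembleMinimal also starts them.
	full, miner, _ := kit2.EnsembleMinimal(t, kit2.MockProofs())

	bm := kit2.NewBlockMiner(t, miner)
	bm.MineBlocks(ctx, 50*time.Millisecond) // mine on a timer in the background
	defer bm.Stop()                         // cancels the loop and waits for it to exit

	bm.InjectNulls(2) // the next mining round will be preceded by two null rounds

	// wait until the chain has advanced past genesis on the full node.
	require.Eventually(t, func() bool {
		head, err := full.ChainHead(ctx)
		return err == nil && head.Height() > 0
	}, 10*time.Second, 100*time.Millisecond, "chain did not advance past genesis")
}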

itests/kit2/client.go Normal file (146 additions)
View File

@@ -0,0 +1,146 @@
package kit2
import (
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"regexp"
"strings"
"testing"
"time"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/specs-actors/v2/actors/builtin"
"github.com/stretchr/testify/require"
lcli "github.com/urfave/cli/v2"
)
// RunClientTest exercises some of the Client CLI commands
func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode TestFullNode) {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
// Create mock CLI
mockCLI := NewMockCLI(ctx, t, cmds)
clientCLI := mockCLI.Client(clientNode.ListenAddr)
// Get the Miner address
addrs, err := clientNode.StateListMiners(ctx, types.EmptyTSK)
require.NoError(t, err)
require.Len(t, addrs, 1)
minerAddr := addrs[0]
fmt.Println("Miner:", minerAddr)
// client query-ask <Miner addr>
out := clientCLI.RunCmd("client", "query-ask", minerAddr.String())
require.Regexp(t, regexp.MustCompile("Ask:"), out)
// Create a deal (non-interactive)
// client deal --start-epoch=<start epoch> <cid> <Miner addr> 1000000attofil <duration>
res, _, _, err := CreateImportFile(ctx, clientNode, 1, 0)
require.NoError(t, err)
startEpoch := fmt.Sprintf("--start-epoch=%d", 2<<12)
dataCid := res.Root
price := "1000000attofil"
duration := fmt.Sprintf("%d", build.MinDealDuration)
out = clientCLI.RunCmd("client", "deal", startEpoch, dataCid.String(), minerAddr.String(), price, duration)
fmt.Println("client deal", out)
// Create a deal (interactive)
// client deal
// <cid>
// <duration> (in days)
// <miner addr>
// "no" (verified Client)
// "yes" (confirm deal)
res, _, _, err = CreateImportFile(ctx, clientNode, 2, 0)
require.NoError(t, err)
dataCid2 := res.Root
duration = fmt.Sprintf("%d", build.MinDealDuration/builtin.EpochsInDay)
cmd := []string{"client", "deal"}
interactiveCmds := []string{
dataCid2.String(),
duration,
minerAddr.String(),
"no",
"yes",
}
out = clientCLI.RunInteractiveCmd(cmd, interactiveCmds)
fmt.Println("client deal:\n", out)
// Wait for provider to start sealing deal
dealStatus := ""
for {
// client list-deals
out = clientCLI.RunCmd("client", "list-deals")
fmt.Println("list-deals:\n", out)
lines := strings.Split(out, "\n")
require.GreaterOrEqual(t, len(lines), 2)
re := regexp.MustCompile(`\s+`)
parts := re.Split(lines[1], -1)
if len(parts) < 4 {
require.Fail(t, "bad list-deals output format")
}
dealStatus = parts[3]
fmt.Println(" Deal status:", dealStatus)
st := CategorizeDealState(dealStatus)
require.NotEqual(t, TestDealStateFailed, st)
if st == TestDealStateComplete {
break
}
time.Sleep(time.Second)
}
// Retrieve the first file from the Miner
// client retrieve <cid> <file path>
tmpdir, err := ioutil.TempDir(os.TempDir(), "test-cli-Client")
require.NoError(t, err)
path := filepath.Join(tmpdir, "outfile.dat")
out = clientCLI.RunCmd("client", "retrieve", dataCid.String(), path)
fmt.Println("retrieve:\n", out)
require.Regexp(t, regexp.MustCompile("Success"), out)
}
func CreateImportFile(ctx context.Context, client api.FullNode, rseed int, size int) (res *api.ImportRes, path string, data []byte, err error) {
data, path, err = createRandomFile(rseed, size)
if err != nil {
return nil, "", nil, err
}
res, err = client.ClientImport(ctx, api.FileRef{Path: path})
if err != nil {
return nil, "", nil, err
}
return res, path, data, nil
}
func createRandomFile(rseed, size int) ([]byte, string, error) {
if size == 0 {
size = 1600
}
data := make([]byte, size)
rand.New(rand.NewSource(int64(rseed))).Read(data)
dir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-")
if err != nil {
return nil, "", err
}
path := filepath.Join(dir, "sourcefile.dat")
err = ioutil.WriteFile(path, data, 0644)
if err != nil {
return nil, "", err
}
return data, path, nil
}
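A short sketch of how tests call the import helpers above; the test name is hypothetical, and both forms of CreateImportFile (the package-level function here and the TestFullNode convenience method) appear in the test diffs earlier in this commit:

package itests

import (
	"context"
	"testing"

	"github.com/filecoin-project/lotus/itests/kit2"
	"github.com/stretchr/testify/require"
)

func TestImportFileSketch(t *testing.T) {
	ctx := context.Background()
	client, _, _ := kit2.EnsembleMinimal(t, kit2.MockProofs())

	// package-level helper: returns the import result, the file path, the random
	// bytes written, and an error; size 0 falls back to the 1600-byte default.
	res, path, data, err := kit2.CreateImportFile(ctx, client, 1, 0)
	require.NoError(t, err)
	t.Log("root:", res.Root, "path:", path, "size:", len(data))

	// TestFullNode also carries a convenience wrapper that asserts internally and
	// returns only the result and the path, as used elsewhere in this commit.
	res2, inFile := client.CreateImportFile(ctx, 2, 0)
	t.Log("root:", res2.Root, "path:", inFile)
}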

itests/kit2/deals.go Normal file (242 additions)
View File

@@ -0,0 +1,242 @@
package kit2
import (
"context"
"io/ioutil"
"os"
"testing"
"time"
"github.com/ipfs/go-cid"
files "github.com/ipfs/go-ipfs-files"
"github.com/ipld/go-car"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
ipld "github.com/ipfs/go-ipld-format"
dag "github.com/ipfs/go-merkledag"
dstest "github.com/ipfs/go-merkledag/test"
unixfile "github.com/ipfs/go-unixfs/file"
)
type DealHarness struct {
t *testing.T
client *TestFullNode
miner *TestMiner
}
// NewDealHarness creates a test harness that contains testing utilities for deals.
func NewDealHarness(t *testing.T, client *TestFullNode, miner *TestMiner) *DealHarness {
return &DealHarness{
t: t,
client: client,
miner: miner,
}
}
// MakeOnlineDeal makes an online deal, generating a random file with the
// supplied seed, and setting the specified fast retrieval flag and start epoch
// on the storage deal. It returns when the deal is sealed.
//
// TODO: convert input parameters to struct, and add size as an input param.
func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, rseed int, fastRet bool, startEpoch abi.ChainEpoch) (deal *cid.Cid, res *api.ImportRes, path string) {
res, path = dh.client.CreateImportFile(ctx, rseed, 0)
dh.t.Logf("FILE CID: %s", res.Root)
deal = dh.StartDeal(ctx, res.Root, fastRet, startEpoch)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
dh.WaitDealSealed(ctx, deal, false, false, nil)
return deal, res, path
}
// StartDeal starts a storage deal between the client and the miner.
func (dh *DealHarness) StartDeal(ctx context.Context, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid {
maddr, err := dh.miner.ActorAddress(ctx)
require.NoError(dh.t, err)
addr, err := dh.client.WalletDefaultAddress(ctx)
require.NoError(dh.t, err)
deal, err := dh.client.ClientStartDeal(ctx, &api.StartDealParams{
Data: &storagemarket.DataRef{
TransferType: storagemarket.TTGraphsync,
Root: fcid,
},
Wallet: addr,
Miner: maddr,
EpochPrice: types.NewInt(1000000),
DealStartEpoch: startEpoch,
MinBlocksDuration: uint64(build.MinDealDuration),
FastRetrieval: fastRet,
})
require.NoError(dh.t, err)
return deal
}
// WaitDealSealed waits until the deal is sealed.
func (dh *DealHarness) WaitDealSealed(ctx context.Context, deal *cid.Cid, noseal, noSealStart bool, cb func()) {
loop:
for {
di, err := dh.client.ClientGetDealInfo(ctx, *deal)
require.NoError(dh.t, err)
switch di.State {
case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing:
if noseal {
return
}
if !noSealStart {
dh.StartSealingWaiting(ctx)
}
case storagemarket.StorageDealProposalRejected:
dh.t.Fatal("deal rejected")
case storagemarket.StorageDealFailing:
dh.t.Fatal("deal failed")
case storagemarket.StorageDealError:
dh.t.Fatal("deal errored", di.Message)
case storagemarket.StorageDealActive:
dh.t.Log("COMPLETE", di)
break loop
}
mds, err := dh.miner.MarketListIncompleteDeals(ctx)
require.NoError(dh.t, err)
var minerState storagemarket.StorageDealStatus
for _, md := range mds {
if md.DealID == di.DealID {
minerState = md.State
break
}
}
dh.t.Logf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState])
time.Sleep(time.Second / 2)
if cb != nil {
cb()
}
}
}
// WaitDealPublished waits until the deal is published.
func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) {
subCtx, cancel := context.WithCancel(ctx)
defer cancel()
updates, err := dh.miner.MarketGetDealUpdates(subCtx)
require.NoError(dh.t, err)
for {
select {
case <-ctx.Done():
dh.t.Fatal("context timeout")
case di := <-updates:
if deal.Equals(di.ProposalCid) {
switch di.State {
case storagemarket.StorageDealProposalRejected:
dh.t.Fatal("deal rejected")
case storagemarket.StorageDealFailing:
dh.t.Fatal("deal failed")
case storagemarket.StorageDealError:
dh.t.Fatal("deal errored", di.Message)
case storagemarket.StorageDealFinalizing, storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
dh.t.Log("COMPLETE", di)
return
}
dh.t.Log("Deal state: ", storagemarket.DealStates[di.State])
}
}
}
}
func (dh *DealHarness) StartSealingWaiting(ctx context.Context) {
snums, err := dh.miner.SectorsList(ctx)
require.NoError(dh.t, err)
for _, snum := range snums {
si, err := dh.miner.SectorsStatus(ctx, snum, false)
require.NoError(dh.t, err)
dh.t.Logf("Sector state: %s", si.State)
if si.State == api.SectorState(sealing.WaitDeals) {
require.NoError(dh.t, dh.miner.SectorStartSealing(ctx, snum))
}
dh.miner.FlushSealingBatches(ctx)
}
}
func (dh *DealHarness) PerformRetrieval(ctx context.Context, deal *cid.Cid, root cid.Cid, carExport bool) (path string) {
// perform retrieval.
info, err := dh.client.ClientGetDealInfo(ctx, *deal)
require.NoError(dh.t, err)
offers, err := dh.client.ClientFindData(ctx, root, &info.PieceCID)
require.NoError(dh.t, err)
require.NotEmpty(dh.t, offers, "no offers")
carFile, err := ioutil.TempFile(dh.t.TempDir(), "ret-car")
require.NoError(dh.t, err)
defer carFile.Close() //nolint:errcheck
caddr, err := dh.client.WalletDefaultAddress(ctx)
require.NoError(dh.t, err)
ref := &api.FileRef{
Path: carFile.Name(),
IsCAR: carExport,
}
updates, err := dh.client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref)
require.NoError(dh.t, err)
for update := range updates {
require.Emptyf(dh.t, update.Err, "retrieval failed: %s", update.Err)
}
ret := carFile.Name()
if carExport {
actualFile := dh.ExtractFileFromCAR(ctx, carFile)
ret = actualFile.Name()
_ = actualFile.Close() //nolint:errcheck
}
return ret
}
func (dh *DealHarness) ExtractFileFromCAR(ctx context.Context, file *os.File) (out *os.File) {
bserv := dstest.Bserv()
ch, err := car.LoadCar(bserv.Blockstore(), file)
require.NoError(dh.t, err)
b, err := bserv.GetBlock(ctx, ch.Roots[0])
require.NoError(dh.t, err)
nd, err := ipld.Decode(b)
require.NoError(dh.t, err)
dserv := dag.NewDAGService(bserv)
fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
require.NoError(dh.t, err)
tmpfile, err := ioutil.TempFile(dh.t.TempDir(), "file-in-car")
require.NoError(dh.t, err)
defer tmpfile.Close() //nolint:errcheck
err = files.WriteTo(fil, tmpfile.Name())
require.NoError(dh.t, err)
return tmpfile
}
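A minimal single-deal sketch composing the harness methods above (MakeOnlineDeal, PerformRetrieval, AssertFilesEqual); the test name and parameters are illustrative, mirroring runConcurrentDeals in the deals test earlier in this commit:

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit2"
)

func TestSingleDealCycleSketch(t *testing.T) {
	ctx := context.Background()

	client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs())
	ens.InterconnectAll().BeginMining(10 * time.Millisecond)

	dh := kit2.NewDealHarness(t, client, miner)

	// import a random file (seed 5), make an online deal, wait for it to seal,
	// then retrieve it and check the output matches the input byte for byte.
	deal, res, inPath := dh.MakeOnlineDeal(ctx, 5, false /* fastRet */, 0 /* startEpoch */)
	outPath := dh.PerformRetrieval(ctx, deal, res.Root, false /* carExport */)
	kit2.AssertFilesEqual(t, inPath, outPath)
}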

View File

@@ -0,0 +1,21 @@
package kit2
type TestDealState int
const (
TestDealStateFailed = TestDealState(-1)
TestDealStateInProgress = TestDealState(0)
TestDealStateComplete = TestDealState(1)
)
// CategorizeDealState categorizes deal states into one of three states:
// Complete, InProgress, Failed.
func CategorizeDealState(dealStatus string) TestDealState {
switch dealStatus {
case "StorageDealFailing", "StorageDealError":
return TestDealStateFailed
case "StorageDealStaged", "StorageDealAwaitingPreCommit", "StorageDealSealing", "StorageDealActive", "StorageDealExpired", "StorageDealSlashed":
return TestDealStateComplete
}
return TestDealStateInProgress
}

itests/kit2/ensemble.go Normal file (623 additions)
View File

@@ -0,0 +1,623 @@
package kit2
import (
"bytes"
"context"
"crypto/rand"
"io/ioutil"
"sync"
"testing"
"time"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/go-storedcounter"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/gen"
genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
"github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet"
"github.com/filecoin-project/lotus/cmd/lotus-seed/seed"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
"github.com/filecoin-project/lotus/genesis"
lotusminer "github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/modules/dtypes"
testing2 "github.com/filecoin-project/lotus/node/modules/testing"
"github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/lotus/storage/mockstorage"
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power"
"github.com/ipfs/go-datastore"
libp2pcrypto "github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/peer"
mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
"github.com/stretchr/testify/require"
)
func init() {
chain.BootstrapPeerThreshold = 1
messagepool.HeadChangeCoalesceMinDelay = time.Microsecond
messagepool.HeadChangeCoalesceMaxDelay = 2 * time.Microsecond
messagepool.HeadChangeCoalesceMergeInterval = 100 * time.Nanosecond
}
// Ensemble is a collection of nodes instantiated within a test.
//
// Create a new ensemble with:
//
// ens := kit.NewEnsemble()
//
// Create full nodes and miners:
//
// var full TestFullNode
// var miner TestMiner
// ens.FullNode(&full, opts...) // populates a full node
// ens.Miner(&miner, &full, opts...) // populates a miner, using the full node as its chain daemon
//
// It is possible to pass functional options to set initial balances,
// presealed sectors, owner keys, etc.
//
// After the initial nodes are added, call `ens.Start()` to forge genesis
// and start the network. Mining will NOT be started automatically. It needs
// to be started explicitly by calling `BeginMining`.
//
// Nodes also need to be connected with one another, either via `ens.Connect()`
// or `ens.InterconnectAll()`. A common incantation for simple tests is to do:
//
// ens.InterconnectAll().BeginMining(blocktime)
//
// You can continue to add more nodes, but you must always follow with
// `ens.Start()` to activate the new nodes.
//
// The API is chainable, so it's possible to do a lot in a very succinct way:
//
// kit.NewEnsemble().FullNode(&full).Miner(&miner, &full).Start().InterconnectAll().BeginMining()
//
// You can also find convenient fullnode:miner presets, such as 1:1, 1:2,
// and 2:1, e.g.:
//
// kit.EnsembleMinimal()
// kit.EnsembleOneTwo()
// kit.EnsembleTwoOne()
//
type Ensemble struct {
t *testing.T
bootstrapped bool
genesisBlock bytes.Buffer
mn mocknet.Mocknet
options *ensembleOpts
inactive struct {
fullnodes []*TestFullNode
miners []*TestMiner
}
active struct {
fullnodes []*TestFullNode
miners []*TestMiner
}
genesis struct {
miners []genesis.Miner
accounts []genesis.Actor
}
}
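A slightly fuller, caller-side sketch of the workflow described in the doc comment above; the test name is hypothetical, and the options and chaining are the ones defined in this package:

package itests

import (
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit2"
)

func TestEnsembleSketch(t *testing.T) {
	var (
		full  kit2.TestFullNode
		miner kit2.TestMiner
	)

	ens := kit2.NewEnsemble(t, kit2.MockProofs())
	ens.FullNode(&full).Miner(&miner, &full).Start() // forge genesis and start both nodes
	ens.InterconnectAll().BeginMining(10 * time.Millisecond)

	// nodes enrolled after genesis need another Start() call to activate them;
	// a post-genesis miner also needs an owner key it can spend from.
	var late kit2.TestMiner
	ens.Miner(&late, &full, kit2.OwnerAddr(full.DefaultKey)).Start()
}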
// NewEnsemble instantiates a new blank Ensemble.
func NewEnsemble(t *testing.T, opts ...EnsembleOpt) *Ensemble {
options := DefaultEnsembleOpts
for _, o := range opts {
err := o(&options)
require.NoError(t, err)
}
n := &Ensemble{t: t, options: &options}
// add accounts from ensemble options to genesis.
for _, acc := range options.accounts {
n.genesis.accounts = append(n.genesis.accounts, genesis.Actor{
Type: genesis.TAccount,
Balance: acc.initialBalance,
Meta: (&genesis.AccountMeta{Owner: acc.key.Address}).ActorMeta(),
})
}
return n
}
// FullNode enrolls a new full node.
func (n *Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble {
options := DefaultNodeOpts
for _, o := range opts {
err := o(&options)
require.NoError(n.t, err)
}
var key *wallet.Key
if !n.bootstrapped && !options.balance.IsZero() {
// create a key+address, and assign it some FIL; this will be set as the default wallet.
var err error
key, err = wallet.GenerateKey(types.KTBLS)
require.NoError(n.t, err)
genacc := genesis.Actor{
Type: genesis.TAccount,
Balance: options.balance,
Meta: (&genesis.AccountMeta{Owner: key.Address}).ActorMeta(),
}
n.genesis.accounts = append(n.genesis.accounts, genacc)
}
*full = TestFullNode{t: n.t, options: options, DefaultKey: key}
n.inactive.fullnodes = append(n.inactive.fullnodes, full)
return n
}
// Miner enrolls a new miner, using the provided full node for chain
// interactions.
func (n *Ensemble) Miner(miner *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble {
require.NotNil(n.t, full, "full node required when instantiating miner")
options := DefaultNodeOpts
for _, o := range opts {
err := o(&options)
require.NoError(n.t, err)
}
privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader)
require.NoError(n.t, err)
peerId, err := peer.IDFromPrivateKey(privkey)
require.NoError(n.t, err)
tdir, err := ioutil.TempDir("", "preseal-memgen")
require.NoError(n.t, err)
minerCnt := len(n.inactive.miners) + len(n.active.miners)
actorAddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(minerCnt))
require.NoError(n.t, err)
ownerKey := options.ownerKey
if !n.bootstrapped {
var (
sectors = options.sectors
k *types.KeyInfo
genm *genesis.Miner
)
// create the preseal commitment.
if n.options.mockProofs {
genm, k, err = mockstorage.PreSeal(abi.RegisteredSealProof_StackedDrg2KiBV1, actorAddr, sectors)
} else {
genm, k, err = seed.PreSeal(actorAddr, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, sectors, tdir, []byte("make genesis mem random"), nil, true)
}
require.NoError(n.t, err)
genm.PeerId = peerId
// create an owner key, and assign it some FIL.
ownerKey, err = wallet.NewKey(*k)
require.NoError(n.t, err)
genacc := genesis.Actor{
Type: genesis.TAccount,
Balance: options.balance,
Meta: (&genesis.AccountMeta{Owner: ownerKey.Address}).ActorMeta(),
}
n.genesis.miners = append(n.genesis.miners, *genm)
n.genesis.accounts = append(n.genesis.accounts, genacc)
} else {
require.NotNil(n.t, ownerKey, "owner key can't be nil if initializing a miner after genesis")
}
*miner = TestMiner{
t: n.t,
ActorAddr: actorAddr,
OwnerKey: ownerKey,
FullNode: full,
PresealDir: tdir,
options: options,
}
miner.Libp2p.PeerID = peerId
miner.Libp2p.PrivKey = privkey
n.inactive.miners = append(n.inactive.miners, miner)
return n
}
// Start starts all enrolled nodes.
func (n *Ensemble) Start() *Ensemble {
ctx, cancel := context.WithCancel(context.Background())
n.t.Cleanup(cancel)
var gtempl *genesis.Template
if !n.bootstrapped {
// We haven't been bootstrapped yet, so we need to generate genesis and
// create the networking backbone.
gtempl = n.generateGenesis()
n.mn = mocknet.New(ctx)
}
// ---------------------
// FULL NODES
// ---------------------
// Create all inactive full nodes.
for i, full := range n.inactive.fullnodes {
opts := []node.Option{
node.FullAPI(&full.FullNode, node.Lite(full.options.lite)),
node.Online(),
node.Repo(repo.NewMemory(nil)),
node.MockHost(n.mn),
node.Test(),
// so that we subscribe to pubsub topics immediately
node.Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(true)),
}
// append any node builder options.
opts = append(opts, full.options.extraNodeOpts...)
// Either generate the genesis or inject it.
if i == 0 && !n.bootstrapped {
opts = append(opts, node.Override(new(modules.Genesis), testing2.MakeGenesisMem(&n.genesisBlock, *gtempl)))
} else {
opts = append(opts, node.Override(new(modules.Genesis), modules.LoadGenesis(n.genesisBlock.Bytes())))
}
// Are we mocking proofs?
if n.options.mockProofs {
opts = append(opts,
node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
node.Override(new(ffiwrapper.Prover), mock.MockProver),
)
}
// Construct the full node.
stop, err := node.New(ctx, opts...)
// fullOpts[i].Opts(fulls),
require.NoError(n.t, err)
addr, err := full.WalletImport(context.Background(), &full.DefaultKey.KeyInfo)
require.NoError(n.t, err)
err = full.WalletSetDefault(context.Background(), addr)
require.NoError(n.t, err)
// Are we hitting this node through its RPC?
if full.options.rpc {
withRPC := fullRpc(n.t, full)
n.inactive.fullnodes[i] = withRPC
}
n.t.Cleanup(func() { _ = stop(context.Background()) })
n.active.fullnodes = append(n.active.fullnodes, full)
}
// If we are here, we have processed all inactive fullnodes and moved them
// to active, so clear the slice.
n.inactive.fullnodes = n.inactive.fullnodes[:0]
// Link all the nodes.
err := n.mn.LinkAll()
require.NoError(n.t, err)
// ---------------------
// MINERS
// ---------------------
// Create all inactive miners.
for i, m := range n.inactive.miners {
if n.bootstrapped {
// this is a miner created after genesis, so it won't have a preseal.
// we need to create it on chain.
params, aerr := actors.SerializeParams(&power2.CreateMinerParams{
Owner: m.OwnerKey.Address,
Worker: m.OwnerKey.Address,
SealProofType: n.options.proofType,
Peer: abi.PeerID(m.Libp2p.PeerID),
})
require.NoError(n.t, aerr)
createStorageMinerMsg := &types.Message{
From: m.OwnerKey.Address,
To: power.Address,
Value: big.Zero(),
Method: power.Methods.CreateMiner,
Params: params,
GasLimit: 0,
GasPremium: big.NewInt(5252),
}
signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, nil)
require.NoError(n.t, err)
mw, err := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
require.NoError(n.t, err)
require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode)
var retval power2.CreateMinerReturn
err = retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return))
require.NoError(n.t, err, "failed to create miner")
m.ActorAddr = retval.IDAddress
}
has, err := m.FullNode.WalletHas(ctx, m.OwnerKey.Address)
require.NoError(n.t, err)
// Only import the owner's full key into our companion full node if we
// don't already have it.
if !has {
_, err = m.FullNode.WalletImport(ctx, &m.OwnerKey.KeyInfo)
require.NoError(n.t, err)
}
// // Set it as the default address.
// err = m.FullNode.WalletSetDefault(ctx, m.OwnerAddr.Address)
// require.NoError(n.t, err)
r := repo.NewMemory(nil)
lr, err := r.Lock(repo.StorageMiner)
require.NoError(n.t, err)
ks, err := lr.KeyStore()
require.NoError(n.t, err)
pk, err := m.Libp2p.PrivKey.Bytes()
require.NoError(n.t, err)
err = ks.Put("libp2p-host", types.KeyInfo{
Type: "libp2p-host",
PrivateKey: pk,
})
require.NoError(n.t, err)
ds, err := lr.Datastore(context.TODO(), "/metadata")
require.NoError(n.t, err)
err = ds.Put(datastore.NewKey("miner-address"), m.ActorAddr.Bytes())
require.NoError(n.t, err)
nic := storedcounter.New(ds, datastore.NewKey(modules.StorageCounterDSPrefix))
for i := 0; i < m.options.sectors; i++ {
_, err := nic.Next()
require.NoError(n.t, err)
}
_, err = nic.Next()
require.NoError(n.t, err)
err = lr.Close()
require.NoError(n.t, err)
enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)})
require.NoError(n.t, err)
msg := &types.Message{
From: m.OwnerKey.Address,
To: m.ActorAddr,
Method: miner.Methods.ChangePeerID,
Params: enc,
Value: types.NewInt(0),
}
_, err = m.FullNode.MpoolPushMessage(ctx, msg, nil)
require.NoError(n.t, err)
var mineBlock = make(chan lotusminer.MineReq)
opts := []node.Option{
node.StorageMiner(&m.StorageMiner),
node.Online(),
node.Repo(r),
node.Test(),
node.MockHost(n.mn),
node.Override(new(v1api.FullNode), m.FullNode),
node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, m.ActorAddr)),
}
// append any node builder options.
opts = append(opts, m.options.extraNodeOpts...)
idAddr, err := address.IDFromAddress(m.ActorAddr)
require.NoError(n.t, err)
// preload preseals if the network still hasn't bootstrapped.
var presealSectors []abi.SectorID
if !n.bootstrapped {
sectors := n.genesis.miners[i].Sectors
for _, sector := range sectors {
presealSectors = append(presealSectors, abi.SectorID{
Miner: abi.ActorID(idAddr),
Number: sector.SectorID,
})
}
}
if n.options.mockProofs {
opts = append(opts,
node.Override(new(*mock.SectorMgr), func() (*mock.SectorMgr, error) {
return mock.NewMockSectorMgr(presealSectors), nil
}),
node.Override(new(sectorstorage.SectorManager), node.From(new(*mock.SectorMgr))),
node.Override(new(sectorstorage.Unsealer), node.From(new(*mock.SectorMgr))),
node.Override(new(sectorstorage.PieceProvider), node.From(new(*mock.SectorMgr))),
node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
node.Override(new(ffiwrapper.Prover), mock.MockProver),
node.Unset(new(*sectorstorage.Manager)),
)
}
// start node
stop, err := node.New(ctx, opts...)
require.NoError(n.t, err)
// we're using real proofs, so we need real preseal sectors on disk.
if !n.bootstrapped && !n.options.mockProofs {
err := m.StorageAddLocal(ctx, m.PresealDir)
require.NoError(n.t, err)
}
n.t.Cleanup(func() { _ = stop(context.Background()) })
// Are we hitting this node through its RPC?
if m.options.rpc {
withRPC := minerRpc(n.t, m)
n.inactive.miners[i] = withRPC
}
mineOne := func(ctx context.Context, req lotusminer.MineReq) error {
select {
case mineBlock <- req:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
m.MineOne = mineOne
m.Stop = stop
n.active.miners = append(n.active.miners, m)
}
// If we are here, we have processed all inactive miners and moved them
// to active, so clear the slice.
n.inactive.miners = n.inactive.miners[:0]
// Link all the nodes.
err = n.mn.LinkAll()
require.NoError(n.t, err)
if !n.bootstrapped && len(n.active.miners) > 0 {
// We have *just* bootstrapped, so mine 2 blocks to set up some CE stuff in some actors.
var wait sync.Mutex
wait.Lock()
observer := n.active.fullnodes[0]
bm := NewBlockMiner(n.t, n.active.miners[0])
n.t.Cleanup(bm.Stop)
bm.MineUntilBlock(ctx, observer, func(epoch abi.ChainEpoch) {
wait.Unlock()
})
wait.Lock()
bm.MineUntilBlock(ctx, observer, func(epoch abi.ChainEpoch) {
wait.Unlock()
})
wait.Lock()
}
n.bootstrapped = true
return n
}
// InterconnectAll connects all miners and full nodes to one another.
func (n *Ensemble) InterconnectAll() *Ensemble {
// connect full nodes to miners.
for _, from := range n.active.fullnodes {
for _, to := range n.active.miners {
// Go cannot coerce []*TestMiner into []api.Common,
// so we cannot use the variadic form of Connect here.
n.Connect(from, to)
}
}
// interconnect the full nodes, avoiding duplicate and self connections.
last := len(n.active.fullnodes) - 1
for i, from := range n.active.fullnodes {
if i == last {
continue
}
for _, to := range n.active.fullnodes[i+1:] {
n.Connect(from, to)
}
}
return n
}
// Connect connects one full node to the provided full nodes.
func (n *Ensemble) Connect(from api.Common, to ...api.Common) *Ensemble {
addr, err := from.NetAddrsListen(context.Background())
require.NoError(n.t, err)
for _, other := range to {
err = other.NetConnect(context.Background(), addr)
require.NoError(n.t, err)
}
return n
}
// BeginMining kicks off mining for the specified miners. If nil or 0-length,
// it will kick off mining for all enrolled and active miners. It also adds a
// cleanup function to stop all mining operations on test teardown.
func (n *Ensemble) BeginMining(blocktime time.Duration, miners ...*TestMiner) []*BlockMiner {
ctx := context.Background()
// wait one second to make sure that nodes are connected and have completed their handshakes.
// TODO make this deterministic by listening to identify events on the
// libp2p eventbus instead (or something else).
time.Sleep(1 * time.Second)
var bms []*BlockMiner
if len(miners) == 0 {
miners = n.active.miners
}
for _, m := range miners {
bm := NewBlockMiner(n.t, m)
bm.MineBlocks(ctx, blocktime)
n.t.Cleanup(bm.Stop)
bms = append(bms, bm)
}
return bms
}
func (n *Ensemble) generateGenesis() *genesis.Template {
var verifRoot = gen.DefaultVerifregRootkeyActor
if k := n.options.verifiedRoot.key; k != nil {
verifRoot = genesis.Actor{
Type: genesis.TAccount,
Balance: n.options.verifiedRoot.initialBalance,
Meta: (&genesis.AccountMeta{Owner: k.Address}).ActorMeta(),
}
}
templ := &genesis.Template{
NetworkVersion: network.Version0,
Accounts: n.genesis.accounts,
Miners: n.genesis.miners,
NetworkName: "test",
Timestamp: uint64(time.Now().Unix() - int64(n.options.pastOffset.Seconds())),
VerifregRootKey: verifRoot,
RemainderAccount: gen.DefaultRemainderAccountActor,
}
return templ
}
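For orientation, here is a minimal sketch of how a test might drive the lifecycle defined above; the test name, block time, and assertions are illustrative assumptions, not part of the kit.
// Sketch only: drives NewEnsemble -> FullNode -> Miner -> Start ->
// InterconnectAll -> BeginMining, as documented above.
func TestEnsembleLifecycleSketch(t *testing.T) {
	var (
		full  TestFullNode
		miner TestMiner
	)
	ens := NewEnsemble(t, MockProofs()).
		FullNode(&full).
		Miner(&miner, &full).
		Start()
	ens.InterconnectAll().BeginMining(50 * time.Millisecond)
	// the ensemble is now mining; exercise the full node directly.
	head, err := full.ChainHead(context.Background())
	require.NoError(t, err)
	require.NotNil(t, head)
}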

View File

@ -0,0 +1,64 @@
package kit2
import (
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/wallet"
)
type EnsembleOpt func(opts *ensembleOpts) error
type genesisAccount struct {
key *wallet.Key
initialBalance abi.TokenAmount
}
type ensembleOpts struct {
pastOffset time.Duration
proofType abi.RegisteredSealProof
verifiedRoot genesisAccount
accounts []genesisAccount
mockProofs bool
}
var DefaultEnsembleOpts = ensembleOpts{
pastOffset: 100000 * time.Second, // time sufficiently in the past to trigger catch-up mining.
proofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
}
func ProofType(proofType abi.RegisteredSealProof) EnsembleOpt {
return func(opts *ensembleOpts) error {
opts.proofType = proofType
return nil
}
}
// MockProofs activates mock proofs for the entire ensemble.
func MockProofs() EnsembleOpt {
return func(opts *ensembleOpts) error {
opts.mockProofs = true
return nil
}
}
// RootVerifier specifies the key to be enlisted as the verified registry root,
// as well as the initial balance to be attributed during genesis.
func RootVerifier(key *wallet.Key, balance abi.TokenAmount) EnsembleOpt {
return func(opts *ensembleOpts) error {
opts.verifiedRoot.key = key
opts.verifiedRoot.initialBalance = balance
return nil
}
}
// Account sets up an account at genesis with the specified key and balance.
func Account(key *wallet.Key, balance abi.TokenAmount) EnsembleOpt {
return func(opts *ensembleOpts) error {
opts.accounts = append(opts.accounts, genesisAccount{
key: key,
initialBalance: balance,
})
return nil
}
}
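As a rough sketch of how these ensemble options compose (keys and balances are illustrative; wallet.GenerateKey is used the same way by the verified-registry test later in this diff, and chain/types plus go-state-types/abi are assumed to be imported):
// Sketch only: enroll an ensemble with a verified-registry root and an extra
// genesis account. Error handling is elided for brevity.
rootKey, _ := wallet.GenerateKey(types.KTSecp256k1)
clientKey, _ := wallet.GenerateKey(types.KTBLS)
ens := NewEnsemble(t,
	MockProofs(),
	RootVerifier(rootKey, abi.NewTokenAmount(1_000_000_000)),
	Account(clientKey, abi.NewTokenAmount(1_000_000_000)),
)
_ = ens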

View File

@ -0,0 +1,70 @@
package kit2
import "testing"
// EnsembleMinimal creates and starts an Ensemble with a single full node and a single miner.
// It does not interconnect nodes nor does it begin mining.
//
// This function supports passing both ensemble and node functional options.
// Functional options are applied to all nodes.
func EnsembleMinimal(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *Ensemble) {
eopts, nopts := siftOptions(t, opts)
var (
full TestFullNode
miner TestMiner
)
ens := NewEnsemble(t, eopts...).FullNode(&full, nopts...).Miner(&miner, &full, nopts...).Start()
return &full, &miner, ens
}
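A hedged usage sketch for this preset (the block time and assertions are illustrative; MockProofs and ThroughRPC are the options defined elsewhere in this diff):
// Sketch only: one full node and one miner, reached over JSON-RPC, with mining on.
func TestMinimalPresetSketch(t *testing.T) {
	full, miner, ens := EnsembleMinimal(t, MockProofs(), ThroughRPC())
	ens.InterconnectAll().BeginMining(100 * time.Millisecond)
	// sanity-check that the miner API points at the enrolled miner actor.
	maddr, err := miner.ActorAddress(context.Background())
	require.NoError(t, err)
	require.Equal(t, miner.ActorAddr, maddr)
	_ = full
}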
// EnsembleTwoOne creates and starts an Ensemble with two full nodes and one miner.
// It does not interconnect nodes nor does it begin mining.
//
// This function supports passing both ensemble and node functional options.
// Functional options are applied to all nodes.
func EnsembleTwoOne(t *testing.T, opts ...interface{}) (*TestFullNode, *TestFullNode, *TestMiner, *Ensemble) {
eopts, nopts := siftOptions(t, opts)
var (
one, two TestFullNode
miner TestMiner
)
ens := NewEnsemble(t, eopts...).FullNode(&one, nopts...).FullNode(&two, nopts...).Miner(&miner, &one, nopts...).Start()
return &one, &two, &miner, ens
}
// EnsembleOneTwo creates and starts an Ensemble with one full node and two miners.
// It does not interconnect nodes nor does it begin mining.
//
// This function supports passing both ensemble and node functional options.
// Functional options are applied to all nodes.
func EnsembleOneTwo(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *TestMiner, *Ensemble) {
eopts, nopts := siftOptions(t, opts)
var (
full TestFullNode
one, two TestMiner
)
ens := NewEnsemble(t, eopts...).
FullNode(&full, nopts...).
Miner(&one, &full, nopts...).
Miner(&two, &full, nopts...).
Start()
return &full, &one, &two, ens
}
func siftOptions(t *testing.T, opts []interface{}) (eopts []EnsembleOpt, nopts []NodeOpt) {
for _, v := range opts {
switch o := v.(type) {
case EnsembleOpt:
eopts = append(eopts, o)
case NodeOpt:
nopts = append(nopts, o)
default:
t.Fatalf("invalid option type: %T", o)
}
}
return eopts, nopts
}

itests/kit2/files.go Normal file (58 lines)
View File

@ -0,0 +1,58 @@
package kit2
import (
"bytes"
"io"
"math/rand"
"os"
"testing"
"github.com/minio/blake2b-simd"
"github.com/stretchr/testify/require"
)
// CreateRandomFile creates a random file with the provided seed and the
// provided size.
func CreateRandomFile(t *testing.T, rseed, size int) (path string) {
if size == 0 {
size = 1600
}
source := io.LimitReader(rand.New(rand.NewSource(int64(rseed))), int64(size))
file, err := os.CreateTemp(t.TempDir(), "sourcefile.dat")
require.NoError(t, err)
n, err := io.Copy(file, source)
require.NoError(t, err)
require.EqualValues(t, n, size)
return file.Name()
}
// AssertFilesEqual compares two files by blake2b hash equality and
// fails the test if unequal.
func AssertFilesEqual(t *testing.T, left, right string) {
// initialize hashes.
leftH, rightH := blake2b.New256(), blake2b.New256()
// open files; close them when done so we don't leak descriptors.
leftF, err := os.Open(left)
require.NoError(t, err)
defer leftF.Close() //nolint:errcheck
rightF, err := os.Open(right)
require.NoError(t, err)
defer rightF.Close() //nolint:errcheck
// feed hash functions.
_, err = io.Copy(leftH, leftF)
require.NoError(t, err)
_, err = io.Copy(rightH, rightF)
require.NoError(t, err)
// compute digests.
leftD, rightD := leftH.Sum(nil), rightH.Sum(nil)
require.True(t, bytes.Equal(leftD, rightD))
}

itests/kit2/funds.go Normal file (34 lines)
View File

@ -0,0 +1,34 @@
package kit2
import (
"context"
"testing"
"github.com/filecoin-project/go-state-types/abi"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
)
// SendFunds sends funds from the default wallet of the specified sender node
// to the recipient address.
func SendFunds(ctx context.Context, t *testing.T, sender TestFullNode, recipient address.Address, amount abi.TokenAmount) {
senderAddr, err := sender.WalletDefaultAddress(ctx)
require.NoError(t, err)
msg := &types.Message{
From: senderAddr,
To: recipient,
Value: amount,
}
sm, err := sender.MpoolPushMessage(ctx, msg, nil)
require.NoError(t, err)
res, err := sender.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true)
require.NoError(t, err)
require.EqualValues(t, 0, res.Receipt.ExitCode, "did not successfully send funds")
}
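A small usage sketch; ctx, t, and the sender/receiver TestFullNode variables are assumed to be in scope, mirroring the payment channel tests further down in this diff.
// Sketch only: register a wallet on the receiver node and fund it from the
// sender's default wallet so the new address exists on chain.
receiverAddr, err := receiver.WalletNew(ctx, types.KTSecp256k1)
require.NoError(t, err)
SendFunds(ctx, t, sender, receiverAddr, abi.NewTokenAmount(1e18))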

itests/kit2/init.go Normal file (29 lines)
View File

@ -0,0 +1,29 @@
package kit2
import (
"fmt"
"os"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/policy"
logging "github.com/ipfs/go-log/v2"
)
func init() {
_ = logging.SetLogLevel("*", "INFO")
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
build.InsecurePoStValidation = true
if err := os.Setenv("BELLMAN_NO_GPU", "1"); err != nil {
panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
}
if err := os.Setenv("LOTUS_DISABLE_WATCHDOG", "1"); err != nil {
panic(fmt.Sprintf("failed to set LOTUS_DISABLE_WATCHDOG env variable: %s", err))
}
}

itests/kit2/log.go Normal file (19 lines)
View File

@ -0,0 +1,19 @@
package kit2
import (
"github.com/filecoin-project/lotus/lib/lotuslog"
logging "github.com/ipfs/go-log/v2"
)
func QuietMiningLogs() {
lotuslog.SetupLogLevels()
_ = logging.SetLogLevel("miner", "ERROR") // set this to INFO to watch mining happen.
_ = logging.SetLogLevel("chainstore", "ERROR")
_ = logging.SetLogLevel("chain", "ERROR")
_ = logging.SetLogLevel("sub", "ERROR")
_ = logging.SetLogLevel("storageminer", "ERROR")
_ = logging.SetLogLevel("pubsub", "ERROR")
_ = logging.SetLogLevel("gen", "ERROR")
_ = logging.SetLogLevel("dht/RtRefreshManager", "ERROR")
}

itests/kit2/mockcli.go Normal file (141 lines)
View File

@ -0,0 +1,141 @@
package kit2
import (
"bytes"
"context"
"flag"
"strings"
"testing"
"github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/require"
lcli "github.com/urfave/cli/v2"
)
type MockCLI struct {
t *testing.T
cmds []*lcli.Command
cctx *lcli.Context
out *bytes.Buffer
}
func NewMockCLI(ctx context.Context, t *testing.T, cmds []*lcli.Command) *MockCLI {
// Create a CLI App with an --api-url flag so that we can specify which node
// the command should be executed against
app := &lcli.App{
Flags: []lcli.Flag{
&lcli.StringFlag{
Name: "api-url",
Hidden: true,
},
},
Commands: cmds,
}
var out bytes.Buffer
app.Writer = &out
app.Setup()
cctx := lcli.NewContext(app, &flag.FlagSet{}, nil)
cctx.Context = ctx
return &MockCLI{t: t, cmds: cmds, cctx: cctx, out: &out}
}
func (c *MockCLI) Client(addr multiaddr.Multiaddr) *MockCLIClient {
return &MockCLIClient{t: c.t, cmds: c.cmds, addr: addr, cctx: c.cctx, out: c.out}
}
// MockCLIClient runs commands against a particular node
type MockCLIClient struct {
t *testing.T
cmds []*lcli.Command
addr multiaddr.Multiaddr
cctx *lcli.Context
out *bytes.Buffer
}
func (c *MockCLIClient) RunCmd(input ...string) string {
out, err := c.RunCmdRaw(input...)
require.NoError(c.t, err, "output:\n%s", out)
return out
}
// Given an input, find the corresponding command or sub-command,
// e.g. "paych add-funds".
func (c *MockCLIClient) cmdByNameSub(input []string) (*lcli.Command, []string) {
name := input[0]
for _, cmd := range c.cmds {
if cmd.Name == name {
return c.findSubcommand(cmd, input[1:])
}
}
return nil, []string{}
}
func (c *MockCLIClient) findSubcommand(cmd *lcli.Command, input []string) (*lcli.Command, []string) {
// If there are no sub-commands, return the current command
if len(cmd.Subcommands) == 0 {
return cmd, input
}
// Check each sub-command for a match against the name
subName := input[0]
for _, subCmd := range cmd.Subcommands {
if subCmd.Name == subName {
// Found a match, recursively search for sub-commands
return c.findSubcommand(subCmd, input[1:])
}
}
return nil, []string{}
}
func (c *MockCLIClient) RunCmdRaw(input ...string) (string, error) {
cmd, input := c.cmdByNameSub(input)
if cmd == nil {
panic("Could not find command " + input[0] + " " + input[1])
}
// prepend --api-url=<node api listener address>
apiFlag := "--api-url=" + c.addr.String()
input = append([]string{apiFlag}, input...)
fs := c.flagSet(cmd)
err := fs.Parse(input)
require.NoError(c.t, err)
err = cmd.Action(lcli.NewContext(c.cctx.App, fs, c.cctx))
// Get the output
str := strings.TrimSpace(c.out.String())
c.out.Reset()
return str, err
}
func (c *MockCLIClient) flagSet(cmd *lcli.Command) *flag.FlagSet {
// Apply app level flags (so we can process --api-url flag)
fs := &flag.FlagSet{}
for _, f := range c.cctx.App.Flags {
err := f.Apply(fs)
if err != nil {
c.t.Fatal(err)
}
}
// Apply command level flags
for _, f := range cmd.Flags {
err := f.Apply(fs)
if err != nil {
c.t.Fatal(err)
}
}
return fs
}
func (c *MockCLIClient) RunInteractiveCmd(cmd []string, interactive []string) string {
c.toStdin(strings.Join(interactive, "\n") + "\n")
return c.RunCmd(cmd...)
}
func (c *MockCLIClient) toStdin(s string) {
c.cctx.App.Metadata["stdin"] = bytes.NewBufferString(s)
}
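A hedged sketch of how the mock CLI is meant to be driven; cli.Commands comes from the lotus CLI package, the node must have been enrolled with ThroughRPC so that ListenAddr is populated, and the creator/receiver variables are assumed to be in scope, as in the paych CLI tests later in this diff.
// Sketch only: run a CLI command against a test full node's RPC endpoint.
mockCLI := NewMockCLI(ctx, t, cli.Commands)
creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)
out := creatorCLI.RunCmd("paych", "status-by-from-to", creatorAddr.String(), receiverAddr.String())
t.Log(out)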

itests/kit2/node_full.go Normal file (35 lines)
View File

@ -0,0 +1,35 @@
package kit2
import (
"context"
"testing"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/chain/wallet"
"github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/require"
)
// TestFullNode represents a full node enrolled in an Ensemble.
type TestFullNode struct {
v1api.FullNode
t *testing.T
// ListenAddr is the address on which an API server is listening, if an
// API server is created for this Node.
ListenAddr multiaddr.Multiaddr
DefaultKey *wallet.Key
options nodeOpts
}
// CreateImportFile creates a random file with the specified seed and size, and
// imports it into the full node.
func (f *TestFullNode) CreateImportFile(ctx context.Context, rseed int, size int) (res *api.ImportRes, path string) {
path = CreateRandomFile(f.t, rseed, size)
res, err := f.ClientImport(ctx, api.FileRef{Path: path})
require.NoError(f.t, err)
return res, path
}

itests/kit2/node_miner.go Normal file (121 lines)
View File

@ -0,0 +1,121 @@
package kit2
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/wallet"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/miner"
libp2pcrypto "github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/require"
)
// TestMiner represents a miner enrolled in an Ensemble.
type TestMiner struct {
api.StorageMiner
t *testing.T
// ListenAddr is the address on which an API server is listening, if an
// API server is created for this Node
ListenAddr multiaddr.Multiaddr
ActorAddr address.Address
OwnerKey *wallet.Key
MineOne func(context.Context, miner.MineReq) error
Stop func(context.Context) error
FullNode *TestFullNode
PresealDir string
Libp2p struct {
PeerID peer.ID
PrivKey libp2pcrypto.PrivKey
}
options nodeOpts
}
func (tm *TestMiner) PledgeSectors(ctx context.Context, n, existing int, blockNotif <-chan struct{}) {
toCheck := tm.StartPledge(ctx, n, existing, blockNotif)
for len(toCheck) > 0 {
tm.FlushSealingBatches(ctx)
states := map[api.SectorState]int{}
for n := range toCheck {
st, err := tm.StorageMiner.SectorsStatus(ctx, n, false)
require.NoError(tm.t, err)
states[st.State]++
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
tm.t.Fatal("sector in a failed state", st.State)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
}
func (tm *TestMiner) StartPledge(ctx context.Context, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} {
for i := 0; i < n; i++ {
if i%3 == 0 && blockNotif != nil {
<-blockNotif
tm.t.Log("WAIT")
}
tm.t.Logf("PLEDGING %d", i)
_, err := tm.StorageMiner.PledgeSector(ctx)
require.NoError(tm.t, err)
}
for {
s, err := tm.StorageMiner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
require.NoError(tm.t, err)
fmt.Printf("Sectors: %d\n", len(s))
if len(s) >= n+existing {
break
}
build.Clock.Sleep(100 * time.Millisecond)
}
fmt.Printf("All sectors is fsm\n")
s, err := tm.StorageMiner.SectorsList(ctx)
require.NoError(tm.t, err)
toCheck := map[abi.SectorNumber]struct{}{}
for _, number := range s {
toCheck[number] = struct{}{}
}
return toCheck
}
func (tm *TestMiner) FlushSealingBatches(ctx context.Context) {
pcb, err := tm.StorageMiner.SectorPreCommitFlush(ctx)
require.NoError(tm.t, err)
if pcb != nil {
fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb)
}
cb, err := tm.StorageMiner.SectorCommitFlush(ctx)
require.NoError(tm.t, err)
if cb != nil {
fmt.Printf("COMMIT BATCH: %+v\n", cb)
}
}

itests/kit2/node_opts.go Normal file (89 lines)
View File

@ -0,0 +1,89 @@
package kit2
import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet"
"github.com/filecoin-project/lotus/node"
)
// DefaultPresealsPerBootstrapMiner is the number of preseals that every
// bootstrap miner has by default. It can be overridden through the
// PresealSectors option.
const DefaultPresealsPerBootstrapMiner = 2
// nodeOpts is an options accumulating struct, where functional options are
// merged into.
type nodeOpts struct {
balance abi.TokenAmount
lite bool
sectors int
rpc bool
ownerKey *wallet.Key
extraNodeOpts []node.Option
}
// DefaultNodeOpts are the default options that will be applied to test nodes.
var DefaultNodeOpts = nodeOpts{
balance: big.Mul(big.NewInt(100000000), types.NewInt(build.FilecoinPrecision)),
sectors: DefaultPresealsPerBootstrapMiner,
}
// NodeOpt is a functional option for test nodes.
type NodeOpt func(opts *nodeOpts) error
// OwnerBalance specifies the balance to be attributed to a miner's owner
// account. Only relevant when creating a miner.
func OwnerBalance(balance abi.TokenAmount) NodeOpt {
return func(opts *nodeOpts) error {
opts.balance = balance
return nil
}
}
// LiteNode specifies that this node will be a lite node. Only relevant when
// creating a fullnode.
func LiteNode() NodeOpt {
return func(opts *nodeOpts) error {
opts.lite = true
return nil
}
}
// PresealSectors specifies the number of preseal sectors to give to a miner
// at genesis. Only relevant when creating a miner.
func PresealSectors(sectors int) NodeOpt {
return func(opts *nodeOpts) error {
opts.sectors = sectors
return nil
}
}
// ThroughRPC routes all interactions with this node during the test through
// its JSON-RPC API.
func ThroughRPC() NodeOpt {
return func(opts *nodeOpts) error {
opts.rpc = true
return nil
}
}
// OwnerAddr sets the owner address of a miner. Only relevant when creating
// a miner.
func OwnerAddr(wk *wallet.Key) NodeOpt {
return func(opts *nodeOpts) error {
opts.ownerKey = wk
return nil
}
}
// ConstructorOpts are Lotus node constructor options that are passed as-is to
// the node.
func ConstructorOpts(extra ...node.Option) NodeOpt {
return func(opts *nodeOpts) error {
opts.extraNodeOpts = extra
return nil
}
}
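A rough sketch of combining these node options when enrolling nodes; the specific values are illustrative.
// Sketch only: a full node reached over RPC and a miner with four preseals.
var (
	full  TestFullNode
	miner TestMiner
)
NewEnsemble(t, MockProofs()).
	FullNode(&full, ThroughRPC()).
	Miner(&miner, &full, PresealSectors(4), ThroughRPC()).
	Start().
	InterconnectAll()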

View File

@ -0,0 +1,93 @@
package kit2
import (
"context"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node"
"github.com/ipfs/go-cid"
)
// DefaultTestUpgradeSchedule is the network upgrade schedule applied to test networks by default.
var DefaultTestUpgradeSchedule = stmgr.UpgradeSchedule{{
Network: network.Version9,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version10,
Height: 2,
Migration: stmgr.UpgradeActorsV3,
}, {
Network: network.Version12,
Height: 3,
Migration: stmgr.UpgradeActorsV4,
}, {
Network: network.Version13,
Height: 4,
Migration: stmgr.UpgradeActorsV5,
}}
func LatestActorsAt(upgradeHeight abi.ChainEpoch) node.Option {
// Attention: Update this when introducing new actor versions or your tests will be sad
return NetworkUpgradeAt(network.Version13, upgradeHeight)
}
// InstantaneousNetworkVersion starts the network at the specified version,
// applying all intermediate migrations at height 1.
func InstantaneousNetworkVersion(version network.Version) node.Option {
// composes all migration functions
var mf stmgr.MigrationFunc = func(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, oldState cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (newState cid.Cid, err error) {
var state = oldState
for _, u := range DefaultTestUpgradeSchedule {
if u.Network > version {
break
}
state, err = u.Migration(ctx, sm, cache, cb, state, height, ts)
if err != nil {
return cid.Undef, err
}
}
return state, nil
}
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{
{Network: version, Height: 1, Migration: mf},
})
}
func NetworkUpgradeAt(version network.Version, upgradeHeight abi.ChainEpoch) node.Option {
// start from the default test schedule and truncate it at the requested version.
fullSchedule := DefaultTestUpgradeSchedule
schedule := stmgr.UpgradeSchedule{}
for _, upgrade := range fullSchedule {
if upgrade.Network > version {
break
}
schedule = append(schedule, upgrade)
}
if upgradeHeight > 0 {
schedule[len(schedule)-1].Height = upgradeHeight
}
return node.Override(new(stmgr.UpgradeSchedule), schedule)
}
func SDRUpgradeAt(calico, persian abi.ChainEpoch) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
Network: network.Version6,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version7,
Height: calico,
Migration: stmgr.UpgradeCalico,
}, {
Network: network.Version8,
Height: persian,
}})
}
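These schedules are injected into a node via ConstructorOpts; the following hedged sketch mirrors how the verified-registry test below wires it up, with an illustrative block time.
// Sketch only: boot a node whose network jumps to the given version at height 1.
opts := ConstructorOpts(InstantaneousNetworkVersion(network.Version13))
client, miner, ens := EnsembleMinimal(t, MockProofs(), opts)
ens.InterconnectAll().BeginMining(50 * time.Millisecond)
_, _ = client, miner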

itests/kit2/rpc.go Normal file (53 lines)
View File

@ -0,0 +1,53 @@
package kit2
import (
"context"
"net/http"
"net/http/httptest"
"testing"
"github.com/filecoin-project/lotus/api/client"
"github.com/filecoin-project/lotus/node"
"github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/stretchr/testify/require"
)
func CreateRPCServer(t *testing.T, handler http.Handler) (*httptest.Server, multiaddr.Multiaddr) {
testServ := httptest.NewServer(handler)
t.Cleanup(testServ.Close)
t.Cleanup(testServ.CloseClientConnections)
addr := testServ.Listener.Addr()
maddr, err := manet.FromNetAddr(addr)
require.NoError(t, err)
return testServ, maddr
}
func fullRpc(t *testing.T, f *TestFullNode) *TestFullNode {
handler, err := node.FullNodeHandler(f.FullNode, false)
require.NoError(t, err)
srv, maddr := CreateRPCServer(t, handler)
cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
require.NoError(t, err)
t.Cleanup(stop)
f.ListenAddr, f.FullNode = maddr, cl
return f
}
func minerRpc(t *testing.T, m *TestMiner) *TestMiner {
handler, err := node.MinerHandler(m.StorageMiner, false)
require.NoError(t, err)
srv, maddr := CreateRPCServer(t, handler)
cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v0", nil)
require.NoError(t, err)
t.Cleanup(stop)
m.ListenAddr, m.StorageMiner = maddr, cl
return m
}

View File

@ -2,14 +2,13 @@ package itests
import (
"context"
"fmt"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/ipfs/go-cid"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
cbor "github.com/ipfs/go-ipld-cbor"
@ -24,69 +23,52 @@ import (
"github.com/filecoin-project/lotus/chain/events"
"github.com/filecoin-project/lotus/chain/events/state"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit2"
)
func TestPaymentChannelsAPI(t *testing.T) {
kit.QuietMiningLogs()
kit2.QuietMiningLogs()
ctx := context.Background()
n, sn := kit.MockMinerBuilder(t, kit.TwoFull, kit.OneMiner)
blockTime := 5 * time.Millisecond
paymentCreator := n[0]
paymentReceiver := n[1]
miner := sn[0]
var (
paymentCreator kit2.TestFullNode
paymentReceiver kit2.TestFullNode
miner kit2.TestMiner
)
// get everyone connected
addrs, err := paymentCreator.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := paymentReceiver.NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
// start mining blocks
bm := kit.NewBlockMiner(t, miner)
bm.MineBlocks(ctx, 5*time.Millisecond)
t.Cleanup(bm.Stop)
ens := kit2.NewEnsemble(t, kit2.MockProofs()).
FullNode(&paymentCreator).
FullNode(&paymentReceiver).
Miner(&miner, &paymentCreator).
Start().
InterconnectAll()
bms := ens.BeginMining(blockTime)
bm := bms[0]
// send some funds to register the receiver
receiverAddr, err := paymentReceiver.WalletNew(ctx, types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
kit.SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))
kit2.SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))
// setup the payment channel
createrAddr, err := paymentCreator.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
channelAmt := int64(7000)
channelInfo, err := paymentCreator.PaychGet(ctx, createrAddr, receiverAddr, abi.NewTokenAmount(channelAmt))
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
channel, err := paymentCreator.PaychGetWaitReady(ctx, channelInfo.WaitSentinel)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
// allocate three lanes
var lanes []uint64
for i := 0; i < 3; i++ {
lane, err := paymentCreator.PaychAllocateLane(ctx, channel)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
lanes = append(lanes, lane)
}
@ -95,45 +77,28 @@ func TestPaymentChannelsAPI(t *testing.T) {
// supersedes the voucher with a value of 1000
for _, lane := range lanes {
vouch1, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(1000), lane)
if err != nil {
t.Fatal(err)
}
if vouch1.Voucher == nil {
t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouch1.Shortfall))
}
require.NoError(t, err)
require.NotNil(t, vouch1.Voucher, "Not enough funds to create voucher: missing %d", vouch1.Shortfall)
vouch2, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(2000), lane)
if err != nil {
t.Fatal(err)
}
if vouch2.Voucher == nil {
t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouch2.Shortfall))
}
require.NoError(t, err)
require.NotNil(t, vouch2.Voucher, "Not enough funds to create voucher: missing %d", vouch2.Shortfall)
delta1, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch1.Voucher, nil, abi.NewTokenAmount(1000))
if err != nil {
t.Fatal(err)
}
if !delta1.Equals(abi.NewTokenAmount(1000)) {
t.Fatal("voucher didn't have the right amount")
}
require.NoError(t, err)
require.EqualValues(t, abi.NewTokenAmount(1000), delta1, "voucher didn't have the right amount")
delta2, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch2.Voucher, nil, abi.NewTokenAmount(1000))
if err != nil {
t.Fatal(err)
}
if !delta2.Equals(abi.NewTokenAmount(1000)) {
t.Fatal("voucher didn't have the right amount")
}
require.NoError(t, err)
require.EqualValues(t, abi.NewTokenAmount(1000), delta2, "voucher didn't have the right amount")
}
// settle the payment channel
settleMsgCid, err := paymentCreator.PaychSettle(ctx, channel)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
res := waitForMessage(ctx, t, paymentCreator, settleMsgCid, time.Second*10, "settle")
if res.Receipt.ExitCode != 0 {
t.Fatal("Unable to settle payment channel")
}
require.EqualValues(t, 0, res.Receipt.ExitCode, "Unable to settle payment channel")
creatorStore := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(paymentCreator)))
@ -170,9 +135,7 @@ func TestPaymentChannelsAPI(t *testing.T) {
}, int(build.MessageConfidence)+1, build.Finality, func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) {
return preds.OnPaymentChannelActorChanged(channel, preds.OnToSendAmountChanges())(ctx, oldTs.Key(), newTs.Key())
})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
select {
case <-finished:
@ -182,75 +145,49 @@ func TestPaymentChannelsAPI(t *testing.T) {
// Create a new voucher now that some vouchers have already been submitted
vouchRes, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(1000), 3)
if err != nil {
t.Fatal(err)
}
if vouchRes.Voucher == nil {
t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouchRes.Shortfall))
}
require.NoError(t, err)
require.NotNil(t, vouchRes.Voucher, "Not enough funds to create voucher: missing %d", vouchRes.Shortfall)
vdelta, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouchRes.Voucher, nil, abi.NewTokenAmount(1000))
if err != nil {
t.Fatal(err)
}
if !vdelta.Equals(abi.NewTokenAmount(1000)) {
t.Fatal("voucher didn't have the right amount")
}
require.NoError(t, err)
require.EqualValues(t, abi.NewTokenAmount(1000), vdelta, "voucher didn't have the right amount")
// Create a new voucher whose value would exceed the channel balance
excessAmt := abi.NewTokenAmount(1000)
vouchRes, err = paymentCreator.PaychVoucherCreate(ctx, channel, excessAmt, 4)
if err != nil {
t.Fatal(err)
}
if vouchRes.Voucher != nil {
t.Fatal("Expected not to be able to create voucher whose value would exceed channel balance")
}
if !vouchRes.Shortfall.Equals(excessAmt) {
t.Fatal(fmt.Errorf("Expected voucher shortfall of %d, got %d", excessAmt, vouchRes.Shortfall))
}
require.NoError(t, err)
require.Nil(t, vouchRes.Voucher, "Expected not to be able to create voucher whose value would exceed channel balance")
require.EqualValues(t, excessAmt, vouchRes.Shortfall, "Expected voucher shortfall of %d, got %d", excessAmt, vouchRes.Shortfall)
// Add a voucher whose value would exceed the channel balance
vouch := &paych.SignedVoucher{ChannelAddr: channel, Amount: excessAmt, Lane: 4, Nonce: 1}
vb, err := vouch.SigningBytes()
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
sig, err := paymentCreator.WalletSign(ctx, createrAddr, vb)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
vouch.Signature = sig
_, err = paymentReceiver.PaychVoucherAdd(ctx, channel, vouch, nil, abi.NewTokenAmount(1000))
if err == nil {
t.Fatal(fmt.Errorf("Expected shortfall error of %d", excessAmt))
}
require.Errorf(t, err, "Expected shortfall error of %d", excessAmt)
// wait for the settlement period to pass before collecting
waitForBlocks(ctx, t, bm, paymentReceiver, receiverAddr, policy.PaychSettleDelay)
creatorPreCollectBalance, err := paymentCreator.WalletBalance(ctx, createrAddr)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
// collect funds (from receiver, though either party can do it)
collectMsg, err := paymentReceiver.PaychCollect(ctx, channel)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
res, err = paymentReceiver.StateWaitMsg(ctx, collectMsg, 3, api.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("unable to collect on payment channel")
}
require.NoError(t, err)
require.EqualValues(t, 0, res.Receipt.ExitCode, "unable to collect on payment channel")
// Finally, check the balance for the creator
currentCreatorBalance, err := paymentCreator.WalletBalance(ctx, createrAddr)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
// The highest nonce voucher that the creator sent on each lane is 2000
totalVouchers := int64(len(lanes) * 2000)
@ -260,15 +197,10 @@ func TestPaymentChannelsAPI(t *testing.T) {
// channel amount - total voucher value
expectedRefund := channelAmt - totalVouchers
delta := big.Sub(currentCreatorBalance, creatorPreCollectBalance)
if !delta.Equals(abi.NewTokenAmount(expectedRefund)) {
t.Fatalf("did not send correct funds from creator: expected %d, got %d", expectedRefund, delta)
}
// shut down mining
bm.Stop()
require.EqualValues(t, abi.NewTokenAmount(expectedRefund), delta, "did not send correct funds from creator: expected %d, got %d", expectedRefund, delta)
}
func waitForBlocks(ctx context.Context, t *testing.T, bm *kit.BlockMiner, paymentReceiver kit.TestFullNode, receiverAddr address.Address, count int) {
func waitForBlocks(ctx context.Context, t *testing.T, bm *kit2.BlockMiner, paymentReceiver kit2.TestFullNode, receiverAddr address.Address, count int) {
// We need to add null blocks in batches; if we add too many at once, the chain can't sync
batchSize := 60
for i := 0; i < count; i += batchSize {
@ -286,30 +218,23 @@ func waitForBlocks(ctx context.Context, t *testing.T, bm *kit.BlockMiner, paymen
From: receiverAddr,
Value: types.NewInt(0),
}, nil)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
_, err = paymentReceiver.StateWaitMsg(ctx, m.Cid(), 1, api.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
}
}
func waitForMessage(ctx context.Context, t *testing.T, paymentCreator kit.TestFullNode, msgCid cid.Cid, duration time.Duration, desc string) *api.MsgLookup {
func waitForMessage(ctx context.Context, t *testing.T, paymentCreator kit2.TestFullNode, msgCid cid.Cid, duration time.Duration, desc string) *api.MsgLookup {
ctx, cancel := context.WithTimeout(ctx, duration)
defer cancel()
fmt.Println("Waiting for", desc)
t.Log("Waiting for", desc)
res, err := paymentCreator.StateWaitMsg(ctx, msgCid, 1, api.LookbackNoLimit, true)
if err != nil {
fmt.Println("Error waiting for", desc, err)
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatalf("did not successfully send %s", desc)
}
fmt.Println("Confirmed", desc)
require.NoError(t, err)
require.EqualValues(t, 0, res.Receipt.ExitCode, "did not successfully send %s", desc)
t.Log("Confirmed", desc)
return res
}

View File

@ -11,7 +11,7 @@ import (
"time"
"github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/itests/kit2"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
@ -37,18 +37,19 @@ func init() {
// commands
func TestPaymentChannelsBasic(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
kit.QuietMiningLogs()
kit2.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
paymentCreator := nodes[0]
paymentReceiver := nodes[1]
creatorAddr := addrs[0]
receiverAddr := addrs[1]
var (
paymentCreator kit2.TestFullNode
paymentReceiver kit2.TestFullNode
)
creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime)
// Create mock CLI
mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
mockCLI := kit2.NewMockCLI(ctx, t, cli.Commands)
creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)
receiverCLI := mockCLI.Client(paymentReceiver.ListenAddr)
@ -70,12 +71,16 @@ func TestPaymentChannelsBasic(t *testing.T) {
// creator: paych settle <channel>
creatorCLI.RunCmd("paych", "settle", chAddr.String())
t.Log("wait for chain to reach settle height")
// Wait for the chain to reach the settle height
chState := getPaychState(ctx, t, paymentReceiver, chAddr)
sa, err := chState.SettlingAt()
require.NoError(t, err)
waitForHeight(ctx, t, paymentReceiver, sa)
t.Log("settle height reached")
// receiver: paych collect <channel>
receiverCLI.RunCmd("paych", "collect", chAddr.String())
}
@ -89,17 +94,18 @@ type voucherSpec struct {
// TestPaymentChannelStatus tests the payment channel status CLI command
func TestPaymentChannelStatus(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
kit.QuietMiningLogs()
kit2.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
paymentCreator := nodes[0]
creatorAddr := addrs[0]
receiverAddr := addrs[1]
var (
paymentCreator kit2.TestFullNode
paymentReceiver kit2.TestFullNode
)
creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime)
// Create mock CLI
mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
mockCLI := kit2.NewMockCLI(ctx, t, cli.Commands)
creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)
// creator: paych status-by-from-to <creator> <receiver>
@ -168,18 +174,18 @@ func TestPaymentChannelStatus(t *testing.T) {
// channel voucher commands
func TestPaymentChannelVouchers(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
kit.QuietMiningLogs()
kit2.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
paymentCreator := nodes[0]
paymentReceiver := nodes[1]
creatorAddr := addrs[0]
receiverAddr := addrs[1]
var (
paymentCreator kit2.TestFullNode
paymentReceiver kit2.TestFullNode
)
creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime)
// Create mock CLI
mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
mockCLI := kit2.NewMockCLI(ctx, t, cli.Commands)
creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)
receiverCLI := mockCLI.Client(paymentReceiver.ListenAddr)
@ -300,17 +306,18 @@ func TestPaymentChannelVouchers(t *testing.T) {
// is greater than what's left in the channel, voucher create fails
func TestPaymentChannelVoucherCreateShortfall(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
kit.QuietMiningLogs()
kit2.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
paymentCreator := nodes[0]
creatorAddr := addrs[0]
receiverAddr := addrs[1]
var (
paymentCreator kit2.TestFullNode
paymentReceiver kit2.TestFullNode
)
creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime)
// Create mock CLI
mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
mockCLI := kit2.NewMockCLI(ctx, t, cli.Commands)
creatorCLI := mockCLI.Client(paymentCreator.ListenAddr)
// creator: paych add-funds <creator> <receiver> <amount>
@ -378,7 +385,7 @@ func checkVoucherOutput(t *testing.T, list string, vouchers []voucherSpec) {
}
// waitForHeight waits for the node to reach the given chain epoch
func waitForHeight(ctx context.Context, t *testing.T, node kit.TestFullNode, height abi.ChainEpoch) {
func waitForHeight(ctx context.Context, t *testing.T, node kit2.TestFullNode, height abi.ChainEpoch) {
atHeight := make(chan struct{})
chainEvents := events.NewEvents(ctx, node)
err := chainEvents.ChainAt(func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error {
@ -396,7 +403,7 @@ func waitForHeight(ctx context.Context, t *testing.T, node kit.TestFullNode, hei
}
// getPaychState gets the state of the payment channel with the given address
func getPaychState(ctx context.Context, t *testing.T, node kit.TestFullNode, chAddr address.Address) paych.State {
func getPaychState(ctx context.Context, t *testing.T, node kit2.TestFullNode, chAddr address.Address) paych.State {
act, err := node.StateGetActor(ctx, chAddr, types.EmptyTSK)
require.NoError(t, err)
@ -406,3 +413,25 @@ func getPaychState(ctx context.Context, t *testing.T, node kit.TestFullNode, chA
return chState
}
func startPaychCreatorReceiverMiner(ctx context.Context, t *testing.T, paymentCreator *kit2.TestFullNode, paymentReceiver *kit2.TestFullNode, blocktime time.Duration) (address.Address, address.Address) {
var miner kit2.TestMiner
opts := kit2.ThroughRPC()
kit2.NewEnsemble(t, kit2.MockProofs()).
FullNode(paymentCreator, opts).
FullNode(paymentReceiver, opts).
Miner(&miner, paymentCreator).
Start().
InterconnectAll().
BeginMining(blocktime)
// Send some funds to the second node
receiverAddr, err := paymentReceiver.WalletDefaultAddress(ctx)
require.NoError(t, err)
kit2.SendFunds(ctx, t, *paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))
// Get the first node's address
creatorAddr, err := paymentCreator.WalletDefaultAddress(ctx)
require.NoError(t, err)
return creatorAddr, receiverAddr
}

View File

@ -10,15 +10,14 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/itests/kit2"
bminer "github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node/impl"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSDRUpgrade(t *testing.T) {
kit.QuietMiningLogs()
kit2.QuietMiningLogs()
// oldDelay := policy.GetPreCommitChallengeDelay()
// policy.SetPreCommitChallengeDelay(5)
@ -31,18 +30,10 @@ func TestSDRUpgrade(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithSDRAt(500, 1000)}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
opts := kit2.ConstructorOpts(kit2.SDRUpgradeAt(500, 1000))
client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(), opts)
ens.InterconnectAll()
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
pledge := make(chan struct{})
@ -53,7 +44,7 @@ func TestSDRUpgrade(t *testing.T) {
round := 0
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
if err := miner.MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
@ -88,7 +79,7 @@ func TestSDRUpgrade(t *testing.T) {
}()
// before.
kit.PledgeSectors(t, ctx, miner, 9, 0, pledge)
miner.PledgeSectors(ctx, 9, 0, pledge)
s, err := miner.SectorsList(ctx)
require.NoError(t, err)

View File

@ -2,7 +2,6 @@ package itests
import (
"context"
"fmt"
"testing"
"time"
@ -11,24 +10,23 @@ import (
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/stmgr"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/itests/kit2"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
"github.com/stretchr/testify/require"
)
func TestTapeFix(t *testing.T) {
kit.QuietMiningLogs()
kit2.QuietMiningLogs()
var blocktime = 2 * time.Millisecond
// The "before" case is disabled, because we need the builder to mock 32 GiB sectors to accurately repro this case
// TODO: Make the mock sector size configurable and reenable this
// t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) })
t.Run("after", func(t *testing.T) { testTapeFix(t, kit.MockMinerBuilder, blocktime, true) })
t.Run("after", func(t *testing.T) { testTapeFix(t, blocktime, true) })
}
func testTapeFix(t *testing.T, b kit.APIBuilder, blocktime time.Duration, after bool) {
func testTapeFix(t *testing.T, blocktime time.Duration, after bool) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -44,46 +42,14 @@ func testTapeFix(t *testing.T, b kit.APIBuilder, blocktime time.Duration, after
})
}
n, sn := b(t, []kit.FullNodeOpts{{Opts: func(_ []kit.TestFullNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), upgradeSchedule)
}}}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
done := make(chan struct{})
go func() {
defer close(done)
for ctx.Err() == nil {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
if ctx.Err() != nil {
// context was canceled, ignore the error.
return
}
t.Error(err)
}
}
}()
defer func() {
cancel()
<-done
}()
nopts := kit2.ConstructorOpts(node.Override(new(stmgr.UpgradeSchedule), upgradeSchedule))
_, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(), nopts)
ens.InterconnectAll().BeginMining(blocktime)
sid, err := miner.PledgeSector(ctx)
require.NoError(t, err)
fmt.Printf("All sectors is fsm\n")
t.Log("All sectors is fsm")
// If before, we expect the precommit to fail
successState := api.SectorState(sealing.CommitFailed)
@ -101,6 +67,6 @@ func testTapeFix(t *testing.T, b kit.APIBuilder, blocktime time.Duration, after
}
require.NotEqual(t, failureState, st.State)
build.Clock.Sleep(100 * time.Millisecond)
fmt.Println("WaitSeal")
t.Log("WaitSeal")
}
}

View File

@ -2,122 +2,127 @@ package itests
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/chain/wallet"
verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"
"github.com/stretchr/testify/require"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/node/impl"
verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit2"
"github.com/filecoin-project/lotus/node/impl"
)
func TestVerifiedClientTopUp(t *testing.T) {
blockTime := 100 * time.Millisecond
test := func(nv network.Version, shouldWork bool) func(*testing.T) {
return func(t *testing.T) {
nodes, miners := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithNetworkUpgradeAt(nv, -1)}, kit.OneMiner)
api := nodes[0].FullNode.(*impl.FullNodeAPI)
rootKey, err := wallet.GenerateKey(types.KTSecp256k1)
require.NoError(t, err)
verifierKey, err := wallet.GenerateKey(types.KTSecp256k1)
require.NoError(t, err)
verifiedClientKey, err := wallet.GenerateKey(types.KTBLS)
require.NoError(t, err)
bal, err := types.ParseFIL("100fil")
require.NoError(t, err)
node, _, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(),
kit2.RootVerifier(rootKey, abi.NewTokenAmount(bal.Int64())),
kit2.Account(verifierKey, abi.NewTokenAmount(bal.Int64())), // assign some balance to the verifier so they can send an AddVerifiedClient message.
kit2.ConstructorOpts(kit2.InstantaneousNetworkVersion(nv)))
ens.InterconnectAll().BeginMining(blockTime)
api := node.FullNode.(*impl.FullNodeAPI)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
//Get VRH
// get VRH
vrh, err := api.StateVerifiedRegistryRootKey(ctx, types.TipSetKey{})
if err != nil {
t.Fatal(err)
}
fmt.Println(vrh.String())
require.NoError(t, err)
//Add verifier
verifier, err := api.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
// import the root key.
rootAddr, err := api.WalletImport(ctx, &rootKey.KeyInfo)
require.NoError(t, err)
// import the verifier's key.
verifierAddr, err := api.WalletImport(ctx, &verifierKey.KeyInfo)
require.NoError(t, err)
// import the verified client's key.
verifiedClientAddr, err := api.WalletImport(ctx, &verifiedClientKey.KeyInfo)
require.NoError(t, err)
params, err := actors.SerializeParams(&verifreg4.AddVerifierParams{Address: verifierAddr, Allowance: big.NewInt(100000000000)})
require.NoError(t, err)
params, err := actors.SerializeParams(&verifreg4.AddVerifierParams{Address: verifier, Allowance: big.NewInt(100000000000)})
if err != nil {
t.Fatal(err)
}
msg := &types.Message{
From: rootAddr,
To: verifreg.Address,
From: vrh,
Method: verifreg.Methods.AddVerifier,
Params: params,
Value: big.Zero(),
}
bm := kit.NewBlockMiner(t, miners[0])
bm.MineBlocks(ctx, 100*time.Millisecond)
t.Cleanup(bm.Stop)
sm, err := api.MpoolPushMessage(ctx, msg, nil)
if err != nil {
t.Fatal("AddVerifier failed: ", err)
}
require.NoError(t, err, "AddVerifier failed")
res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send message")
}
require.NoError(t, err)
require.EqualValues(t, 0, res.Receipt.ExitCode)
//Assign datacap to a client
// assign datacap to a client
datacap := big.NewInt(10000)
clientAddress, err := api.WalletNew(ctx, types.KTBLS)
if err != nil {
t.Fatal(err)
}
params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: clientAddress, Allowance: datacap})
if err != nil {
t.Fatal(err)
}
params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: datacap})
require.NoError(t, err)
msg = &types.Message{
From: verifierAddr,
To: verifreg.Address,
From: verifier,
Method: verifreg.Methods.AddVerifiedClient,
Params: params,
Value: big.Zero(),
}
sm, err = api.MpoolPushMessage(ctx, msg, nil)
if err != nil {
t.Fatal("AddVerifiedClient faield: ", err)
}
res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send message")
}
require.NoError(t, err)
res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
require.NoError(t, err)
require.EqualValues(t, 0, res.Receipt.ExitCode)
// check datacap balance
dcap, err := api.StateVerifiedClientStatus(ctx, verifiedClientAddr, types.EmptyTSK)
require.NoError(t, err)
//check datacap balance
dcap, err := api.StateVerifiedClientStatus(ctx, clientAddress, types.EmptyTSK)
if err != nil {
t.Fatal(err)
}
if !dcap.Equals(datacap) {
t.Fatal("")
}
//try to assign datacap to the same client should fail for actor v4 and below
params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: clientAddress, Allowance: datacap})
// try to assign datacap to the same client should fail for actor v4 and below
params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: datacap})
if err != nil {
t.Fatal(err)
}
msg = &types.Message{
From: verifierAddr,
To: verifreg.Address,
From: verifier,
Method: verifreg.Methods.AddVerifiedClient,
Params: params,
Value: big.Zero(),