commit e84b8ab3a0
parent 8b037e2da3

move new kit into kit2, re-enable unmigrated tests against kit1.
@@ -40,7 +40,7 @@ func TestMinerAllInfo(t *testing.T) {
 policy.SetPreCommitChallengeDelay(oldDelay)
 })

-n, sn := kit.FullNodeBuilder(t, kit.OneFull, kit.OneMiner)
+n, sn := kit.Builder(t, kit.OneFull, kit.OneMiner)
 client, miner := n[0].FullNode, sn[0]
 kit.ConnectAndStartMining(t, time.Second, miner, client.(*impl.FullNodeAPI))

@@ -61,7 +61,7 @@ func TestMinerAllInfo(t *testing.T) {
 t.Run("pre-info-all", run)

 dh := kit.NewDealHarness(t, client, miner)
-dh.MakeOnlineDeal(context.Background(), 6, false, false, 0)
+dh.MakeFullDeal(context.Background(), 6, false, false, 0)

 t.Run("post-info-all", run)
 }
@@ -12,7 +12,7 @@ import (
 lapi "github.com/filecoin-project/lotus/api"
 "github.com/filecoin-project/lotus/build"
 "github.com/filecoin-project/lotus/chain/types"
-"github.com/filecoin-project/lotus/itests/kit"
+"github.com/filecoin-project/lotus/itests/kit2"
 "github.com/stretchr/testify/require"
 )

@@ -21,16 +21,16 @@ func TestAPI(t *testing.T) {
 runAPITest(t)
 })
 t.Run("rpc", func(t *testing.T) {
-runAPITest(t, kit.ThroughRPC())
+runAPITest(t, kit2.ThroughRPC())
 })
 }

 type apiSuite struct {
-opts []kit.NodeOpt
+opts []kit2.NodeOpt
 }

 // runAPITest is the entry point to API test suite
-func runAPITest(t *testing.T, opts ...kit.NodeOpt) {
+func runAPITest(t *testing.T, opts ...kit2.NodeOpt) {
 ts := apiSuite{opts: opts}

 t.Run("version", ts.testVersion)

@@ -48,7 +48,7 @@ func (ts *apiSuite) testVersion(t *testing.T) {
 lapi.RunningNodeType = lapi.NodeUnknown
 })

-full, _, _ := kit.EnsembleMinimal(t, ts.opts...)
+full, _, _ := kit2.EnsembleMinimal(t, ts.opts...)

 v, err := full.Version(context.Background())
 require.NoError(t, err)

@@ -61,7 +61,7 @@ func (ts *apiSuite) testVersion(t *testing.T) {
 func (ts *apiSuite) testID(t *testing.T) {
 ctx := context.Background()

-full, _, _ := kit.EnsembleMinimal(t, ts.opts...)
+full, _, _ := kit2.EnsembleMinimal(t, ts.opts...)

 id, err := full.ID(ctx)
 if err != nil {

@@ -73,7 +73,7 @@ func (ts *apiSuite) testID(t *testing.T) {
 func (ts *apiSuite) testConnectTwo(t *testing.T) {
 ctx := context.Background()

-one, two, _, ens := kit.EnsembleTwoOne(t, ts.opts...)
+one, two, _, ens := kit2.EnsembleTwoOne(t, ts.opts...)

 p, err := one.NetPeers(ctx)
 require.NoError(t, err)

@@ -97,7 +97,7 @@ func (ts *apiSuite) testConnectTwo(t *testing.T) {
 func (ts *apiSuite) testSearchMsg(t *testing.T) {
 ctx := context.Background()

-full, _, ens := kit.EnsembleMinimal(t, ts.opts...)
+full, _, ens := kit2.EnsembleMinimal(t, ts.opts...)

 senderAddr, err := full.WalletDefaultAddress(ctx)
 require.NoError(t, err)

@@ -127,7 +127,7 @@ func (ts *apiSuite) testSearchMsg(t *testing.T) {
 func (ts *apiSuite) testMining(t *testing.T) {
 ctx := context.Background()

-full, miner, _ := kit.EnsembleMinimal(t, ts.opts...)
+full, miner, _ := kit2.EnsembleMinimal(t, ts.opts...)

 newHeads, err := full.ChainNotify(ctx)
 require.NoError(t, err)

@@ -138,7 +138,7 @@ func (ts *apiSuite) testMining(t *testing.T) {
 require.NoError(t, err)
 require.Equal(t, int64(h1.Height()), int64(baseHeight))

-bm := kit.NewBlockMiner(t, miner)
+bm := kit2.NewBlockMiner(t, miner)
 bm.MineUntilBlock(ctx, full, nil)
 require.NoError(t, err)

@@ -170,7 +170,7 @@ func (ts *apiSuite) testMiningReal(t *testing.T) {
 func (ts *apiSuite) testNonGenesisMiner(t *testing.T) {
 ctx := context.Background()

-full, genesisMiner, ens := kit.EnsembleMinimal(t, ts.opts...)
+full, genesisMiner, ens := kit2.EnsembleMinimal(t, ts.opts...)

 ens.BeginMining(4 * time.Millisecond)

@@ -180,8 +180,8 @@ func (ts *apiSuite) testNonGenesisMiner(t *testing.T) {
 _, err = full.StateMinerInfo(ctx, gaa, types.EmptyTSK)
 require.NoError(t, err)

-var newMiner kit.TestMiner
-ens.Miner(&newMiner, full, kit.OwnerAddr(full.DefaultKey)).Start()
+var newMiner kit2.TestMiner
+ens.Miner(&newMiner, full, kit2.OwnerAddr(full.DefaultKey)).Start()

 ta, err := newMiner.ActorAddress(ctx)
 require.NoError(t, err)
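For orientation, the kit2 calls exercised by the test file above compose into a very small standalone test. The following is a minimal sketch, assuming only the kit2 surface visible in this diff (EnsembleMinimal, MockProofs, InterconnectAll, BeginMining); the test name is illustrative and not part of the change.

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit2"
	"github.com/stretchr/testify/require"
)

// Sketch: one full node + one miner with mocked proofs, interconnected and mining.
func TestEnsembleMinimalSketch(t *testing.T) {
	ctx := context.Background()

	full, _, ens := kit2.EnsembleMinimal(t, kit2.MockProofs())
	ens.InterconnectAll().BeginMining(10 * time.Millisecond)

	head, err := full.ChainHead(ctx)
	require.NoError(t, err)
	require.NotNil(t, head)
}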
@@ -14,7 +14,7 @@ import (
 "github.com/filecoin-project/lotus/build"
 "github.com/filecoin-project/lotus/chain/actors/builtin/market"
 "github.com/filecoin-project/lotus/chain/types"
-"github.com/filecoin-project/lotus/itests/kit"
+"github.com/filecoin-project/lotus/itests/kit2"
 "github.com/filecoin-project/lotus/markets/storageadapter"
 "github.com/filecoin-project/lotus/node"
 market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"

@@ -23,38 +23,30 @@ import (
 )

 func TestDealCyclesConcurrent(t *testing.T) {
-kit.QuietMiningLogs()
+if testing.Short() {
+t.Skip("skipping test in short mode")
+}

+kit2.QuietMiningLogs()

 blockTime := 10 * time.Millisecond

 // For these tests where the block time is artificially short, just use
 // a deal start epoch that is guaranteed to be far enough in the future
 // so that the deal starts sealing in time
-dealStartEpoch := abi.ChainEpoch(2 << 12)
+startEpoch := abi.ChainEpoch(2 << 12)

 runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) {
-client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
+client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs())
 ens.InterconnectAll().BeginMining(blockTime)
-dh := kit.NewDealHarness(t, client, miner)
+dh := kit2.NewDealHarness(t, client, miner)

-errgrp, _ := errgroup.WithContext(context.Background())
-for i := 0; i < n; i++ {
-i := i
-errgrp.Go(func() (err error) {
-defer func() {
-// This is necessary because we use require, which invokes t.Fatal,
-// and that's not
-if r := recover(); r != nil {
-err = fmt.Errorf("deal failed: %s", r)
-}
-}()
-deal, res, inPath := dh.MakeOnlineDeal(context.Background(), 5+i, fastRetrieval, dealStartEpoch)
-outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, carExport)
-kit.FilesEqual(t, inPath, outPath)
-return nil
-})
-}
-require.NoError(t, errgrp.Wait())
+runConcurrentDeals(t, dh, fullDealCyclesOpts{
+n: n,
+fastRetrieval: fastRetrieval,
+carExport: carExport,
+startEpoch: startEpoch,
+})
 }

 cycles := []int{1, 2, 4, 8}

@@ -67,32 +59,60 @@ func TestDealCyclesConcurrent(t *testing.T) {
 }
 }

-// func TestAPIDealFlowReal(t *testing.T) {
-// if testing.Short() {
-// t.Skip("skipping test in short mode")
-// }
-//
-// kit.QuietMiningLogs()
-//
-// // TODO: just set this globally?
-// oldDelay := policy.GetPreCommitChallengeDelay()
-// policy.SetPreCommitChallengeDelay(5)
-// t.Cleanup(func() {
-// policy.SetPreCommitChallengeDelay(oldDelay)
-// })
-//
-// t.Run("basic", func(t *testing.T) {
-// runFullDealCycles(t, 1, kit.FullNodeBuilder, time.Second, false, false, 0)
-// })
-//
-// t.Run("fast-retrieval", func(t *testing.T) {
-// runFullDealCycles(t, 1, kit.FullNodeBuilder, time.Second, false, true, 0)
-// })
-//
-// t.Run("retrieval-second", func(t *testing.T) {
-// runSecondDealRetrievalTest(t, kit.FullNodeBuilder, time.Second)
-// })
-// }
+type fullDealCyclesOpts struct {
+n int
+fastRetrieval bool
+carExport bool
+startEpoch abi.ChainEpoch
+}
+
+func runConcurrentDeals(t *testing.T, dh *kit2.DealHarness, opts fullDealCyclesOpts) {
+errgrp, _ := errgroup.WithContext(context.Background())
+for i := 0; i < opts.n; i++ {
+i := i
+errgrp.Go(func() (err error) {
+defer func() {
+// This is necessary because golang can't deal with test
+// failures being reported from children goroutines ¯\_(ツ)_/¯
+if r := recover(); r != nil {
+err = fmt.Errorf("deal failed: %s", r)
+}
+}()
+deal, res, inPath := dh.MakeOnlineDeal(context.Background(), 5+i, opts.fastRetrieval, opts.startEpoch)
+outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, opts.carExport)
+kit2.AssertFilesEqual(t, inPath, outPath)
+return nil
+})
+}
+require.NoError(t, errgrp.Wait())
+}
+
+func TestDealsWithSealingAndRPC(t *testing.T) {
+if testing.Short() {
+t.Skip("skipping test in short mode")
+}
+
+kit2.QuietMiningLogs()
+
+var blockTime = 1 * time.Second
+
+client, miner, ens := kit2.EnsembleMinimal(t, kit2.ThroughRPC()) // no mock proofs.
+ens.InterconnectAll().BeginMining(blockTime)
+dh := kit2.NewDealHarness(t, client, miner)
+
+t.Run("stdretrieval", func(t *testing.T) {
+runConcurrentDeals(t, dh, fullDealCyclesOpts{n: 1})
+})
+
+t.Run("fastretrieval", func(t *testing.T) {
+runConcurrentDeals(t, dh, fullDealCyclesOpts{n: 1, fastRetrieval: true})
+})
+
+t.Run("fastretrieval-twodeals-sequential", func(t *testing.T) {
+runConcurrentDeals(t, dh, fullDealCyclesOpts{n: 1, fastRetrieval: true})
+runConcurrentDeals(t, dh, fullDealCyclesOpts{n: 1, fastRetrieval: true})
+})
+}

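The runConcurrentDeals helper above runs each deal cycle in its own goroutine and converts panics inside those goroutines into ordinary errors, so that errgrp.Wait() surfaces them on the main test goroutine (the diff's own comment explains why: failures reported from child goroutines are otherwise lost). A stripped-down, self-contained illustration of that recover-into-errgroup pattern, independent of the lotus harness:

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

// runWorkers launches n workers and turns a panic inside any worker into a
// returned error, mirroring the structure of runConcurrentDeals above.
func runWorkers(n int, work func(i int)) error {
	var errgrp errgroup.Group
	for i := 0; i < n; i++ {
		i := i
		errgrp.Go(func() (err error) {
			defer func() {
				if r := recover(); r != nil {
					err = fmt.Errorf("worker %d failed: %s", i, r)
				}
			}()
			work(i)
			return nil
		})
	}
	return errgrp.Wait()
}

func main() {
	err := runWorkers(4, func(i int) {
		if i == 2 {
			panic("boom")
		}
	})
	fmt.Println(err) // prints: worker 2 failed: boom
}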
 func TestPublishDealsBatching(t *testing.T) {
 var (

@@ -102,7 +122,7 @@ func TestPublishDealsBatching(t *testing.T) {
 startEpoch = abi.ChainEpoch(2 << 12)
 )

-kit.QuietMiningLogs()
+kit2.QuietMiningLogs()

 opts := node.Override(new(*storageadapter.DealPublisher),
 storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{

@@ -111,10 +131,10 @@ func TestPublishDealsBatching(t *testing.T) {
 }),
 )

-client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ExtraNodeOpts(opts))
+client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(), kit2.ConstructorOpts(opts))
 ens.InterconnectAll().BeginMining(10 * time.Millisecond)

-dh := kit.NewDealHarness(t, client, miner)
+dh := kit2.NewDealHarness(t, client, miner)

 // Starts a deal and waits until it's published
 runDealTillPublish := func(rseed int) {

@@ -189,23 +209,23 @@ func TestFirstDealEnablesMining(t *testing.T) {
 t.Skip("skipping test in short mode")
 }

-kit.QuietMiningLogs()
+kit2.QuietMiningLogs()

 var (
-client kit.TestFullNode
-genMiner kit.TestMiner // bootstrap
-provider kit.TestMiner // no sectors, will need to create one
+client kit2.TestFullNode
+genMiner kit2.TestMiner // bootstrap
+provider kit2.TestMiner // no sectors, will need to create one
 )

-ens := kit.NewEnsemble(t)
-ens.FullNode(&client, kit.MockProofs())
-ens.Miner(&genMiner, &client, kit.MockProofs())
-ens.Miner(&provider, &client, kit.MockProofs(), kit.PresealSectors(0))
+ens := kit2.NewEnsemble(t, kit2.MockProofs())
+ens.FullNode(&client)
+ens.Miner(&genMiner, &client)
+ens.Miner(&provider, &client, kit2.PresealSectors(0))
 ens.Start().InterconnectAll().BeginMining(50 * time.Millisecond)

 ctx := context.Background()

-dh := kit.NewDealHarness(t, &client, &provider)
+dh := kit2.NewDealHarness(t, &client, &provider)

 ref, _ := client.CreateImportFile(ctx, 5, 0)

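TestFirstDealEnablesMining above shows the second way to drive kit2: instead of EnsembleMinimal, the ensemble is assembled node by node and started explicitly. A condensed sketch of that construction, using only the calls that appear in the hunk above (variable and test names are illustrative):

package itests

import (
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit2"
)

func TestManualEnsembleSketch(t *testing.T) {
	var (
		client    kit2.TestFullNode
		bootstrap kit2.TestMiner // has preseals, keeps the chain moving
		provider  kit2.TestMiner // starts with no sectors
	)

	ens := kit2.NewEnsemble(t, kit2.MockProofs())
	ens.FullNode(&client)
	ens.Miner(&bootstrap, &client)
	ens.Miner(&provider, &client, kit2.PresealSectors(0))
	ens.Start().InterconnectAll().BeginMining(50 * time.Millisecond)

	// client, bootstrap and provider are now running and connected.
	_ = kit2.NewDealHarness(t, &client, &provider)
}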
@@ -258,10 +278,10 @@ func TestOfflineDealFlow(t *testing.T) {

 runTest := func(t *testing.T, fastRet bool) {
 ctx := context.Background()
-client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
+client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs())
 ens.InterconnectAll().BeginMining(blocktime)

-dh := kit.NewDealHarness(t, client, miner)
+dh := kit2.NewDealHarness(t, client, miner)

 // Create a random file and import on the client.
 res, inFile := client.CreateImportFile(ctx, 1, 0)

@@ -325,49 +345,13 @@ func TestOfflineDealFlow(t *testing.T) {
 // Retrieve the deal
 outFile := dh.PerformRetrieval(ctx, proposalCid, rootCid, false)

-equal := kit.FilesEqual(t, inFile, outFile)
-require.True(t, equal)
+kit2.AssertFilesEqual(t, inFile, outFile)
 }

-t.Run("NormalRetrieval", func(t *testing.T) { runTest(t, false) })
-t.Run("FastRetrieval", func(t *testing.T) { runTest(t, true) })
+t.Run("stdretrieval", func(t *testing.T) { runTest(t, false) })
+t.Run("fastretrieval", func(t *testing.T) { runTest(t, true) })
 }

-//
-// func runFastRetrievalDealFlowT(t *testing.T, b kit.APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
-// ctx := context.Background()
-//
-// var (
-// nb = kit.NewNodeBuilder(t)
-// full = nb.FullNode()
-// miner = nb.Miner(full)
-// )
-//
-// nb.Create()
-//
-// kit.ConnectAndStartMining(t, blocktime, miner, full)
-//
-// dh := kit.NewDealHarness(t, full, miner)
-// data := make([]byte, 1600)
-// rand.New(rand.NewSource(int64(8))).Read(data)
-//
-// r := bytes.NewReader(data)
-// fcid, err := full.FullNode.(*impl.FullNodeAPI).ClientImportLocal(ctx, r)
-// require.NoError(t, err)
-//
-// fmt.Println("FILE CID: ", fcid)
-//
-// deal := dh.StartDeal(ctx, fcid, true, startEpoch)
-// dh.WaitDealPublished(ctx, deal)
-//
-// fmt.Println("deal published, retrieving")
-//
-// // Retrieval
-// info, err := full.ClientGetDealInfo(ctx, *deal)
-// require.NoError(t, err)
-//
-// dh.PerformRetrieval(ctx, fcid, &info.PieceCID, false, data)
-// }
 //
 // func runSecondDealRetrievalTest(t *testing.T, b kit.APIBuilder, blocktime time.Duration) {
 // ctx := context.Background()

@@ -291,7 +291,7 @@ func startNodes(
 },
 },
 )
-n, sn := kit.MinerRPCMockMinerBuilder(t, opts, kit.OneMiner)
+n, sn := kit.RPCMockMinerBuilder(t, opts, kit.OneMiner)

 full := n[0]
 lite := n[1]

@@ -15,14 +15,14 @@ import (
 // BlockMiner is a utility that makes a test miner Mine blocks on a timer.
 type BlockMiner struct {
 t *testing.T
-miner *TestMiner
+miner TestMiner

 nextNulls int64
 wg sync.WaitGroup
 cancel context.CancelFunc
 }

-func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner {
+func NewBlockMiner(t *testing.T, miner TestMiner) *BlockMiner {
 return &BlockMiner{
 t: t,
 miner: miner,

@@ -69,7 +69,7 @@ func (bm *BlockMiner) InjectNulls(rounds abi.ChainEpoch) {
 atomic.AddInt64(&bm.nextNulls, int64(rounds))
 }

-func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn *TestFullNode, cb func(abi.ChainEpoch)) {
+func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn TestFullNode, cb func(abi.ChainEpoch)) {
 for i := 0; i < 1000; i++ {
 var (
 success bool

@@ -93,7 +93,7 @@ func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn *TestFullNode, cb f

 if success {
 // Wait until it shows up on the given full nodes ChainHead
-nloops := 200
+nloops := 50
 for i := 0; i < nloops; i++ {
 ts, err := fn.ChainHead(ctx)
 require.NoError(bm.t, err)
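The BlockMiner changes above only affect how the miner handle is passed in; the driving pattern is unchanged. A hedged sketch of using it from a test, based on the calls visible in this diff (kit2.NewBlockMiner and MineUntilBlock as used in the api tests earlier); the callback signature is assumed to match the kit version shown here, and the test name is illustrative:

package itests

import (
	"context"
	"testing"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/itests/kit2"
)

func TestMineOneBlockSketch(t *testing.T) {
	ctx := context.Background()

	full, miner, _ := kit2.EnsembleMinimal(t, kit2.MockProofs())

	bm := kit2.NewBlockMiner(t, miner)
	// Mine until the new tipset is observable on the full node's ChainHead.
	bm.MineUntilBlock(ctx, full, func(epoch abi.ChainEpoch) {
		t.Logf("mined a block at epoch %d", epoch)
	})
}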
@@ -3,12 +3,16 @@ package kit
 import (
 "context"
 "fmt"
+"io/ioutil"
+"math/rand"
+"os"
 "path/filepath"
 "regexp"
 "strings"
 "testing"
 "time"

+"github.com/filecoin-project/lotus/api"
 "github.com/filecoin-project/lotus/build"
 "github.com/filecoin-project/lotus/chain/types"
 "github.com/filecoin-project/specs-actors/v2/actors/builtin"

@@ -39,7 +43,7 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode TestFullNode)

 // Create a deal (non-interactive)
 // client deal --start-epoch=<start epoch> <cid> <Miner addr> 1000000attofil <duration>
-res, _ := clientNode.CreateImportFile(ctx, 1, 0)
+res, _, _, err := CreateImportFile(ctx, clientNode, 1, 0)

 require.NoError(t, err)
 startEpoch := fmt.Sprintf("--start-epoch=%d", 2<<12)

@@ -56,7 +60,7 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode TestFullNode)
 // <miner addr>
 // "no" (verified Client)
 // "yes" (confirm deal)
-res, _ = clientNode.CreateImportFile(ctx, 2, 0)
+res, _, _, err = CreateImportFile(ctx, clientNode, 2, 0)
 require.NoError(t, err)
 dataCid2 := res.Root
 duration = fmt.Sprintf("%d", build.MinDealDuration/builtin.EpochsInDay)

@@ -99,9 +103,44 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode TestFullNode)

 // Retrieve the first file from the Miner
 // client retrieve <cid> <file path>
-tmpdir := t.TempDir()
+tmpdir, err := ioutil.TempDir(os.TempDir(), "test-cli-Client")
+require.NoError(t, err)
 path := filepath.Join(tmpdir, "outfile.dat")
 out = clientCLI.RunCmd("client", "retrieve", dataCid.String(), path)
 fmt.Println("retrieve:\n", out)
 require.Regexp(t, regexp.MustCompile("Success"), out)
 }

+func CreateImportFile(ctx context.Context, client api.FullNode, rseed int, size int) (res *api.ImportRes, path string, data []byte, err error) {
+data, path, err = createRandomFile(rseed, size)
+if err != nil {
+return nil, "", nil, err
+}
+
+res, err = client.ClientImport(ctx, api.FileRef{Path: path})
+if err != nil {
+return nil, "", nil, err
+}
+return res, path, data, nil
+}
+
+func createRandomFile(rseed, size int) ([]byte, string, error) {
+if size == 0 {
+size = 1600
+}
+data := make([]byte, size)
+rand.New(rand.NewSource(int64(rseed))).Read(data)
+
+dir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-")
+if err != nil {
+return nil, "", err
+}
+
+path := filepath.Join(dir, "sourcefile.dat")
+err = ioutil.WriteFile(path, data, 0644)
+if err != nil {
+return nil, "", err
+}
+
+return data, path, nil
+}
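CreateImportFile above replaces the old method on the test node with a package-level helper that also returns the generated bytes and the on-disk path. A minimal usage sketch from inside package kit, assuming only the helper exactly as defined above (the wrapping function is illustrative):

package kit

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
)

// importFixture imports a deterministic 1600-byte file (size 0 selects the
// default) into the given node and returns its root CID as a string.
func importFixture(ctx context.Context, t *testing.T, client TestFullNode, rseed int) string {
	res, path, data, err := CreateImportFile(ctx, client, rseed, 0)
	require.NoError(t, err)

	t.Logf("imported %d bytes from %s as %s", len(data), path, res.Root)
	return res.Root.String()
}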
@@ -3,7 +3,10 @@ package kit
 import (
 "bytes"
 "context"
+"fmt"
 "io/ioutil"
+"os"
+"path/filepath"
 "testing"
 "time"

@@ -18,6 +21,7 @@ import (
 "github.com/filecoin-project/lotus/build"
 "github.com/filecoin-project/lotus/chain/types"
 sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
+"github.com/filecoin-project/lotus/node/impl"
 ipld "github.com/ipfs/go-ipld-format"
 dag "github.com/ipfs/go-merkledag"
 dstest "github.com/ipfs/go-merkledag/test"

@@ -26,12 +30,12 @@ import (

 type DealHarness struct {
 t *testing.T
-client *TestFullNode
-miner *TestMiner
+client api.FullNode
+miner TestMiner
 }

 // NewDealHarness creates a test harness that contains testing utilities for deals.
-func NewDealHarness(t *testing.T, client *TestFullNode, miner *TestMiner) *DealHarness {
+func NewDealHarness(t *testing.T, client api.FullNode, miner TestMiner) *DealHarness {
 return &DealHarness{
 t: t,
 client: client,

@@ -39,27 +43,38 @@ func NewDealHarness(t *testing.T, client *TestFullNode, miner *TestMiner) *DealH
 }
 }

-func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, rseed int, fastRet bool, startEpoch abi.ChainEpoch) (deal *cid.Cid, res *api.ImportRes, path string) {
-res, path = dh.client.CreateImportFile(ctx, rseed, 0)
+func (dh *DealHarness) MakeFullDeal(ctx context.Context, rseed int, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
+res, _, data, err := CreateImportFile(ctx, dh.client, rseed, 0)
+if err != nil {
+dh.t.Fatal(err)
+}

-dh.t.Logf("FILE CID: %s", res.Root)
+fcid := res.Root
+fmt.Println("FILE CID: ", fcid)

-deal = dh.StartDeal(ctx, res.Root, fastRet, startEpoch)
+deal := dh.StartDeal(ctx, fcid, fastRet, startEpoch)

 // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
 time.Sleep(time.Second)
 dh.WaitDealSealed(ctx, deal, false, false, nil)

-return deal, res, path
+// Retrieval
+info, err := dh.client.ClientGetDealInfo(ctx, *deal)
+require.NoError(dh.t, err)
+
+dh.TestRetrieval(ctx, fcid, &info.PieceCID, carExport, data)
 }

 func (dh *DealHarness) StartDeal(ctx context.Context, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid {
 maddr, err := dh.miner.ActorAddress(ctx)
-require.NoError(dh.t, err)
+if err != nil {
+dh.t.Fatal(err)
+}

 addr, err := dh.client.WalletDefaultAddress(ctx)
-require.NoError(dh.t, err)
+if err != nil {
+dh.t.Fatal(err)
+}
 deal, err := dh.client.ClientStartDeal(ctx, &api.StartDealParams{
 Data: &storagemarket.DataRef{
 TransferType: storagemarket.TTGraphsync,
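For comparison, the kit2 DealHarness keeps the decomposed flow that the left-hand column shows being removed here: MakeOnlineDeal returns the deal CID, the import result and the input path, and retrieval plus verification are separate steps. A minimal sketch of a full storage-and-retrieval round trip against kit2, using only calls that appear elsewhere in this diff (the test name is illustrative):

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit2"
)

func TestDealRoundTripSketch(t *testing.T) {
	ctx := context.Background()

	client, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs())
	ens.InterconnectAll().BeginMining(10 * time.Millisecond)

	dh := kit2.NewDealHarness(t, client, miner)

	// Store a random payload, then fetch it back and compare byte-for-byte.
	deal, res, inPath := dh.MakeOnlineDeal(ctx, 5, false, 0) // rseed, fastRetrieval, startEpoch
	outPath := dh.PerformRetrieval(ctx, deal, res.Root, false /* carExport */)
	kit2.AssertFilesEqual(t, inPath, outPath)
}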
@@ -72,8 +87,9 @@ func (dh *DealHarness) StartDeal(ctx context.Context, fcid cid.Cid, fastRet bool
 MinBlocksDuration: uint64(build.MinDealDuration),
 FastRetrieval: fastRet,
 })
-require.NoError(dh.t, err)
+if err != nil {
+dh.t.Fatalf("%+v", err)
+}
 return deal
 }

@@ -98,7 +114,7 @@ loop:
 case storagemarket.StorageDealError:
 dh.t.Fatal("deal errored", di.Message)
 case storagemarket.StorageDealActive:
-dh.t.Log("COMPLETE", di)
+fmt.Println("COMPLETE", di)
 break loop
 }

@@ -113,7 +129,7 @@ loop:
 }
 }

-dh.t.Logf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState])
+fmt.Printf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState])
 time.Sleep(time.Second / 2)
 if cb != nil {
 cb()

@@ -124,10 +140,10 @@ loop:
 func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) {
 subCtx, cancel := context.WithCancel(ctx)
 defer cancel()

 updates, err := dh.miner.MarketGetDealUpdates(subCtx)
-require.NoError(dh.t, err)
+if err != nil {
+dh.t.Fatal(err)
+}
 for {
 select {
 case <-ctx.Done():

@@ -142,10 +158,10 @@ func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) {
 case storagemarket.StorageDealError:
 dh.t.Fatal("deal errored", di.Message)
 case storagemarket.StorageDealFinalizing, storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
-dh.t.Log("COMPLETE", di)
+fmt.Println("COMPLETE", di)
 return
 }
-dh.t.Log("Deal state: ", storagemarket.DealStates[di.State])
+fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
 }
 }
 }

@@ -164,79 +180,97 @@ func (dh *DealHarness) StartSealingWaiting(ctx context.Context) {
 require.NoError(dh.t, dh.miner.SectorStartSealing(ctx, snum))
 }

-dh.miner.FlushSealingBatches(ctx)
+flushSealingBatches(dh.t, ctx, dh.miner)
 }
 }

-func (dh *DealHarness) PerformRetrieval(ctx context.Context, deal *cid.Cid, root cid.Cid, carExport bool) (path string) {
-// perform retrieval.
-info, err := dh.client.ClientGetDealInfo(ctx, *deal)
-require.NoError(dh.t, err)
-
-offers, err := dh.client.ClientFindData(ctx, root, &info.PieceCID)
-require.NoError(dh.t, err)
-require.NotEmpty(dh.t, offers, "no offers")
+func (dh *DealHarness) TestRetrieval(ctx context.Context, fcid cid.Cid, piece *cid.Cid, carExport bool, expect []byte) {
+offers, err := dh.client.ClientFindData(ctx, fcid, piece)
+if err != nil {
+dh.t.Fatal(err)
+}
+
+if len(offers) < 1 {
+dh.t.Fatal("no offers")
+}

-tmpfile, err := ioutil.TempFile(dh.t.TempDir(), "ret-car")
-require.NoError(dh.t, err)
-defer tmpfile.Close()
+rpath, err := ioutil.TempDir("", "lotus-retrieve-test-")
+if err != nil {
+dh.t.Fatal(err)
+}
+defer os.RemoveAll(rpath) //nolint:errcheck

 caddr, err := dh.client.WalletDefaultAddress(ctx)
-require.NoError(dh.t, err)
+if err != nil {
+dh.t.Fatal(err)
+}

 ref := &api.FileRef{
-Path: tmpfile.Name(),
+Path: filepath.Join(rpath, "ret"),
 IsCAR: carExport,
 }

 updates, err := dh.client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref)
-require.NoError(dh.t, err)
+if err != nil {
+dh.t.Fatal(err)
+}
 for update := range updates {
-require.Emptyf(dh.t, update.Err, "retrieval failed: %s", update.Err)
+if update.Err != "" {
+dh.t.Fatalf("retrieval failed: %s", update.Err)
+}
 }

-rdata, err := ioutil.ReadFile(tmpfile.Name())
-require.NoError(dh.t, err)
+rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret"))
+if err != nil {
+dh.t.Fatal(err)
+}

 if carExport {
-rdata = dh.ExtractFileFromCAR(ctx, rdata)
+rdata = dh.ExtractCarData(ctx, rdata, rpath)
 }

-return tmpfile.Name()
+if !bytes.Equal(rdata, expect) {
+dh.t.Fatal("wrong expect retrieved")
+}
 }

-func (dh *DealHarness) ExtractFileFromCAR(ctx context.Context, rdata []byte) []byte {
+func (dh *DealHarness) ExtractCarData(ctx context.Context, rdata []byte, rpath string) []byte {
 bserv := dstest.Bserv()
 ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata))
-require.NoError(dh.t, err)
+if err != nil {
+dh.t.Fatal(err)
+}
 b, err := bserv.GetBlock(ctx, ch.Roots[0])
-require.NoError(dh.t, err)
+if err != nil {
+dh.t.Fatal(err)
+}
 nd, err := ipld.Decode(b)
-require.NoError(dh.t, err)
+if err != nil {
+dh.t.Fatal(err)
+}
 dserv := dag.NewDAGService(bserv)
 fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
-require.NoError(dh.t, err)
-tmpfile, err := ioutil.TempFile(dh.t.TempDir(), "file-in-car")
-require.NoError(dh.t, err)
-defer tmpfile.Close()
-err = files.WriteTo(fil, tmpfile.Name())
-require.NoError(dh.t, err)
-rdata, err = ioutil.ReadFile(tmpfile.Name())
-require.NoError(dh.t, err)
+if err != nil {
+dh.t.Fatal(err)
+}
+outPath := filepath.Join(rpath, "retLoadedCAR")
+if err := files.WriteTo(fil, outPath); err != nil {
+dh.t.Fatal(err)
+}
+rdata, err = ioutil.ReadFile(outPath)
+if err != nil {
+dh.t.Fatal(err)
+}

 return rdata
 }

-func ConnectAndStartMining(t *testing.T, blocktime time.Duration, miner *TestMiner, clients ...api.FullNode) *BlockMiner {
+type DealsScaffold struct {
+Ctx context.Context
+Client *impl.FullNodeAPI
+Miner TestMiner
+BlockMiner *BlockMiner
+}
+
+func ConnectAndStartMining(t *testing.T, blocktime time.Duration, miner TestMiner, clients ...api.FullNode) *BlockMiner {
 ctx := context.Background()

 for _, c := range clients {

@@ -1,41 +0,0 @@
-package kit
-
-import "testing"
-
-// EnsembleMinimal creates and starts an ensemble with a single full node and a single miner.
-// It does not interconnect nodes nor does it begin mining.
-func EnsembleMinimal(t *testing.T, opts ...NodeOpt) (*TestFullNode, *TestMiner, *Ensemble) {
-var (
-full TestFullNode
-miner TestMiner
-)
-ensemble := NewEnsemble(t).FullNode(&full, opts...).Miner(&miner, &full, opts...).Start()
-return &full, &miner, ensemble
-}
-
-// EnsembleTwoOne creates and starts an ensemble with two full nodes and one miner.
-// It does not interconnect nodes nor does it begin mining.
-func EnsembleTwoOne(t *testing.T, opts ...NodeOpt) (*TestFullNode, *TestFullNode, *TestMiner, *Ensemble) {
-var (
-one, two TestFullNode
-miner TestMiner
-)
-ensemble := NewEnsemble(t).FullNode(&one, opts...).FullNode(&two, opts...).Miner(&miner, &one, opts...).Start()
-return &one, &two, &miner, ensemble
-}
-
-// EnsembleOneTwo creates and starts an ensemble with one full node and two miners.
-// It does not interconnect nodes nor does it begin mining.
-func EnsembleOneTwo(t *testing.T, opts ...NodeOpt) (*TestFullNode, *TestMiner, *TestMiner, *Ensemble) {
-var (
-full TestFullNode
-one, two TestMiner
-)
-ensemble := NewEnsemble(t).
-FullNode(&full, opts...).
-Miner(&one, &full, opts...).
-Miner(&two, &full, opts...).
-Start()
-
-return &full, &one, &two, ensemble
-}
@@ -5,7 +5,6 @@ import (
 "testing"

 "github.com/filecoin-project/go-state-types/abi"
-"github.com/stretchr/testify/require"

 "github.com/filecoin-project/go-address"
 "github.com/filecoin-project/lotus/api"

@@ -16,7 +15,9 @@ import (
 // to the recipient address.
 func SendFunds(ctx context.Context, t *testing.T, sender TestFullNode, recipient address.Address, amount abi.TokenAmount) {
 senderAddr, err := sender.WalletDefaultAddress(ctx)
-require.NoError(t, err)
+if err != nil {
+t.Fatal(err)
+}

 msg := &types.Message{
 From: senderAddr,

@@ -25,10 +26,14 @@ func SendFunds(ctx context.Context, t *testing.T, sender TestFullNode, recipient
 }

 sm, err := sender.MpoolPushMessage(ctx, msg, nil)
-require.NoError(t, err)
+if err != nil {
+t.Fatal(err)
+}
 res, err := sender.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true)
-require.NoError(t, err)
-require.Equal(t, 0, res.Receipt.ExitCode, "did not successfully send funds")
+if err != nil {
+t.Fatal(err)
+}
+if res.Receipt.ExitCode != 0 {
+t.Fatal("did not successfully send money")
+}
 }

@@ -17,13 +17,9 @@ func init() {
 policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
 policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))

-build.InsecurePoStValidation = true
-if err := os.Setenv("BELLMAN_NO_GPU", "1"); err != nil {
+err := os.Setenv("BELLMAN_NO_GPU", "1")
+if err != nil {
 panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
 }
-if err := os.Setenv("LOTUS_DISABLE_WATCHDOG", "1"); err != nil {
-panic(fmt.Sprintf("failed to set LOTUS_DISABLE_WATCHDOG env variable: %s", err))
-}
+build.InsecurePoStValidation = true
 }

@@ -1,42 +1,87 @@
 package kit

-//
-// func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) ([]TestFullNode, []address.Address) {
-// n, sn := MinerRPCMockMinerBuilder(t, TwoFull, OneMiner)
-//
-// fullNode1 := n[0]
-// fullNode2 := n[1]
-// miner := sn[0]
-//
-// // Get everyone connected
-// addrs, err := fullNode1.NetAddrsListen(ctx)
-// if err != nil {
-// t.Fatal(err)
-// }
-//
-// if err := fullNode2.NetConnect(ctx, addrs); err != nil {
-// t.Fatal(err)
-// }
-//
-// // Start mining blocks
-// bm := NewBlockMiner(t, miner)
-// bm.MineBlocks(ctx, blocktime)
-// t.Cleanup(bm.Stop)
-//
-// // Send some funds to register the second node
-// fullNodeAddr2, err := fullNode2.WalletNew(ctx, types.KTSecp256k1)
-// if err != nil {
-// t.Fatal(err)
-// }
-//
-// SendFunds(ctx, t, fullNode1, fullNodeAddr2, abi.NewTokenAmount(1e18))
-//
-// // Get the first node's address
-// fullNodeAddr1, err := fullNode1.WalletDefaultAddress(ctx)
-// if err != nil {
-// t.Fatal(err)
-// }
-//
-// // Create mock CLI
-// return n, []address.Address{fullNodeAddr1, fullNodeAddr2}
-// }
+import (
+"context"
+"testing"
+"time"
+
+"github.com/filecoin-project/go-state-types/abi"
+"github.com/filecoin-project/lotus/chain/types"
+
+"github.com/filecoin-project/go-address"
+)
+
+func StartOneNodeOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) (TestFullNode, address.Address) {
+n, sn := RPCMockMinerBuilder(t, OneFull, OneMiner)
+
+full := n[0]
+miner := sn[0]
+
+// Get everyone connected
+addrs, err := full.NetAddrsListen(ctx)
+if err != nil {
+t.Fatal(err)
+}
+
+if err := miner.NetConnect(ctx, addrs); err != nil {
+t.Fatal(err)
+}
+
+// Start mining blocks
+bm := NewBlockMiner(t, miner)
+bm.MineBlocks(ctx, blocktime)
+t.Cleanup(bm.Stop)
+
+// Get the full node's wallet address
+fullAddr, err := full.WalletDefaultAddress(ctx)
+if err != nil {
+t.Fatal(err)
+}
+
+// Create mock CLI
+return full, fullAddr
+}
+
+func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) ([]TestFullNode, []address.Address) {
+n, sn := RPCMockMinerBuilder(t, TwoFull, OneMiner)
+
+fullNode1 := n[0]
+fullNode2 := n[1]
+miner := sn[0]
+
+// Get everyone connected
+addrs, err := fullNode1.NetAddrsListen(ctx)
+if err != nil {
+t.Fatal(err)
+}
+
+if err := fullNode2.NetConnect(ctx, addrs); err != nil {
+t.Fatal(err)
+}
+
+if err := miner.NetConnect(ctx, addrs); err != nil {
+t.Fatal(err)
+}
+
+// Start mining blocks
+bm := NewBlockMiner(t, miner)
+bm.MineBlocks(ctx, blocktime)
+t.Cleanup(bm.Stop)
+
+// Send some funds to register the second node
+fullNodeAddr2, err := fullNode2.WalletNew(ctx, types.KTSecp256k1)
+if err != nil {
+t.Fatal(err)
+}
+
+SendFunds(ctx, t, fullNode1, fullNodeAddr2, abi.NewTokenAmount(1e18))
+
+// Get the first node's address
+fullNodeAddr1, err := fullNode1.WalletDefaultAddress(ctx)
+if err != nil {
+t.Fatal(err)
+}
+
+// Create mock CLI
+return n, []address.Address{fullNodeAddr1, fullNodeAddr2}
+}
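StartOneNodeOneMiner and StartTwoNodesOneMiner above restore the kit scaffolding used by the not-yet-migrated CLI tests. A hedged sketch of how a caller would typically drive the one-node helper (the surrounding test is hypothetical; only the helper itself comes from this diff):

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/stretchr/testify/require"
)

func TestOneNodeOneMinerSketch(t *testing.T) {
	ctx := context.Background()

	// Boots an RPC full node plus a mock-sectors miner and starts block production.
	full, addr := kit.StartOneNodeOneMiner(ctx, t, 5*time.Millisecond)
	t.Logf("default wallet of the full node: %s", addr)

	head, err := full.ChainHead(ctx)
	require.NoError(t, err)
	require.NotNil(t, head)
}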
itests/kit/node_builder.go (new file, 658 lines)
@ -0,0 +1,658 @@
|
|||||||
|
package kit
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"crypto/rand"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-state-types/network"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-address"
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/go-state-types/big"
|
||||||
|
"github.com/filecoin-project/go-state-types/exitcode"
|
||||||
|
"github.com/filecoin-project/go-storedcounter"
|
||||||
|
"github.com/filecoin-project/lotus/api"
|
||||||
|
"github.com/filecoin-project/lotus/api/client"
|
||||||
|
"github.com/filecoin-project/lotus/api/v1api"
|
||||||
|
"github.com/filecoin-project/lotus/build"
|
||||||
|
"github.com/filecoin-project/lotus/chain"
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors"
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
|
||||||
|
"github.com/filecoin-project/lotus/chain/gen"
|
||||||
|
genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
|
||||||
|
"github.com/filecoin-project/lotus/chain/messagepool"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
"github.com/filecoin-project/lotus/chain/wallet"
|
||||||
|
"github.com/filecoin-project/lotus/cmd/lotus-seed/seed"
|
||||||
|
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
|
||||||
|
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||||
|
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
|
||||||
|
"github.com/filecoin-project/lotus/genesis"
|
||||||
|
lotusminer "github.com/filecoin-project/lotus/miner"
|
||||||
|
"github.com/filecoin-project/lotus/node"
|
||||||
|
"github.com/filecoin-project/lotus/node/modules"
|
||||||
|
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||||
|
testing2 "github.com/filecoin-project/lotus/node/modules/testing"
|
||||||
|
"github.com/filecoin-project/lotus/node/repo"
|
||||||
|
"github.com/filecoin-project/lotus/storage/mockstorage"
|
||||||
|
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
|
||||||
|
power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power"
|
||||||
|
"github.com/ipfs/go-datastore"
|
||||||
|
"github.com/libp2p/go-libp2p-core/crypto"
|
||||||
|
"github.com/libp2p/go-libp2p-core/peer"
|
||||||
|
mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
|
||||||
|
"github.com/multiformats/go-multiaddr"
|
||||||
|
manet "github.com/multiformats/go-multiaddr/net"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
chain.BootstrapPeerThreshold = 1
|
||||||
|
messagepool.HeadChangeCoalesceMinDelay = time.Microsecond
|
||||||
|
messagepool.HeadChangeCoalesceMaxDelay = 2 * time.Microsecond
|
||||||
|
messagepool.HeadChangeCoalesceMergeInterval = 100 * time.Nanosecond
|
||||||
|
}
|
||||||
|
|
||||||
|
func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Address, act address.Address, pk crypto.PrivKey, tnd TestFullNode, mn mocknet.Mocknet, opts node.Option) TestMiner {
|
||||||
|
r := repo.NewMemory(nil)
|
||||||
|
|
||||||
|
lr, err := r.Lock(repo.StorageMiner)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
ks, err := lr.KeyStore()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
kbytes, err := pk.Bytes()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
err = ks.Put("libp2p-host", types.KeyInfo{
|
||||||
|
Type: "libp2p-host",
|
||||||
|
PrivateKey: kbytes,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
ds, err := lr.Datastore(context.TODO(), "/metadata")
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = ds.Put(datastore.NewKey("miner-address"), act.Bytes())
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
nic := storedcounter.New(ds, datastore.NewKey(modules.StorageCounterDSPrefix))
|
||||||
|
for i := 0; i < GenesisPreseals; i++ {
|
||||||
|
_, err := nic.Next()
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
_, err = nic.Next()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
err = lr.Close()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
peerid, err := peer.IDFromPrivateKey(pk)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(peerid)})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
msg := &types.Message{
|
||||||
|
To: act,
|
||||||
|
From: waddr,
|
||||||
|
Method: miner.Methods.ChangePeerID,
|
||||||
|
Params: enc,
|
||||||
|
Value: types.NewInt(0),
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = tnd.MpoolPushMessage(ctx, msg, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// start node
|
||||||
|
var minerapi api.StorageMiner
|
||||||
|
|
||||||
|
mineBlock := make(chan lotusminer.MineReq)
|
||||||
|
stop, err := node.New(ctx,
|
||||||
|
node.StorageMiner(&minerapi),
|
||||||
|
node.Online(),
|
||||||
|
node.Repo(r),
|
||||||
|
node.Test(),
|
||||||
|
|
||||||
|
node.MockHost(mn),
|
||||||
|
|
||||||
|
node.Override(new(v1api.FullNode), tnd),
|
||||||
|
node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, act)),
|
||||||
|
|
||||||
|
opts,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to construct node: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Cleanup(func() { _ = stop(context.Background()) })
|
||||||
|
|
||||||
|
/*// Bootstrap with full node
|
||||||
|
remoteAddrs, err := tnd.NetAddrsListen(Ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
err = minerapi.NetConnect(Ctx, remoteAddrs)
|
||||||
|
require.NoError(t, err)*/
|
||||||
|
mineOne := func(ctx context.Context, req lotusminer.MineReq) error {
|
||||||
|
select {
|
||||||
|
case mineBlock <- req:
|
||||||
|
return nil
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return TestMiner{StorageMiner: minerapi, MineOne: mineOne, Stop: stop}
|
||||||
|
}
|
||||||
|
|
||||||
|
func storageBuilder(parentNode TestFullNode, mn mocknet.Mocknet, opts node.Option) MinerBuilder {
|
||||||
|
return func(ctx context.Context, t *testing.T, spt abi.RegisteredSealProof, owner address.Address) TestMiner {
|
||||||
|
pk, _, err := crypto.GenerateEd25519Key(rand.Reader)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
minerPid, err := peer.IDFromPrivateKey(pk)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
params, serr := actors.SerializeParams(&power2.CreateMinerParams{
|
||||||
|
Owner: owner,
|
||||||
|
Worker: owner,
|
||||||
|
SealProofType: spt,
|
||||||
|
Peer: abi.PeerID(minerPid),
|
||||||
|
})
|
||||||
|
require.NoError(t, serr)
|
||||||
|
|
||||||
|
createStorageMinerMsg := &types.Message{
|
||||||
|
To: power.Address,
|
||||||
|
From: owner,
|
||||||
|
Value: big.Zero(),
|
||||||
|
|
||||||
|
Method: power.Methods.CreateMiner,
|
||||||
|
Params: params,
|
||||||
|
|
||||||
|
GasLimit: 0,
|
||||||
|
GasPremium: big.NewInt(5252),
|
||||||
|
}
|
||||||
|
|
||||||
|
signed, err := parentNode.MpoolPushMessage(ctx, createStorageMinerMsg, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
mw, err := parentNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, exitcode.Ok, mw.Receipt.ExitCode)
|
||||||
|
|
||||||
|
var retval power2.CreateMinerReturn
|
||||||
|
err = retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
return CreateTestStorageNode(ctx, t, owner, retval.IDAddress, pk, parentNode, mn, opts)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Builder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
|
||||||
|
return mockBuilderOpts(t, fullOpts, storage, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func RPCBuilder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
|
||||||
|
return mockBuilderOpts(t, fullOpts, storage, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func MockMinerBuilder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
|
||||||
|
return mockMinerBuilderOpts(t, fullOpts, storage, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func RPCMockMinerBuilder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
|
||||||
|
return mockMinerBuilderOpts(t, fullOpts, storage, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func mockBuilderOpts(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner, rpc bool) ([]TestFullNode, []TestMiner) {
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)

	mn := mocknet.New(ctx)

	fulls := make([]TestFullNode, len(fullOpts))
	miners := make([]TestMiner, len(storage))

	// *****
	pk, _, err := crypto.GenerateEd25519Key(rand.Reader)
	require.NoError(t, err)

	minerPid, err := peer.IDFromPrivateKey(pk)
	require.NoError(t, err)

	var genbuf bytes.Buffer

	if len(storage) > 1 {
		panic("need more peer IDs")
	}
	// *****

	// PRESEAL SECTION, TRY TO REPLACE WITH BETTER IN THE FUTURE
	// TODO: would be great if there was a better way to fake the preseals

	var (
		genms   []genesis.Miner
		maddrs  []address.Address
		genaccs []genesis.Actor
		keys    []*wallet.Key
	)

	var presealDirs []string
	for i := 0; i < len(storage); i++ {
		maddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(i))
		if err != nil {
			t.Fatal(err)
		}
		tdir, err := ioutil.TempDir("", "preseal-memgen")
		if err != nil {
			t.Fatal(err)
		}
		genm, k, err := seed.PreSeal(maddr, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, GenesisPreseals, tdir, []byte("make genesis mem random"), nil, true)
		if err != nil {
			t.Fatal(err)
		}
		genm.PeerId = minerPid

		wk, err := wallet.NewKey(*k)
		if err != nil {
			return nil, nil
		}

		genaccs = append(genaccs, genesis.Actor{
			Type:    genesis.TAccount,
			Balance: big.Mul(big.NewInt(400000000), types.NewInt(build.FilecoinPrecision)),
			Meta:    (&genesis.AccountMeta{Owner: wk.Address}).ActorMeta(),
		})

		keys = append(keys, wk)
		presealDirs = append(presealDirs, tdir)
		maddrs = append(maddrs, maddr)
		genms = append(genms, *genm)
	}

	rkhKey, err := wallet.GenerateKey(types.KTSecp256k1)
	if err != nil {
		return nil, nil
	}

	vrk := genesis.Actor{
		Type:    genesis.TAccount,
		Balance: big.Mul(big.Div(big.NewInt(int64(build.FilBase)), big.NewInt(100)), big.NewInt(int64(build.FilecoinPrecision))),
		Meta:    (&genesis.AccountMeta{Owner: rkhKey.Address}).ActorMeta(),
	}
	keys = append(keys, rkhKey)

	templ := &genesis.Template{
		NetworkVersion:   network.Version0,
		Accounts:         genaccs,
		Miners:           genms,
		NetworkName:      "test",
		Timestamp:        uint64(time.Now().Unix() - 10000), // some time sufficiently far in the past
		VerifregRootKey:  vrk,
		RemainderAccount: gen.DefaultRemainderAccountActor,
	}

	// END PRESEAL SECTION

	for i := 0; i < len(fullOpts); i++ {
		var genesis node.Option
		if i == 0 {
			genesis = node.Override(new(modules.Genesis), testing2.MakeGenesisMem(&genbuf, *templ))
		} else {
			genesis = node.Override(new(modules.Genesis), modules.LoadGenesis(genbuf.Bytes()))
		}

		stop, err := node.New(ctx,
			node.FullAPI(&fulls[i].FullNode, node.Lite(fullOpts[i].Lite)),
			node.Online(),
			node.Repo(repo.NewMemory(nil)),
			node.MockHost(mn),
			node.Test(),

			genesis,

			fullOpts[i].Opts(fulls),
		)

		if err != nil {
			t.Fatal(err)
		}

		t.Cleanup(func() { _ = stop(context.Background()) })

		if rpc {
			fulls[i] = fullRpc(t, fulls[i])
		}

		fulls[i].Stb = storageBuilder(fulls[i], mn, node.Options())
	}

	if _, err := fulls[0].FullNode.WalletImport(ctx, &rkhKey.KeyInfo); err != nil {
		t.Fatal(err)
	}

	for i, def := range storage {
		// TODO: support non-bootstrap miners
		if i != 0 {
			t.Fatal("only one storage node supported")
		}
		if def.Full != 0 {
			t.Fatal("storage nodes only supported on the first full node")
		}

		f := fulls[def.Full]
		if _, err := f.FullNode.WalletImport(ctx, &keys[i].KeyInfo); err != nil {
			t.Fatal(err)
		}
		if err := f.FullNode.WalletSetDefault(ctx, keys[i].Address); err != nil {
			t.Fatal(err)
		}

		genMiner := maddrs[i]
		wa := genms[i].Worker

		opts := def.Opts
		if opts == nil {
			opts = node.Options()
		}
		miners[i] = CreateTestStorageNode(ctx, t, wa, genMiner, pk, f, mn, opts)
		if err := miners[i].StorageAddLocal(ctx, presealDirs[i]); err != nil {
			t.Fatalf("%+v", err)
		}
		/*
			sma := miners[i].StorageMiner.(*impl.StorageMinerAPI)

			psd := presealDirs[i]
		*/
		if rpc {
			miners[i] = storerRpc(t, miners[i])
		}
	}

	if err := mn.LinkAll(); err != nil {
		t.Fatal(err)
	}

	if len(miners) > 0 {
		// Mine 2 blocks to setup some CE stuff in some actors
		var wait sync.Mutex
		wait.Lock()

		bm := NewBlockMiner(t, miners[0])
		t.Cleanup(bm.Stop)

		bm.MineUntilBlock(ctx, fulls[0], func(epoch abi.ChainEpoch) {
			wait.Unlock()
		})

		wait.Lock()
		bm.MineUntilBlock(ctx, fulls[0], func(epoch abi.ChainEpoch) {
			wait.Unlock()
		})
		wait.Lock()
	}

	return fulls, miners
}
func mockMinerBuilderOpts(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner, rpc bool) ([]TestFullNode, []TestMiner) {
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)

	mn := mocknet.New(ctx)

	fulls := make([]TestFullNode, len(fullOpts))
	miners := make([]TestMiner, len(storage))

	var genbuf bytes.Buffer

	// PRESEAL SECTION, TRY TO REPLACE WITH BETTER IN THE FUTURE
	// TODO: would be great if there was a better way to fake the preseals

	var (
		genms   []genesis.Miner
		genaccs []genesis.Actor
		maddrs  []address.Address
		keys    []*wallet.Key
		pidKeys []crypto.PrivKey
	)
	for i := 0; i < len(storage); i++ {
		maddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(i))
		if err != nil {
			t.Fatal(err)
		}

		preseals := storage[i].Preseal
		if preseals == PresealGenesis {
			preseals = GenesisPreseals
		}

		genm, k, err := mockstorage.PreSeal(abi.RegisteredSealProof_StackedDrg2KiBV1, maddr, preseals)
		if err != nil {
			t.Fatal(err)
		}

		pk, _, err := crypto.GenerateEd25519Key(rand.Reader)
		require.NoError(t, err)

		minerPid, err := peer.IDFromPrivateKey(pk)
		require.NoError(t, err)

		genm.PeerId = minerPid

		wk, err := wallet.NewKey(*k)
		if err != nil {
			return nil, nil
		}

		genaccs = append(genaccs, genesis.Actor{
			Type:    genesis.TAccount,
			Balance: big.Mul(big.NewInt(400000000), types.NewInt(build.FilecoinPrecision)),
			Meta:    (&genesis.AccountMeta{Owner: wk.Address}).ActorMeta(),
		})

		keys = append(keys, wk)
		pidKeys = append(pidKeys, pk)
		maddrs = append(maddrs, maddr)
		genms = append(genms, *genm)
	}

	rkhKey, err := wallet.GenerateKey(types.KTSecp256k1)
	if err != nil {
		return nil, nil
	}

	vrk := genesis.Actor{
		Type:    genesis.TAccount,
		Balance: big.Mul(big.Div(big.NewInt(int64(build.FilBase)), big.NewInt(100)), big.NewInt(int64(build.FilecoinPrecision))),
		Meta:    (&genesis.AccountMeta{Owner: rkhKey.Address}).ActorMeta(),
	}
	keys = append(keys, rkhKey)

	templ := &genesis.Template{
		NetworkVersion:   network.Version0,
		Accounts:         genaccs,
		Miners:           genms,
		NetworkName:      "test",
		Timestamp:        uint64(time.Now().Unix()) - (build.BlockDelaySecs * 20000),
		VerifregRootKey:  vrk,
		RemainderAccount: gen.DefaultRemainderAccountActor,
	}

	// END PRESEAL SECTION

	for i := 0; i < len(fullOpts); i++ {
		var genesis node.Option
		if i == 0 {
			genesis = node.Override(new(modules.Genesis), testing2.MakeGenesisMem(&genbuf, *templ))
		} else {
			genesis = node.Override(new(modules.Genesis), modules.LoadGenesis(genbuf.Bytes()))
		}

		stop, err := node.New(ctx,
			node.FullAPI(&fulls[i].FullNode, node.Lite(fullOpts[i].Lite)),
			node.Online(),
			node.Repo(repo.NewMemory(nil)),
			node.MockHost(mn),
			node.Test(),

			node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
			node.Override(new(ffiwrapper.Prover), mock.MockProver),

			// so that we subscribe to pubsub topics immediately
			node.Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(true)),

			genesis,

			fullOpts[i].Opts(fulls),
		)
		if err != nil {
			t.Fatalf("%+v", err)
		}

		t.Cleanup(func() { _ = stop(context.Background()) })

		if rpc {
			fulls[i] = fullRpc(t, fulls[i])
		}

		fulls[i].Stb = storageBuilder(fulls[i], mn, node.Options(
			node.Override(new(*mock.SectorMgr), func() (*mock.SectorMgr, error) {
				return mock.NewMockSectorMgr(nil), nil
			}),

			node.Override(new(sectorstorage.SectorManager), node.From(new(*mock.SectorMgr))),
			node.Override(new(sectorstorage.Unsealer), node.From(new(*mock.SectorMgr))),
			node.Override(new(sectorstorage.PieceProvider), node.From(new(*mock.SectorMgr))),

			node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
			node.Override(new(ffiwrapper.Prover), mock.MockProver),
			node.Unset(new(*sectorstorage.Manager)),
		))
	}

	if _, err := fulls[0].FullNode.WalletImport(ctx, &rkhKey.KeyInfo); err != nil {
		t.Fatal(err)
	}

	for i, def := range storage {
		// TODO: support non-bootstrap miners

		minerID := abi.ActorID(genesis2.MinerStart + uint64(i))

		if def.Full != 0 {
			t.Fatal("storage nodes only supported on the first full node")
		}

		f := fulls[def.Full]
		if _, err := f.FullNode.WalletImport(ctx, &keys[i].KeyInfo); err != nil {
			return nil, nil
		}
		if err := f.FullNode.WalletSetDefault(ctx, keys[i].Address); err != nil {
			return nil, nil
		}

		sectors := make([]abi.SectorID, len(genms[i].Sectors))
		for i, sector := range genms[i].Sectors {
			sectors[i] = abi.SectorID{
				Miner:  minerID,
				Number: sector.SectorID,
			}
		}

		opts := def.Opts
		if opts == nil {
			opts = node.Options()
		}
		miners[i] = CreateTestStorageNode(ctx, t, genms[i].Worker, maddrs[i], pidKeys[i], f, mn, node.Options(
			node.Override(new(*mock.SectorMgr), func() (*mock.SectorMgr, error) {
				return mock.NewMockSectorMgr(sectors), nil
			}),

			node.Override(new(sectorstorage.SectorManager), node.From(new(*mock.SectorMgr))),
			node.Override(new(sectorstorage.Unsealer), node.From(new(*mock.SectorMgr))),
			node.Override(new(sectorstorage.PieceProvider), node.From(new(*mock.SectorMgr))),

			node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
			node.Override(new(ffiwrapper.Prover), mock.MockProver),
			node.Unset(new(*sectorstorage.Manager)),
			opts,
		))

		if rpc {
			miners[i] = storerRpc(t, miners[i])
		}
	}

	if err := mn.LinkAll(); err != nil {
		t.Fatal(err)
	}

	bm := NewBlockMiner(t, miners[0])

	if len(miners) > 0 {
		// Mine 2 blocks to setup some CE stuff in some actors
		var wait sync.Mutex
		wait.Lock()

		bm.MineUntilBlock(ctx, fulls[0], func(abi.ChainEpoch) {
			wait.Unlock()
		})
		wait.Lock()
		bm.MineUntilBlock(ctx, fulls[0], func(abi.ChainEpoch) {
			wait.Unlock()
		})
		wait.Lock()
	}

	return fulls, miners
}
func CreateRPCServer(t *testing.T, handler http.Handler) (*httptest.Server, multiaddr.Multiaddr) {
	testServ := httptest.NewServer(handler)
	t.Cleanup(testServ.Close)
	t.Cleanup(testServ.CloseClientConnections)

	addr := testServ.Listener.Addr()
	maddr, err := manet.FromNetAddr(addr)
	require.NoError(t, err)
	return testServ, maddr
}

func fullRpc(t *testing.T, nd TestFullNode) TestFullNode {
	handler, err := node.FullNodeHandler(nd.FullNode, false)
	require.NoError(t, err)

	srv, maddr := CreateRPCServer(t, handler)

	var ret TestFullNode
	cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
	require.NoError(t, err)
	t.Cleanup(stop)
	ret.ListenAddr, ret.FullNode = maddr, cl

	return ret
}

func storerRpc(t *testing.T, nd TestMiner) TestMiner {
	handler, err := node.MinerHandler(nd.StorageMiner, false)
	require.NoError(t, err)

	srv, maddr := CreateRPCServer(t, handler)

	var ret TestMiner
	cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v0", nil)
	require.NoError(t, err)
	t.Cleanup(stop)

	ret.ListenAddr, ret.StorageMiner, ret.MineOne = maddr, cl, nd.MineOne
	return ret
}
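Editor's note, illustrative only (not part of the commit): the RPC variants above wrap every node with fullRpc/storerRpc, so the returned handles talk to an in-process JSON-RPC server and carry a populated ListenAddr. A sketch under the same import-path assumption as before:

package itests

import (
	"testing"

	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/stretchr/testify/require"
)

// Same topology as the in-process builder, but every API call goes through a real RPC round trip.
func TestKit1RPCBuilderSketch(t *testing.T) {
	fulls, miners := kit.RPCMockMinerBuilder(t, kit.OneFull, kit.OneMiner)

	// fullRpc/storerRpc populate ListenAddr when the nodes are wrapped in RPC clients.
	require.NotNil(t, fulls[0].ListenAddr)
	require.NotNil(t, miners[0].ListenAddr)
}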
153 itests/kit/nodes.go Normal file
@ -0,0 +1,153 @@
package kit

import (
	"context"
	"testing"

	"github.com/multiformats/go-multiaddr"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"

	lapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v1api"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/miner"
	"github.com/filecoin-project/lotus/node"
)

type MinerBuilder func(context.Context, *testing.T, abi.RegisteredSealProof, address.Address) TestMiner

type TestFullNode struct {
	v1api.FullNode
	// ListenAddr is the address on which an API server is listening, if an
	// API server is created for this Node
	ListenAddr multiaddr.Multiaddr

	Stb MinerBuilder
}

type TestMiner struct {
	lapi.StorageMiner
	// ListenAddr is the address on which an API server is listening, if an
	// API server is created for this Node
	ListenAddr multiaddr.Multiaddr

	MineOne func(context.Context, miner.MineReq) error
	Stop    func(context.Context) error
}

var PresealGenesis = -1

const GenesisPreseals = 2

const TestSpt = abi.RegisteredSealProof_StackedDrg2KiBV1_1

// Options for setting up a mock storage Miner
type StorageMiner struct {
	Full    int
	Opts    node.Option
	Preseal int
}

type OptionGenerator func([]TestFullNode) node.Option

// Options for setting up a mock full node
type FullNodeOpts struct {
	Lite bool            // run node in "lite" mode
	Opts OptionGenerator // generate dependency injection options
}

// APIBuilder is a function which is invoked in test suite to provide
// test nodes and networks
//
// fullOpts array defines options for each full node
// storage array defines storage nodes, numbers in the array specify full node
// index the storage node 'belongs' to
type APIBuilder func(t *testing.T, full []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner)

func DefaultFullOpts(nFull int) []FullNodeOpts {
	full := make([]FullNodeOpts, nFull)
	for i := range full {
		full[i] = FullNodeOpts{
			Opts: func(nodes []TestFullNode) node.Option {
				return node.Options()
			},
		}
	}
	return full
}

var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
var OneFull = DefaultFullOpts(1)
var TwoFull = DefaultFullOpts(2)

var FullNodeWithLatestActorsAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
	// Attention: Update this when introducing new actor versions or your tests will be sad
	return FullNodeWithNetworkUpgradeAt(network.Version13, upgradeHeight)
}

var FullNodeWithNetworkUpgradeAt = func(version network.Version, upgradeHeight abi.ChainEpoch) FullNodeOpts {
	fullSchedule := stmgr.UpgradeSchedule{{
		// prepare for upgrade.
		Network:   network.Version9,
		Height:    1,
		Migration: stmgr.UpgradeActorsV2,
	}, {
		Network:   network.Version10,
		Height:    2,
		Migration: stmgr.UpgradeActorsV3,
	}, {
		Network:   network.Version12,
		Height:    3,
		Migration: stmgr.UpgradeActorsV4,
	}, {
		Network:   network.Version13,
		Height:    4,
		Migration: stmgr.UpgradeActorsV5,
	}}

	schedule := stmgr.UpgradeSchedule{}
	for _, upgrade := range fullSchedule {
		if upgrade.Network > version {
			break
		}

		schedule = append(schedule, upgrade)
	}

	if upgradeHeight > 0 {
		schedule[len(schedule)-1].Height = upgradeHeight
	}

	return FullNodeOpts{
		Opts: func(nodes []TestFullNode) node.Option {
			return node.Override(new(stmgr.UpgradeSchedule), schedule)
		},
	}
}

var FullNodeWithSDRAt = func(calico, persian abi.ChainEpoch) FullNodeOpts {
	return FullNodeOpts{
		Opts: func(nodes []TestFullNode) node.Option {
			return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
				Network:   network.Version6,
				Height:    1,
				Migration: stmgr.UpgradeActorsV2,
			}, {
				Network:   network.Version7,
				Height:    calico,
				Migration: stmgr.UpgradeCalico,
			}, {
				Network:   network.Version8,
				Height:    persian,
			}})
		},
	}
}

var MineNext = miner.MineReq{
	InjectNulls: 0,
	Done:        func(bool, abi.ChainEpoch, error) {},
}
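Editor's note, illustrative only (not part of the commit): wiring FullNodeWithNetworkUpgradeAt into the kit1 builder to run a test against a chain that upgrades to a chosen network version at a chosen height. The test name is hypothetical; only helpers defined in this file are used.

package itests

import (
	"testing"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"
	"github.com/filecoin-project/lotus/itests/kit"
)

func TestUpgradeScheduleSketch(t *testing.T) {
	// Upgrade to network.Version10 at epoch 10; earlier upgrades keep the
	// default heights baked into the helper above.
	fullOpts := []kit.FullNodeOpts{kit.FullNodeWithNetworkUpgradeAt(network.Version10, abi.ChainEpoch(10))}
	fulls, miners := kit.MockMinerBuilder(t, fullOpts, kit.OneMiner)
	_, _ = fulls, miners // test body elided
}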
88 itests/kit/pledge.go Normal file
@ -0,0 +1,88 @@
package kit

import (
	"context"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
	"github.com/stretchr/testify/require"
)

func PledgeSectors(t *testing.T, ctx context.Context, miner TestMiner, n, existing int, blockNotif <-chan struct{}) { //nolint:golint
	toCheck := StartPledge(t, ctx, miner, n, existing, blockNotif)

	for len(toCheck) > 0 {
		flushSealingBatches(t, ctx, miner)

		states := map[api.SectorState]int{}
		for n := range toCheck {
			st, err := miner.SectorsStatus(ctx, n, false)
			require.NoError(t, err)
			states[st.State]++
			if st.State == api.SectorState(sealing.Proving) {
				delete(toCheck, n)
			}
			if strings.Contains(string(st.State), "Fail") {
				t.Fatal("sector in a failed state", st.State)
			}
		}

		build.Clock.Sleep(100 * time.Millisecond)
		fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
	}
}

func flushSealingBatches(t *testing.T, ctx context.Context, miner TestMiner) { //nolint:golint
	pcb, err := miner.SectorPreCommitFlush(ctx)
	require.NoError(t, err)
	if pcb != nil {
		fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb)
	}

	cb, err := miner.SectorCommitFlush(ctx)
	require.NoError(t, err)
	if cb != nil {
		fmt.Printf("COMMIT BATCH: %+v\n", cb)
	}
}

func StartPledge(t *testing.T, ctx context.Context, miner TestMiner, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} { //nolint:golint
	for i := 0; i < n; i++ {
		if i%3 == 0 && blockNotif != nil {
			<-blockNotif
			t.Log("WAIT")
		}
		t.Logf("PLEDGING %d", i)
		_, err := miner.PledgeSector(ctx)
		require.NoError(t, err)
	}

	for {
		s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
		require.NoError(t, err)
		fmt.Printf("Sectors: %d\n", len(s))
		if len(s) >= n+existing {
			break
		}

		build.Clock.Sleep(100 * time.Millisecond)
	}

	fmt.Printf("All sectors is fsm\n")

	s, err := miner.SectorsList(ctx)
	require.NoError(t, err)

	toCheck := map[abi.SectorNumber]struct{}{}
	for _, number := range s {
		toCheck[number] = struct{}{}
	}

	return toCheck
}
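Editor's note, illustrative only (not part of the commit): how a test might drive PledgeSectors once it holds a kit1 miner. The helper blocks until the requested sectors reach Proving, so this sketch assumes blocks are already being mined in the background; the test name is invented.

package itests

import (
	"context"
	"testing"

	"github.com/filecoin-project/lotus/itests/kit"
)

func TestPledgeSketch(t *testing.T) {
	ctx := context.Background()
	fulls, miners := kit.MockMinerBuilder(t, kit.OneFull, kit.OneMiner)
	_ = fulls

	// Assumption: a block miner is running elsewhere so sealing can progress.
	// Pledge 3 new sectors (genesis preseals are not imported into the FSM,
	// hence existing=0) and wait until they reach Proving.
	kit.PledgeSectors(t, ctx, miners[0], 3, 0, nil)
}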
124 itests/kit2/blockminer.go Normal file
@ -0,0 +1,124 @@
package kit2

import (
	"context"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/miner"
	"github.com/stretchr/testify/require"
)

// BlockMiner is a utility that makes a test miner Mine blocks on a timer.
type BlockMiner struct {
	t     *testing.T
	miner *TestMiner

	nextNulls int64
	wg        sync.WaitGroup
	cancel    context.CancelFunc
}

func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner {
	return &BlockMiner{
		t:      t,
		miner:  miner,
		cancel: func() {},
	}
}

func (bm *BlockMiner) MineBlocks(ctx context.Context, blocktime time.Duration) {
	time.Sleep(time.Second)

	// wrap context in a cancellable context.
	ctx, bm.cancel = context.WithCancel(ctx)

	bm.wg.Add(1)
	go func() {
		defer bm.wg.Done()

		for {
			select {
			case <-time.After(blocktime):
			case <-ctx.Done():
				return
			}

			nulls := atomic.SwapInt64(&bm.nextNulls, 0)
			err := bm.miner.MineOne(ctx, miner.MineReq{
				InjectNulls: abi.ChainEpoch(nulls),
				Done:        func(bool, abi.ChainEpoch, error) {},
			})
			switch {
			case err == nil: // wrap around
			case ctx.Err() != nil: // context fired.
				return
			default: // log error
				bm.t.Error(err)
			}
		}
	}()
}

// InjectNulls injects the specified amount of null rounds in the next
// mining rounds.
func (bm *BlockMiner) InjectNulls(rounds abi.ChainEpoch) {
	atomic.AddInt64(&bm.nextNulls, int64(rounds))
}

func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn *TestFullNode, cb func(abi.ChainEpoch)) {
	for i := 0; i < 1000; i++ {
		var (
			success bool
			err     error
			epoch   abi.ChainEpoch
			wait    = make(chan struct{})
		)

		doneFn := func(win bool, ep abi.ChainEpoch, e error) {
			success = win
			err = e
			epoch = ep
			wait <- struct{}{}
		}

		mineErr := bm.miner.MineOne(ctx, miner.MineReq{Done: doneFn})
		require.NoError(bm.t, mineErr)
		<-wait

		require.NoError(bm.t, err)

		if success {
			// Wait until it shows up on the given full nodes ChainHead
			nloops := 200
			for i := 0; i < nloops; i++ {
				ts, err := fn.ChainHead(ctx)
				require.NoError(bm.t, err)

				if ts.Height() == epoch {
					break
				}

				require.NotEqual(bm.t, i, nloops-1, "block never managed to sync to node")
				time.Sleep(time.Millisecond * 10)
			}

			if cb != nil {
				cb(epoch)
			}
			return
		}
		bm.t.Log("did not Mine block, trying again", i)
	}
	bm.t.Fatal("failed to Mine 1000 times in a row...")
}

// Stop stops the block miner.
func (bm *BlockMiner) Stop() {
	bm.t.Log("shutting down mining")
	bm.cancel()
	bm.wg.Wait()
}
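Editor's note, illustrative only (not part of the commit): typical use of the kit2 BlockMiner above in a test helper. It mines on a fixed cadence in a background goroutine, injects two null rounds before the next block, and stops cleanly. The helper name and the *TestMiner parameter (obtained from an Ensemble) are assumptions.

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit2"
)

func runBlockMinerSketch(t *testing.T, miner *kit2.TestMiner) {
	ctx := context.Background()

	bm := kit2.NewBlockMiner(t, miner)
	bm.MineBlocks(ctx, 100*time.Millisecond) // returns immediately; mining runs in a goroutine
	defer bm.Stop()

	// Skip two epochs before the next block is produced.
	bm.InjectNulls(2)
}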
107 itests/kit2/client.go Normal file
@ -0,0 +1,107 @@
package kit2

import (
	"context"
	"fmt"
	"path/filepath"
	"regexp"
	"strings"
	"testing"
	"time"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/specs-actors/v2/actors/builtin"
	"github.com/stretchr/testify/require"
	lcli "github.com/urfave/cli/v2"
)

// RunClientTest exercises some of the Client CLI commands
func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode TestFullNode) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// Create mock CLI
	mockCLI := NewMockCLI(ctx, t, cmds)
	clientCLI := mockCLI.Client(clientNode.ListenAddr)

	// Get the Miner address
	addrs, err := clientNode.StateListMiners(ctx, types.EmptyTSK)
	require.NoError(t, err)
	require.Len(t, addrs, 1)

	minerAddr := addrs[0]
	fmt.Println("Miner:", minerAddr)

	// client query-ask <Miner addr>
	out := clientCLI.RunCmd("client", "query-ask", minerAddr.String())
	require.Regexp(t, regexp.MustCompile("Ask:"), out)

	// Create a deal (non-interactive)
	// client deal --start-epoch=<start epoch> <cid> <Miner addr> 1000000attofil <duration>
	res, _ := clientNode.CreateImportFile(ctx, 1, 0)

	require.NoError(t, err)
	startEpoch := fmt.Sprintf("--start-epoch=%d", 2<<12)
	dataCid := res.Root
	price := "1000000attofil"
	duration := fmt.Sprintf("%d", build.MinDealDuration)
	out = clientCLI.RunCmd("client", "deal", startEpoch, dataCid.String(), minerAddr.String(), price, duration)
	fmt.Println("client deal", out)

	// Create a deal (interactive)
	// client deal
	// <cid>
	// <duration> (in days)
	// <miner addr>
	// "no" (verified Client)
	// "yes" (confirm deal)
	res, _ = clientNode.CreateImportFile(ctx, 2, 0)
	require.NoError(t, err)
	dataCid2 := res.Root
	duration = fmt.Sprintf("%d", build.MinDealDuration/builtin.EpochsInDay)
	cmd := []string{"client", "deal"}
	interactiveCmds := []string{
		dataCid2.String(),
		duration,
		minerAddr.String(),
		"no",
		"yes",
	}
	out = clientCLI.RunInteractiveCmd(cmd, interactiveCmds)
	fmt.Println("client deal:\n", out)

	// Wait for provider to start sealing deal
	dealStatus := ""
	for {
		// client list-deals
		out = clientCLI.RunCmd("client", "list-deals")
		fmt.Println("list-deals:\n", out)

		lines := strings.Split(out, "\n")
		require.GreaterOrEqual(t, len(lines), 2)
		re := regexp.MustCompile(`\s+`)
		parts := re.Split(lines[1], -1)
		if len(parts) < 4 {
			require.Fail(t, "bad list-deals output format")
		}
		dealStatus = parts[3]
		fmt.Println(" Deal status:", dealStatus)

		st := CategorizeDealState(dealStatus)
		require.NotEqual(t, TestDealStateFailed, st)
		if st == TestDealStateComplete {
			break
		}

		time.Sleep(time.Second)
	}

	// Retrieve the first file from the Miner
	// client retrieve <cid> <file path>
	tmpdir := t.TempDir()
	path := filepath.Join(tmpdir, "outfile.dat")
	out = clientCLI.RunCmd("client", "retrieve", dataCid.String(), path)
	fmt.Println("retrieve:\n", out)
	require.Regexp(t, regexp.MustCompile("Success"), out)
}
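Editor's note, illustrative only (not part of the commit): RunClientTest drives the lotus `client` CLI against a node, so the TestFullNode it receives must expose a live JSON-RPC endpoint (a populated ListenAddr) and the caller supplies the CLI command set. The clientCmds variable below is a placeholder for whatever []*cli.Command the real caller wires in; it is not defined in this commit.

package itests

import (
	"testing"

	"github.com/filecoin-project/lotus/itests/kit2"
	lcli "github.com/urfave/cli/v2"
)

// clientCmds stands in for the lotus client CLI commands the real tests pass in.
var clientCmds []*lcli.Command

func runClientCLISketch(t *testing.T, full kit2.TestFullNode) {
	// full.ListenAddr must point at a running RPC server for the mock CLI to dial.
	kit2.RunClientTest(t, clientCmds, full)
}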
245 itests/kit2/deals.go Normal file
@ -0,0 +1,245 @@
package kit2

import (
	"bytes"
	"context"
	"io/ioutil"
	"testing"
	"time"

	"github.com/ipfs/go-cid"
	files "github.com/ipfs/go-ipfs-files"
	"github.com/ipld/go-car"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/types"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
	ipld "github.com/ipfs/go-ipld-format"
	dag "github.com/ipfs/go-merkledag"
	dstest "github.com/ipfs/go-merkledag/test"
	unixfile "github.com/ipfs/go-unixfs/file"
)

type DealHarness struct {
	t      *testing.T
	client *TestFullNode
	miner  *TestMiner
}

// NewDealHarness creates a test harness that contains testing utilities for deals.
func NewDealHarness(t *testing.T, client *TestFullNode, miner *TestMiner) *DealHarness {
	return &DealHarness{
		t:      t,
		client: client,
		miner:  miner,
	}
}

// MakeOnlineDeal makes an online deal, generating a random file with the
// supplied seed, and setting the specified fast retrieval flag and start epoch
// on the storage deal. It returns when the deal is sealed.
//
// TODO: convert input parameters to struct, and add size as an input param.
func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, rseed int, fastRet bool, startEpoch abi.ChainEpoch) (deal *cid.Cid, res *api.ImportRes, path string) {
	res, path = dh.client.CreateImportFile(ctx, rseed, 0)

	dh.t.Logf("FILE CID: %s", res.Root)

	deal = dh.StartDeal(ctx, res.Root, fastRet, startEpoch)

	// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
	time.Sleep(time.Second)
	dh.WaitDealSealed(ctx, deal, false, false, nil)

	return deal, res, path
}

// StartDeal starts a storage deal between the client and the miner.
func (dh *DealHarness) StartDeal(ctx context.Context, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid {
	maddr, err := dh.miner.ActorAddress(ctx)
	require.NoError(dh.t, err)

	addr, err := dh.client.WalletDefaultAddress(ctx)
	require.NoError(dh.t, err)

	deal, err := dh.client.ClientStartDeal(ctx, &api.StartDealParams{
		Data: &storagemarket.DataRef{
			TransferType: storagemarket.TTGraphsync,
			Root:         fcid,
		},
		Wallet:            addr,
		Miner:             maddr,
		EpochPrice:        types.NewInt(1000000),
		DealStartEpoch:    startEpoch,
		MinBlocksDuration: uint64(build.MinDealDuration),
		FastRetrieval:     fastRet,
	})
	require.NoError(dh.t, err)

	return deal
}

// WaitDealSealed waits until the deal is sealed.
func (dh *DealHarness) WaitDealSealed(ctx context.Context, deal *cid.Cid, noseal, noSealStart bool, cb func()) {
loop:
	for {
		di, err := dh.client.ClientGetDealInfo(ctx, *deal)
		require.NoError(dh.t, err)

		switch di.State {
		case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing:
			if noseal {
				return
			}
			if !noSealStart {
				dh.StartSealingWaiting(ctx)
			}
		case storagemarket.StorageDealProposalRejected:
			dh.t.Fatal("deal rejected")
		case storagemarket.StorageDealFailing:
			dh.t.Fatal("deal failed")
		case storagemarket.StorageDealError:
			dh.t.Fatal("deal errored", di.Message)
		case storagemarket.StorageDealActive:
			dh.t.Log("COMPLETE", di)
			break loop
		}

		mds, err := dh.miner.MarketListIncompleteDeals(ctx)
		require.NoError(dh.t, err)

		var minerState storagemarket.StorageDealStatus
		for _, md := range mds {
			if md.DealID == di.DealID {
				minerState = md.State
				break
			}
		}

		dh.t.Logf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState])
		time.Sleep(time.Second / 2)
		if cb != nil {
			cb()
		}
	}
}

// WaitDealPublished waits until the deal is published.
func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) {
	subCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	updates, err := dh.miner.MarketGetDealUpdates(subCtx)
	require.NoError(dh.t, err)

	for {
		select {
		case <-ctx.Done():
			dh.t.Fatal("context timeout")
		case di := <-updates:
			if deal.Equals(di.ProposalCid) {
				switch di.State {
				case storagemarket.StorageDealProposalRejected:
					dh.t.Fatal("deal rejected")
				case storagemarket.StorageDealFailing:
					dh.t.Fatal("deal failed")
				case storagemarket.StorageDealError:
					dh.t.Fatal("deal errored", di.Message)
				case storagemarket.StorageDealFinalizing, storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
					dh.t.Log("COMPLETE", di)
					return
				}
				dh.t.Log("Deal state: ", storagemarket.DealStates[di.State])
			}
		}
	}
}

func (dh *DealHarness) StartSealingWaiting(ctx context.Context) {
	snums, err := dh.miner.SectorsList(ctx)
	require.NoError(dh.t, err)

	for _, snum := range snums {
		si, err := dh.miner.SectorsStatus(ctx, snum, false)
		require.NoError(dh.t, err)

		dh.t.Logf("Sector state: %s", si.State)
		if si.State == api.SectorState(sealing.WaitDeals) {
			require.NoError(dh.t, dh.miner.SectorStartSealing(ctx, snum))
		}

		dh.miner.FlushSealingBatches(ctx)
	}
}

func (dh *DealHarness) PerformRetrieval(ctx context.Context, deal *cid.Cid, root cid.Cid, carExport bool) (path string) {
	// perform retrieval.
	info, err := dh.client.ClientGetDealInfo(ctx, *deal)
	require.NoError(dh.t, err)

	offers, err := dh.client.ClientFindData(ctx, root, &info.PieceCID)
	require.NoError(dh.t, err)
	require.NotEmpty(dh.t, offers, "no offers")

	tmpfile, err := ioutil.TempFile(dh.t.TempDir(), "ret-car")
	require.NoError(dh.t, err)

	defer tmpfile.Close()

	caddr, err := dh.client.WalletDefaultAddress(ctx)
	require.NoError(dh.t, err)

	ref := &api.FileRef{
		Path:  tmpfile.Name(),
		IsCAR: carExport,
	}

	updates, err := dh.client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref)
	require.NoError(dh.t, err)

	for update := range updates {
		require.Emptyf(dh.t, update.Err, "retrieval failed: %s", update.Err)
	}

	rdata, err := ioutil.ReadFile(tmpfile.Name())
	require.NoError(dh.t, err)

	if carExport {
		rdata = dh.ExtractFileFromCAR(ctx, rdata)
	}

	return tmpfile.Name()
}

func (dh *DealHarness) ExtractFileFromCAR(ctx context.Context, rdata []byte) []byte {
	bserv := dstest.Bserv()
	ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata))
	require.NoError(dh.t, err)

	b, err := bserv.GetBlock(ctx, ch.Roots[0])
	require.NoError(dh.t, err)

	nd, err := ipld.Decode(b)
	require.NoError(dh.t, err)

	dserv := dag.NewDAGService(bserv)
	fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
	require.NoError(dh.t, err)

	tmpfile, err := ioutil.TempFile(dh.t.TempDir(), "file-in-car")
	require.NoError(dh.t, err)

	defer tmpfile.Close()

	err = files.WriteTo(fil, tmpfile.Name())
	require.NoError(dh.t, err)

	rdata, err = ioutil.ReadFile(tmpfile.Name())
	require.NoError(dh.t, err)

	return rdata
}
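Editor's note, illustrative only (not part of the commit): end-to-end use of the DealHarness above, making an online deal from a random seeded file and retrieving it back (non-CAR). It assumes an Ensemble-built full node and miner that are already connected and mining; the helper name is invented.

package itests

import (
	"context"
	"testing"

	"github.com/filecoin-project/lotus/itests/kit2"
)

func runDealHarnessSketch(t *testing.T, client *kit2.TestFullNode, miner *kit2.TestMiner) {
	ctx := context.Background()
	dh := kit2.NewDealHarness(t, client, miner)

	// rseed=6, fastRet=false, startEpoch=0 (let the market pick a start epoch).
	deal, res, _ := dh.MakeOnlineDeal(ctx, 6, false, 0)

	// Retrieve the stored payload back from the miner; carExport=false.
	retrieved := dh.PerformRetrieval(ctx, deal, res.Root, false)
	t.Logf("retrieved copy at %s", retrieved)
}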
21 itests/kit2/deals_state.go Normal file
@ -0,0 +1,21 @@
package kit2

type TestDealState int

const (
	TestDealStateFailed     = TestDealState(-1)
	TestDealStateInProgress = TestDealState(0)
	TestDealStateComplete   = TestDealState(1)
)

// CategorizeDealState categorizes deal states into one of three states:
// Complete, InProgress, Failed.
func CategorizeDealState(dealStatus string) TestDealState {
	switch dealStatus {
	case "StorageDealFailing", "StorageDealError":
		return TestDealStateFailed
	case "StorageDealStaged", "StorageDealAwaitingPreCommit", "StorageDealSealing", "StorageDealActive", "StorageDealExpired", "StorageDealSlashed":
		return TestDealStateComplete
	}
	return TestDealStateInProgress
}
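Editor's note, illustrative only (not part of the commit): collapsing a raw deal-state string (as printed by `lotus client list-deals`) into the three buckets above, mirroring the polling loop in client.go. The helper name is invented.

package itests

import (
	"testing"

	"github.com/filecoin-project/lotus/itests/kit2"
	"github.com/stretchr/testify/require"
)

func checkDealStatusSketch(t *testing.T, dealStatus string) bool {
	st := kit2.CategorizeDealState(dealStatus)
	require.NotEqual(t, kit2.TestDealStateFailed, st) // fail fast on error states
	return st == kit2.TestDealStateComplete           // caller keeps polling otherwise
}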
@ -1,12 +1,10 @@
-package kit
+package kit2
 
 import (
 	"bytes"
 	"context"
 	"crypto/rand"
 	"io/ioutil"
-	"net/http"
-	"net/http/httptest"
 	"sync"
 	"testing"
 	"time"
@ -15,10 +13,8 @@ import (
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/go-state-types/exitcode"
-	"github.com/filecoin-project/go-state-types/network"
 	"github.com/filecoin-project/go-storedcounter"
 	"github.com/filecoin-project/lotus/api"
-	"github.com/filecoin-project/lotus/api/client"
 	"github.com/filecoin-project/lotus/api/v1api"
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain"
@ -28,7 +24,6 @@ import (
 	"github.com/filecoin-project/lotus/chain/gen"
 	genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
 	"github.com/filecoin-project/lotus/chain/messagepool"
-	"github.com/filecoin-project/lotus/chain/stmgr"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/wallet"
 	"github.com/filecoin-project/lotus/cmd/lotus-seed/seed"
@ -49,8 +44,6 @@ import (
 	libp2pcrypto "github.com/libp2p/go-libp2p-core/crypto"
 	"github.com/libp2p/go-libp2p-core/peer"
 	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
-	"github.com/multiformats/go-multiaddr"
-	manet "github.com/multiformats/go-multiaddr/net"
 	"github.com/stretchr/testify/require"
 )

@ -61,33 +54,51 @@ func init() {
 	messagepool.HeadChangeCoalesceMergeInterval = 100 * time.Nanosecond
 }

-type BuilderOpt func(opts *BuilderOpts) error
-
-type BuilderOpts struct {
-	pastOffset time.Duration
-	spt        abi.RegisteredSealProof
-}
-
-var DefaultBuilderOpts = BuilderOpts{
-	pastOffset: 10000 * time.Second,
-	spt:        abi.RegisteredSealProof_StackedDrg2KiBV1,
-}
-
-func ProofType(proofType abi.RegisteredSealProof) BuilderOpt {
-	return func(opts *BuilderOpts) error {
-		opts.spt = proofType
-		return nil
-	}
-}
-
-// Ensemble is a collection of nodes instantiated within a test. Ensemble
-// supports building full nodes and miners.
+// Ensemble is a collection of nodes instantiated within a test.
+//
+// Create a new ensemble with:
+//
+//   ens := kit.NewEnsemble()
+//
+// Create full nodes and miners:
+//
+//   var full TestFullNode
+//   var miner TestMiner
+//   ens.FullNode(&full, opts...)      // populates a full node
+//   ens.Miner(&miner, &full, opts...) // populates a miner, using the full node as its chain daemon
+//
+// It is possible to pass functional options to set initial balances,
+// presealed sectors, owner keys, etc.
+//
+// After the initial nodes are added, call `ens.Start()` to forge genesis
+// and start the network. Mining will NOT be started automatically. It needs
+// to be started explicitly by calling `BeginMining`.
+//
+// Nodes also need to be connected with one another, either via `ens.Connect()`
+// or `ens.InterconnectAll()`. A common incantation for simple tests is to do:
+//
+//   ens.InterconnectAll().BeginMining(blocktime)
+//
+// You can continue to add more nodes, but you must always follow with
+// `ens.Start()` to activate the new nodes.
+//
+// The API is chainable, so it's possible to do a lot in a very succinct way:
+//
+//   kit.NewEnsemble().FullNode(&full).Miner(&miner, &full).Start().InterconnectAll().BeginMining()
+//
+// You can also find convenient fullnode:miner presets, such as 1:1, 1:2,
+// and 2:1, e.g.:
+//
+//   kit.EnsembleMinimal()
+//   kit.EnsembleOneTwo()
+//   kit.EnsembleTwoOne()
+//
 type Ensemble struct {
 	t            *testing.T
 	bootstrapped bool
 	genesisBlock bytes.Buffer
 	mn           mocknet.Mocknet
-	options      *BuilderOpts
+	options      *ensembleOpts

 	inactive struct {
 		fullnodes []*TestFullNode
@ -103,9 +114,10 @@ type Ensemble struct {
 	}
 }

-// NewEnsemble
-func NewEnsemble(t *testing.T, opts ...BuilderOpt) *Ensemble {
-	options := DefaultBuilderOpts
+// NewEnsemble instantiates a new blank Ensemble. This enables you to
+// programmatically
+func NewEnsemble(t *testing.T, opts ...EnsembleOpt) *Ensemble {
+	options := DefaultEnsembleOpts
 	for _, o := range opts {
 		err := o(&options)
 		require.NoError(t, err)
@ -113,85 +125,6 @@ func NewEnsemble(t *testing.T, opts ...BuilderOpt) *Ensemble {
 	return &Ensemble{t: t, options: &options}
 }

-type NodeOpts struct {
-	balance       abi.TokenAmount
-	lite          bool
-	sectors       int
-	mockProofs    bool
-	rpc           bool
-	ownerKey      *wallet.Key
-	extraNodeOpts []node.Option
-}
-
-const DefaultPresealsPerBootstrapMiner = 2
-
-var DefaultNodeOpts = NodeOpts{
-	balance: big.Mul(big.NewInt(100000000), types.NewInt(build.FilecoinPrecision)),
-	sectors: DefaultPresealsPerBootstrapMiner,
-}
-
-type NodeOpt func(opts *NodeOpts) error
-
-// OwnerBalance specifies the balance to be attributed to a miner's owner account.
-//
-// Only used when creating a miner.
-func OwnerBalance(balance abi.TokenAmount) NodeOpt {
-	return func(opts *NodeOpts) error {
-		opts.balance = balance
-		return nil
-	}
-}
-
-// LiteNode specifies that this node will be a lite node.
-//
-// Only used when creating a fullnode.
-func LiteNode() NodeOpt {
-	return func(opts *NodeOpts) error {
-		opts.lite = true
-		return nil
-	}
-}
-
-// PresealSectors specifies the amount of preseal sectors to give to a miner
-// at genesis.
-//
-// Only used when creating a miner.
-func PresealSectors(sectors int) NodeOpt {
-	return func(opts *NodeOpts) error {
-		opts.sectors = sectors
-		return nil
-	}
-}
-
-// MockProofs activates mock proofs for the entire ensemble.
-func MockProofs() NodeOpt {
-	return func(opts *NodeOpts) error {
-		opts.mockProofs = true
-		return nil
-	}
-}
-
-func ThroughRPC() NodeOpt {
-	return func(opts *NodeOpts) error {
-		opts.rpc = true
-		return nil
-	}
-}
-
-func OwnerAddr(wk *wallet.Key) NodeOpt {
-	return func(opts *NodeOpts) error {
-		opts.ownerKey = wk
-		return nil
-	}
-}
-
-func ExtraNodeOpts(extra ...node.Option) NodeOpt {
-	return func(opts *NodeOpts) error {
-		opts.extraNodeOpts = extra
-		return nil
-	}
-}
-
 // FullNode enrolls a new full node.
 func (n *Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble {
 	options := DefaultNodeOpts
@ -256,7 +189,7 @@ func (n *Ensemble) Miner(miner *TestMiner, full *TestFullNode, opts ...NodeOpt)
 	)

 	// create the preseal commitment.
-	if options.mockProofs {
+	if n.options.mockProofs {
 		genm, k, err = mockstorage.PreSeal(abi.RegisteredSealProof_StackedDrg2KiBV1, actorAddr, sectors)
 	} else {
 		genm, k, err = seed.PreSeal(actorAddr, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, sectors, tdir, []byte("make genesis mem random"), nil, true)
@ -339,7 +272,7 @@ func (n *Ensemble) Start() *Ensemble {
 	}

 	// Are we mocking proofs?
-	if full.options.mockProofs {
+	if n.options.mockProofs {
 		opts = append(opts,
 			node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
 			node.Override(new(ffiwrapper.Prover), mock.MockProver),
@ -389,7 +322,7 @@ func (n *Ensemble) Start() *Ensemble {
 		params, aerr := actors.SerializeParams(&power2.CreateMinerParams{
 			Owner:         m.OwnerKey.Address,
 			Worker:        m.OwnerKey.Address,
-			SealProofType: n.options.spt,
+			SealProofType: n.options.proofType,
 			Peer:          abi.PeerID(m.Libp2p.PeerID),
 		})
 		require.NoError(n.t, aerr)
@ -512,7 +445,7 @@ func (n *Ensemble) Start() *Ensemble {
 			}
 		}

-		if m.options.mockProofs {
+		if n.options.mockProofs {
 			opts = append(opts,
 				node.Override(new(*mock.SectorMgr), func() (*mock.SectorMgr, error) {
 					return mock.NewMockSectorMgr(presealSectors), nil
@ -532,7 +465,7 @@ func (n *Ensemble) Start() *Ensemble {
 		require.NoError(n.t, err)

 		// using real proofs, therefore need real sectors.
-		if !n.bootstrapped && !m.options.mockProofs {
+		if !n.bootstrapped && !n.options.mockProofs {
 			err := m.StorageAddLocal(ctx, m.PresealDir)
 			require.NoError(n.t, err)
 		}
@ -667,99 +600,3 @@ func (n *Ensemble) generateGenesis() *genesis.Template {

 	return templ
 }
-
-func CreateRPCServer(t *testing.T, handler http.Handler) (*httptest.Server, multiaddr.Multiaddr) {
-	testServ := httptest.NewServer(handler)
-	t.Cleanup(testServ.Close)
-	t.Cleanup(testServ.CloseClientConnections)
-
-	addr := testServ.Listener.Addr()
-	maddr, err := manet.FromNetAddr(addr)
-	require.NoError(t, err)
-	return testServ, maddr
-}
-
-func fullRpc(t *testing.T, f *TestFullNode) *TestFullNode {
-	handler, err := node.FullNodeHandler(f.FullNode, false)
-	require.NoError(t, err)
-
-	srv, maddr := CreateRPCServer(t, handler)
-
-	cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
-	require.NoError(t, err)
-	t.Cleanup(stop)
-	f.ListenAddr, f.FullNode = maddr, cl
-
-	return f
-}
-
-func minerRpc(t *testing.T, m *TestMiner) *TestMiner {
-	handler, err := node.MinerHandler(m.StorageMiner, false)
-	require.NoError(t, err)
-
-	srv, maddr := CreateRPCServer(t, handler)
-
-	cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v0", nil)
-	require.NoError(t, err)
-	t.Cleanup(stop)
-
-	m.ListenAddr, m.StorageMiner = maddr, cl
-	return m
-}
-
-func LatestActorsAt(upgradeHeight abi.ChainEpoch) node.Option {
-	// Attention: Update this when introducing new actor versions or your tests will be sad
-	return NetworkUpgradeAt(network.Version13, upgradeHeight)
-}
-
-func NetworkUpgradeAt(version network.Version, upgradeHeight abi.ChainEpoch) node.Option {
-	fullSchedule := stmgr.UpgradeSchedule{{
-		// prepare for upgrade.
-		Network:   network.Version9,
-		Height:    1,
-		Migration: stmgr.UpgradeActorsV2,
-	}, {
-		Network:   network.Version10,
-		Height:    2,
-		Migration: stmgr.UpgradeActorsV3,
-	}, {
-		Network:   network.Version12,
-		Height:    3,
-		Migration: stmgr.UpgradeActorsV4,
-	}, {
-		Network:   network.Version13,
-		Height:    4,
-		Migration: stmgr.UpgradeActorsV5,
-	}}
-
-	schedule := stmgr.UpgradeSchedule{}
-	for _, upgrade := range fullSchedule {
-		if upgrade.Network > version {
-			break
-		}
-
-		schedule = append(schedule, upgrade)
-	}
-
-	if upgradeHeight > 0 {
-		schedule[len(schedule)-1].Height = upgradeHeight
-	}
-
-	return node.Override(new(stmgr.UpgradeSchedule), schedule)
-}
-
-func SDRUpgradeAt(calico, persian abi.ChainEpoch) node.Option {
-	return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
-		Network:   network.Version6,
-		Height:    1,
-		Migration: stmgr.UpgradeActorsV2,
-	}, {
-		Network:   network.Version7,
-		Height:    calico,
-		Migration: stmgr.UpgradeCalico,
-	}, {
-		Network:   network.Version8,
-		Height:    persian,
-	}})
-
-}
35 itests/kit2/ensemble_opts.go Normal file
@@ -0,0 +1,35 @@
package kit2

import (
	"time"

	"github.com/filecoin-project/go-state-types/abi"
)

type EnsembleOpt func(opts *ensembleOpts) error

type ensembleOpts struct {
	pastOffset time.Duration
	proofType  abi.RegisteredSealProof
	mockProofs bool
}

var DefaultEnsembleOpts = ensembleOpts{
	pastOffset: 10000 * time.Second,
	proofType:  abi.RegisteredSealProof_StackedDrg2KiBV1,
}

func ProofType(proofType abi.RegisteredSealProof) EnsembleOpt {
	return func(opts *ensembleOpts) error {
		opts.proofType = proofType
		return nil
	}
}

// MockProofs activates mock proofs for the entire ensemble.
func MockProofs() EnsembleOpt {
	return func(opts *ensembleOpts) error {
		opts.mockProofs = true
		return nil
	}
}
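The EnsembleOpt options above mutate a single ensembleOpts value before any nodes are constructed, so they apply to the whole ensemble. The following usage sketch is illustrative only and not part of this commit; the package and test names are hypothetical, and it assumes the NewEnsemble/FullNode/Miner/Start builder chain defined elsewhere in this package.

package kit2_test // hypothetical package, for illustration only

import (
	"testing"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/itests/kit2"
)

func TestEnsembleOptsSketch(t *testing.T) {
	var (
		full  kit2.TestFullNode
		miner kit2.TestMiner
	)

	// Ensemble-level options are evaluated once, when the ensemble is created,
	// and affect every node built afterwards.
	kit2.NewEnsemble(t,
		kit2.MockProofs(), // swap real proofs for mocks across the whole ensemble
		kit2.ProofType(abi.RegisteredSealProof_StackedDrg2KiBV1), // matches the default; shown for completeness
	).FullNode(&full).Miner(&miner, &full).Start()
}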

70 itests/kit2/ensemble_presets.go Normal file
@@ -0,0 +1,70 @@
package kit2

import "testing"

// EnsembleMinimal creates and starts an Ensemble with a single full node and a single miner.
// It does not interconnect nodes nor does it begin mining.
//
// This function supports passing both ensemble and node functional options.
// Functional options are applied to all nodes.
func EnsembleMinimal(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *Ensemble) {
	eopts, nopts := siftOptions(t, opts)

	var (
		full  TestFullNode
		miner TestMiner
	)
	ens := NewEnsemble(t, eopts...).FullNode(&full, nopts...).Miner(&miner, &full, nopts...).Start()
	return &full, &miner, ens
}

// EnsembleTwoOne creates and starts an Ensemble with two full nodes and one miner.
// It does not interconnect nodes nor does it begin mining.
//
// This function supports passing both ensemble and node functional options.
// Functional options are applied to all nodes.
func EnsembleTwoOne(t *testing.T, opts ...interface{}) (*TestFullNode, *TestFullNode, *TestMiner, *Ensemble) {
	eopts, nopts := siftOptions(t, opts)

	var (
		one, two TestFullNode
		miner    TestMiner
	)
	ens := NewEnsemble(t, eopts...).FullNode(&one, nopts...).FullNode(&two, nopts...).Miner(&miner, &one, nopts...).Start()
	return &one, &two, &miner, ens
}

// EnsembleOneTwo creates and starts an Ensemble with one full node and two miners.
// It does not interconnect nodes nor does it begin mining.
//
// This function supports passing both ensemble and node functional options.
// Functional options are applied to all nodes.
func EnsembleOneTwo(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *TestMiner, *Ensemble) {
	eopts, nopts := siftOptions(t, opts)

	var (
		full     TestFullNode
		one, two TestMiner
	)
	ens := NewEnsemble(t, eopts...).
		FullNode(&full, nopts...).
		Miner(&one, &full, nopts...).
		Miner(&two, &full, nopts...).
		Start()

	return &full, &one, &two, ens
}

func siftOptions(t *testing.T, opts []interface{}) (eopts []EnsembleOpt, nopts []NodeOpt) {
	for _, v := range opts {
		switch o := v.(type) {
		case EnsembleOpt:
			eopts = append(eopts, o)
		case NodeOpt:
			nopts = append(nopts, o)
		default:
			t.Fatalf("invalid option type: %T", o)
		}
	}
	return eopts, nopts
}
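Because the presets take a single ...interface{} list and route it through siftOptions, a caller can mix EnsembleOpt and NodeOpt values in one call; any other type fails the test immediately. A hedged usage sketch, illustrative only and with hypothetical names:

package kit2_test // hypothetical package, for illustration only

import (
	"testing"

	"github.com/filecoin-project/lotus/itests/kit2"
)

func TestPresetSketch(t *testing.T) {
	// An EnsembleOpt (MockProofs) and a NodeOpt (ThroughRPC) travel in the
	// same opts list; siftOptions routes each to the right place.
	full, miner, ens := kit2.EnsembleMinimal(t, kit2.MockProofs(), kit2.ThroughRPC())
	_, _, _ = full, miner, ens

	// Per its docs, the preset starts the nodes but does not interconnect
	// them or begin mining; the test drives that explicitly via the Ensemble.
}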

@@ -1,4 +1,4 @@
-package kit
+package kit2
 
 import (
 	"bytes"
@@ -31,8 +31,9 @@ func CreateRandomFile(t *testing.T, rseed, size int) (path string) {
 	return file.Name()
 }
 
-// FilesEqual compares two files by blake2b hash equality.
-func FilesEqual(t *testing.T, left, right string) bool {
+// AssertFilesEqual compares two files by blake2b hash equality and
+// fails the test if unequal.
+func AssertFilesEqual(t *testing.T, left, right string) {
 	// initialize hashes.
 	leftH, rightH := blake2b.New256(), blake2b.New256()
 
@@ -53,5 +54,5 @@ func FilesEqual(t *testing.T, left, right string) bool {
 	// compute digests.
 	leftD, rightD := leftH.Sum(nil), rightH.Sum(nil)
 
-	return bytes.Equal(leftD, rightD)
+	require.True(t, bytes.Equal(leftD, rightD))
 }
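The rename from FilesEqual to AssertFilesEqual tracks a behavioural change: the helper no longer returns a bool for the caller to assert on, it fails the test itself. A usage sketch, illustrative only; it assumes CreateRandomFile is seed-deterministic, which its rseed parameter suggests:

package kit2_test // hypothetical package, for illustration only

import (
	"testing"

	"github.com/filecoin-project/lotus/itests/kit2"
)

func TestFilesEqualSketch(t *testing.T) {
	// Two files generated from the same seed and size, assumed to have
	// identical contents.
	left := kit2.CreateRandomFile(t, 1, 2<<10)
	right := kit2.CreateRandomFile(t, 1, 2<<10)

	// AssertFilesEqual hashes both files with blake2b and fails the test
	// itself if the digests differ; there is no bool to check any more.
	kit2.AssertFilesEqual(t, left, right)
}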

34 itests/kit2/funds.go Normal file
@@ -0,0 +1,34 @@
package kit2

import (
	"context"
	"testing"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// SendFunds sends funds from the default wallet of the specified sender node
// to the recipient address.
func SendFunds(ctx context.Context, t *testing.T, sender TestFullNode, recipient address.Address, amount abi.TokenAmount) {
	senderAddr, err := sender.WalletDefaultAddress(ctx)
	require.NoError(t, err)

	msg := &types.Message{
		From:  senderAddr,
		To:    recipient,
		Value: amount,
	}

	sm, err := sender.MpoolPushMessage(ctx, msg, nil)
	require.NoError(t, err)

	res, err := sender.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true)
	require.NoError(t, err)

	require.Equal(t, 0, res.Receipt.ExitCode, "did not successfully send funds")
}
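SendFunds pushes a plain value transfer from the sender's default wallet and waits for it to land on chain with a confidence of 3 epochs, failing the test on any error or non-zero exit code. A hedged call-site sketch, illustrative only; the recipient wallet is hypothetical, and a real test would also need the ensemble interconnected and mining so the message can be included:

package kit2_test // hypothetical package, for illustration only

import (
	"context"
	"testing"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/itests/kit2"
	"github.com/stretchr/testify/require"
)

func TestSendFundsSketch(t *testing.T) {
	ctx := context.Background()

	full, _, _ := kit2.EnsembleMinimal(t)

	// Hypothetical recipient: a fresh BLS wallet on the same node.
	recipient, err := full.WalletNew(ctx, types.KTBLS)
	require.NoError(t, err)

	// Transfer 10 FIL; the helper waits for inclusion and asserts success.
	// Block production must be running elsewhere for StateWaitMsg to return.
	kit2.SendFunds(ctx, t, *full, recipient, types.FromFil(10))
}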

29 itests/kit2/init.go Normal file
@@ -0,0 +1,29 @@
package kit2

import (
	"fmt"
	"os"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors/policy"
	logging "github.com/ipfs/go-log/v2"
)

func init() {
	_ = logging.SetLogLevel("*", "INFO")

	policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
	policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
	policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))

	build.InsecurePoStValidation = true

	if err := os.Setenv("BELLMAN_NO_GPU", "1"); err != nil {
		panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
	}

	if err := os.Setenv("LOTUS_DISABLE_WATCHDOG", "1"); err != nil {
		panic(fmt.Sprintf("failed to set LOTUS_DISABLE_WATCHDOG env variable: %s", err))
	}
}

19 itests/kit2/log.go Normal file
@@ -0,0 +1,19 @@
package kit2

import (
	"github.com/filecoin-project/lotus/lib/lotuslog"
	logging "github.com/ipfs/go-log/v2"
)

func QuietMiningLogs() {
	lotuslog.SetupLogLevels()

	_ = logging.SetLogLevel("miner", "ERROR")
	_ = logging.SetLogLevel("chainstore", "ERROR")
	_ = logging.SetLogLevel("chain", "ERROR")
	_ = logging.SetLogLevel("sub", "ERROR")
	_ = logging.SetLogLevel("storageminer", "ERROR")
	_ = logging.SetLogLevel("pubsub", "ERROR")
	_ = logging.SetLogLevel("gen", "ERROR")
	_ = logging.SetLogLevel("dht/RtRefreshManager", "ERROR")
}
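QuietMiningLogs is meant to be called once at the top of noisy mining tests; a minimal sketch, illustrative only and with hypothetical names:

package kit2_test // hypothetical package, for illustration only

import (
	"testing"

	"github.com/filecoin-project/lotus/itests/kit2"
)

func TestQuietSketch(t *testing.T) {
	// Silence the noisiest mining-related subsystems before the ensemble
	// starts producing blocks; other loggers keep the INFO level set in init().
	kit2.QuietMiningLogs()

	_, _, _ = kit2.EnsembleMinimal(t)
}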

@@ -1,4 +1,4 @@
-package kit
+package kit2
 
 import (
 	"context"
@@ -11,6 +11,7 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+// TestFullNode represents a full node enrolled in an Ensemble.
 type TestFullNode struct {
 	v1api.FullNode
 
@@ -21,9 +22,11 @@ type TestFullNode struct {
 	ListenAddr multiaddr.Multiaddr
 	DefaultKey *wallet.Key
 
-	options NodeOpts
+	options nodeOpts
 }
 
+// CreateImportFile creates a random file with the specified seed and size, and
+// imports it into the full node.
 func (f *TestFullNode) CreateImportFile(ctx context.Context, rseed int, size int) (res *api.ImportRes, path string) {
 	path = CreateRandomFile(f.t, rseed, size)
 	res, err := f.ClientImport(ctx, api.FileRef{Path: path})
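CreateImportFile combines CreateRandomFile with ClientImport and returns both the import result and the on-disk path, which deal tests typically feed into a deal harness. A hedged sketch of a call site, illustrative only and with hypothetical names:

package kit2_test // hypothetical package, for illustration only

import (
	"context"
	"testing"

	"github.com/filecoin-project/lotus/itests/kit2"
	"github.com/stretchr/testify/require"
)

func TestCreateImportFileSketch(t *testing.T) {
	ctx := context.Background()

	full, _, _ := kit2.EnsembleMinimal(t)

	// Write a 1 KiB pseudo-random file (seed 5) and import it into the node's
	// client store; res.Root carries the resulting payload CID.
	res, path := full.CreateImportFile(ctx, 5, 1024)
	require.NotEmpty(t, path)
	require.NotNil(t, res)
	t.Logf("imported %s as %s", path, res.Root)
}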

@@ -1,4 +1,4 @@
-package kit
+package kit2
 
 import (
 	"context"
@@ -20,6 +20,7 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+// TestMiner represents a miner enrolled in an Ensemble.
 type TestMiner struct {
 	api.StorageMiner
 
@@ -42,12 +43,7 @@ type TestMiner struct {
 		PrivKey libp2pcrypto.PrivKey
 	}
 
-	options NodeOpts
+	options nodeOpts
 }
-
-var MineNext = miner.MineReq{
-	InjectNulls: 0,
-	Done:        func(bool, abi.ChainEpoch, error) {},
-}
 
 func (tm *TestMiner) PledgeSectors(ctx context.Context, n, existing int, blockNotif <-chan struct{}) {
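Besides the kit2 rename of the options field, this hunk drops the MineNext request from the miner file. A hedged sketch of PledgeSectors usage, illustrative only; a real test would also need block production running so the pledged sectors can be proven:

package kit2_test // hypothetical package, for illustration only

import (
	"context"
	"testing"

	"github.com/filecoin-project/lotus/itests/kit2"
)

func TestPledgeSketch(t *testing.T) {
	ctx := context.Background()

	_, miner, _ := kit2.EnsembleMinimal(t, kit2.MockProofs())

	// Pledge 2 committed-capacity sectors, starting from 0 existing sectors.
	// The nil channel skips the optional block-notification synchronisation.
	miner.PledgeSectors(ctx, 2, 0, nil)
}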

89 itests/kit2/node_opts.go Normal file
@@ -0,0 +1,89 @@
package kit2

import (
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/wallet"
	"github.com/filecoin-project/lotus/node"
)

// DefaultPresealsPerBootstrapMiner is the number of preseals that every
// bootstrap miner has by default. It can be overridden through the
// PresealSectors option.
const DefaultPresealsPerBootstrapMiner = 2

// nodeOpts is an options accumulating struct, where functional options are
// merged into.
type nodeOpts struct {
	balance       abi.TokenAmount
	lite          bool
	sectors       int
	rpc           bool
	ownerKey      *wallet.Key
	extraNodeOpts []node.Option
}

// DefaultNodeOpts are the default options that will be applied to test nodes.
var DefaultNodeOpts = nodeOpts{
	balance: big.Mul(big.NewInt(100000000), types.NewInt(build.FilecoinPrecision)),
	sectors: DefaultPresealsPerBootstrapMiner,
}

// NodeOpt is a functional option for test nodes.
type NodeOpt func(opts *nodeOpts) error

// OwnerBalance specifies the balance to be attributed to a miner's owner
// account. Only relevant when creating a miner.
func OwnerBalance(balance abi.TokenAmount) NodeOpt {
	return func(opts *nodeOpts) error {
		opts.balance = balance
		return nil
	}
}

// LiteNode specifies that this node will be a lite node. Only relevant when
// creating a fullnode.
func LiteNode() NodeOpt {
	return func(opts *nodeOpts) error {
		opts.lite = true
		return nil
	}
}

// PresealSectors specifies the amount of preseal sectors to give to a miner
// at genesis. Only relevant when creating a miner.
func PresealSectors(sectors int) NodeOpt {
	return func(opts *nodeOpts) error {
		opts.sectors = sectors
		return nil
	}
}

// ThroughRPC makes interactions with this node throughout the test flow through
// the JSON-RPC API.
func ThroughRPC() NodeOpt {
	return func(opts *nodeOpts) error {
		opts.rpc = true
		return nil
	}
}

// OwnerAddr sets the owner address of a miner. Only relevant when creating
// a miner.
func OwnerAddr(wk *wallet.Key) NodeOpt {
	return func(opts *nodeOpts) error {
		opts.ownerKey = wk
		return nil
	}
}

// ConstructorOpts are Lotus node constructor options that are passed as-is to
// the node.
func ConstructorOpts(extra ...node.Option) NodeOpt {
	return func(opts *nodeOpts) error {
		opts.extraNodeOpts = extra
		return nil
	}
}
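Unlike the presets, which apply the same options to every node, the Ensemble builder accepts NodeOpt values per node, so individual nodes can diverge from DefaultNodeOpts. A hedged sketch, illustrative only; the balance expression just mirrors the DefaultNodeOpts pattern with a different figure:

package kit2_test // hypothetical package, for illustration only

import (
	"testing"

	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/itests/kit2"
)

func TestNodeOptsSketch(t *testing.T) {
	var (
		full  kit2.TestFullNode
		lite  kit2.TestFullNode
		miner kit2.TestMiner
	)

	// Per-node options: the second full node runs in lite mode, and the miner
	// gets 10 preseals plus a larger owner balance, while the first full node
	// keeps DefaultNodeOpts.
	kit2.NewEnsemble(t).
		FullNode(&full).
		FullNode(&lite, kit2.LiteNode()).
		Miner(&miner, &full,
			kit2.PresealSectors(10),
			kit2.OwnerBalance(big.Mul(big.NewInt(1_000_000), types.NewInt(build.FilecoinPrecision))),
		).
		Start()
}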

65 itests/kit2/node_opts_nv.go Normal file
@@ -0,0 +1,65 @@
package kit2

import (
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/node"
)

func LatestActorsAt(upgradeHeight abi.ChainEpoch) node.Option {
	// Attention: Update this when introducing new actor versions or your tests will be sad
	return NetworkUpgradeAt(network.Version13, upgradeHeight)
}

func NetworkUpgradeAt(version network.Version, upgradeHeight abi.ChainEpoch) node.Option {
	fullSchedule := stmgr.UpgradeSchedule{{
		// prepare for upgrade.
		Network:   network.Version9,
		Height:    1,
		Migration: stmgr.UpgradeActorsV2,
	}, {
		Network:   network.Version10,
		Height:    2,
		Migration: stmgr.UpgradeActorsV3,
	}, {
		Network:   network.Version12,
		Height:    3,
		Migration: stmgr.UpgradeActorsV4,
	}, {
		Network:   network.Version13,
		Height:    4,
		Migration: stmgr.UpgradeActorsV5,
	}}

	schedule := stmgr.UpgradeSchedule{}
	for _, upgrade := range fullSchedule {
		if upgrade.Network > version {
			break
		}

		schedule = append(schedule, upgrade)
	}

	if upgradeHeight > 0 {
		schedule[len(schedule)-1].Height = upgradeHeight
	}

	return node.Override(new(stmgr.UpgradeSchedule), schedule)
}

func SDRUpgradeAt(calico, persian abi.ChainEpoch) node.Option {
	return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
		Network:   network.Version6,
		Height:    1,
		Migration: stmgr.UpgradeActorsV2,
	}, {
		Network:   network.Version7,
		Height:    calico,
		Migration: stmgr.UpgradeCalico,
	}, {
		Network:   network.Version8,
		Height:    persian,
	}})

}
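These helpers return node.Option values rather than NodeOpts, so they reach a test node through the ConstructorOpts option. A hedged sketch scheduling the latest actors upgrade at a chosen epoch, illustrative only and with hypothetical names:

package kit2_test // hypothetical package, for illustration only

import (
	"testing"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/itests/kit2"
)

func TestUpgradeScheduleSketch(t *testing.T) {
	// Schedule the upgrade to the latest supported network version (v13,
	// actors v5 at the time of this commit) at epoch 10. Because
	// LatestActorsAt returns a node.Option, it is injected via ConstructorOpts.
	upgradeAt := abi.ChainEpoch(10)

	full, miner, ens := kit2.EnsembleMinimal(t,
		kit2.MockProofs(),
		kit2.ConstructorOpts(kit2.LatestActorsAt(upgradeAt)),
	)
	_, _, _ = full, miner, ens
}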

53 itests/kit2/rpc.go Normal file
@@ -0,0 +1,53 @@
package kit2

import (
	"context"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/filecoin-project/lotus/api/client"
	"github.com/filecoin-project/lotus/node"
	"github.com/multiformats/go-multiaddr"
	manet "github.com/multiformats/go-multiaddr/net"
	"github.com/stretchr/testify/require"
)

func CreateRPCServer(t *testing.T, handler http.Handler) (*httptest.Server, multiaddr.Multiaddr) {
	testServ := httptest.NewServer(handler)
	t.Cleanup(testServ.Close)
	t.Cleanup(testServ.CloseClientConnections)

	addr := testServ.Listener.Addr()
	maddr, err := manet.FromNetAddr(addr)
	require.NoError(t, err)
	return testServ, maddr
}

func fullRpc(t *testing.T, f *TestFullNode) *TestFullNode {
	handler, err := node.FullNodeHandler(f.FullNode, false)
	require.NoError(t, err)

	srv, maddr := CreateRPCServer(t, handler)

	cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
	require.NoError(t, err)
	t.Cleanup(stop)
	f.ListenAddr, f.FullNode = maddr, cl

	return f
}

func minerRpc(t *testing.T, m *TestMiner) *TestMiner {
	handler, err := node.MinerHandler(m.StorageMiner, false)
	require.NoError(t, err)

	srv, maddr := CreateRPCServer(t, handler)

	cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v0", nil)
	require.NoError(t, err)
	t.Cleanup(stop)

	m.ListenAddr, m.StorageMiner = maddr, cl
	return m
}
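CreateRPCServer is exported for direct use, while fullRpc and minerRpc are unexported and appear to be what the ensemble invokes when a node is built with ThroughRPC. A hedged sketch of the exported helper with an arbitrary handler, illustrative only and with hypothetical names:

package kit2_test // hypothetical package, for illustration only

import (
	"net/http"
	"testing"

	"github.com/filecoin-project/lotus/itests/kit2"
	"github.com/stretchr/testify/require"
)

func TestCreateRPCServerSketch(t *testing.T) {
	// Any http.Handler works; the helper returns the httptest server plus the
	// listen address converted to a multiaddr, and registers cleanup hooks.
	handler := http.NewServeMux()
	srv, maddr := kit2.CreateRPCServer(t, handler)

	require.NotNil(t, srv)
	t.Logf("test RPC server listening on %s (%s)", srv.Listener.Addr(), maddr)
}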