Merge pull request #6329 from filecoin-project/raulk/itests-refactor-kit

revamped integration test kit (aka. Operation Sparks Joy)
Łukasz Magiera 2021-06-28 13:20:49 +02:00 committed by GitHub
commit cefd140e45
46 changed files with 2522 additions and 2998 deletions
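The pattern the refactor converges on is visible throughout the diff below: tests declare an ensemble of nodes, interconnect them, begin mining, and then drive deals through a DealHarness instead of wiring nodes up by hand. A minimal sketch assembled only from calls that appear in the changed files (the test name is illustrative; the exact signatures live in itests/kit):

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit"
)

func TestEnsembleSketch(t *testing.T) {
	kit.QuietMiningLogs()

	// One full node and one miner with mock proofs; wire them up and start mining.
	client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
	ens.InterconnectAll().BeginMining(10 * time.Millisecond)

	// The deal harness drives a full storage + retrieval cycle.
	dh := kit.NewDealHarness(t, client, miner)
	deal, res, inPath := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{Rseed: 6})
	outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
	kit.AssertFilesEqual(t, inPath, outPath)
}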

View File

@@ -771,6 +771,31 @@ workflows:
suite: itest-deadlines
target: "./itests/deadlines_test.go"
- test:
name: test-itest-deals_concurrent
suite: itest-deals_concurrent
target: "./itests/deals_concurrent_test.go"
- test:
name: test-itest-deals_offline
suite: itest-deals_offline
target: "./itests/deals_offline_test.go"
- test:
name: test-itest-deals_power
suite: itest-deals_power
target: "./itests/deals_power_test.go"
- test:
name: test-itest-deals_pricing
suite: itest-deals_pricing
target: "./itests/deals_pricing_test.go"
- test:
name: test-itest-deals_publish
suite: itest-deals_publish
target: "./itests/deals_publish_test.go"
- test:
name: test-itest-deals
suite: itest-deals

View File

@@ -10,14 +10,13 @@ import (
"testing"
"time"
logging "github.com/ipfs/go-log/v2"
"github.com/filecoin-project/go-state-types/network"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit"
@@ -32,36 +31,21 @@ func TestWorkerKeyChange(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_ = logging.SetLogLevel("*", "INFO")
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
kit.QuietMiningLogs()
blocktime := 1 * time.Millisecond
clients, miners := kit.MockMinerBuilder(t,
[]kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1), kit.FullNodeWithLatestActorsAt(-1)},
kit.OneMiner)
client1 := clients[0]
client2 := clients[1]
// Connect the nodes.
addrinfo, err := client1.NetAddrsListen(ctx)
require.NoError(t, err)
err = client2.NetConnect(ctx, addrinfo)
require.NoError(t, err)
client1, client2, miner, ens := kit.EnsembleTwoOne(t, kit.MockProofs(),
kit.ConstructorOpts(kit.InstantaneousNetworkVersion(network.Version13)),
)
ens.InterconnectAll().BeginMining(blocktime)
output := bytes.NewBuffer(nil)
run := func(cmd *cli.Command, args ...string) error {
app := cli.NewApp()
app.Metadata = map[string]interface{}{
"repoType": repo.StorageMiner,
"testnode-full": clients[0],
"testnode-storage": miners[0],
"testnode-full": client1,
"testnode-storage": miner,
}
app.Writer = output
api.RunningNodeType = api.NodeMiner
@@ -78,9 +62,6 @@ func TestWorkerKeyChange(t *testing.T) {
return cmd.Action(cctx)
}
// start mining
kit.ConnectAndStartMining(t, blocktime, miners[0], client1, client2)
newKey, err := client1.WalletNew(ctx, types.KTBLS)
require.NoError(t, err)
@@ -105,14 +86,8 @@ func TestWorkerKeyChange(t *testing.T) {
require.Error(t, run(actorConfirmChangeWorker, "--really-do-it", newKey.String()))
output.Reset()
for {
head, err := client1.ChainHead(ctx)
require.NoError(t, err)
if head.Height() >= abi.ChainEpoch(targetEpoch) {
break
}
build.Clock.Sleep(10 * blocktime)
}
client1.WaitTillChain(ctx, kit.HeightAtLeast(abi.ChainEpoch(targetEpoch)))
require.NoError(t, run(actorConfirmChangeWorker, "--really-do-it", newKey.String()))
output.Reset()
@@ -121,23 +96,8 @@ func TestWorkerKeyChange(t *testing.T) {
// Wait for finality (worker key switch).
targetHeight := head.Height() + policy.ChainFinality
for {
head, err := client1.ChainHead(ctx)
require.NoError(t, err)
if head.Height() >= targetHeight {
break
}
build.Clock.Sleep(10 * blocktime)
}
client1.WaitTillChain(ctx, kit.HeightAtLeast(targetHeight))
// Make sure the other node can catch up.
for i := 0; i < 20; i++ {
head, err := client2.ChainHead(ctx)
require.NoError(t, err)
if head.Height() >= targetHeight {
return
}
build.Clock.Sleep(10 * blocktime)
}
t.Fatal("failed to reach target epoch on the second miner")
client2.WaitTillChain(ctx, kit.HeightAtLeast(targetHeight))
}
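
The net effect of the hunks above is that the hand-rolled polling loops are replaced by the kit's predicate-based wait. A condensed, self-contained sketch of that usage, built only from helpers visible in this diff (the test name and the target epoch are illustrative):

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/chain/actors/policy"
	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/stretchr/testify/require"
)

func TestWaitTillChainSketch(t *testing.T) {
	ctx := context.Background()

	client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs())
	ens.InterconnectAll().BeginMining(time.Millisecond)

	// Wait until the chain reaches a fixed target epoch.
	client.WaitTillChain(ctx, kit.HeightAtLeast(abi.ChainEpoch(100)))

	// Or wait for finality past the current head.
	head, err := client.ChainHead(ctx)
	require.NoError(t, err)
	client.WaitTillChain(ctx, kit.HeightAtLeast(head.Height()+policy.ChainFinality))
}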

View File

@@ -7,13 +7,9 @@ import (
"time"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node/impl"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/node/repo"
@@ -24,12 +20,6 @@ func TestMinerAllInfo(t *testing.T) {
t.Skip("skipping test in short mode")
}
_ = logging.SetLogLevel("*", "INFO")
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
_test = true
kit.QuietMiningLogs()
@@ -40,16 +30,15 @@ func TestMinerAllInfo(t *testing.T) {
policy.SetPreCommitChallengeDelay(oldDelay)
})
n, sn := kit.Builder(t, kit.OneFull, kit.OneMiner)
client, miner := n[0].FullNode, sn[0]
kit.ConnectAndStartMining(t, time.Second, miner, client.(*impl.FullNodeAPI))
client, miner, ens := kit.EnsembleMinimal(t)
ens.InterconnectAll().BeginMining(time.Second)
run := func(t *testing.T) {
app := cli.NewApp()
app.Metadata = map[string]interface{}{
"repoType": repo.StorageMiner,
"testnode-full": n[0],
"testnode-storage": sn[0],
"testnode-full": client,
"testnode-storage": miner,
}
api.RunningNodeType = api.NodeMiner
@@ -61,14 +50,9 @@ func TestMinerAllInfo(t *testing.T) {
t.Run("pre-info-all", run)
dh := kit.NewDealHarness(t, client, miner)
_, _, _ = dh.MakeFullDeal(kit.MakeFullDealParams{
Ctx: context.Background(),
Rseed: 6,
CarExport: false,
FastRet: false,
StartEpoch: 0,
DoRetrieval: true,
})
deal, res, inPath := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{Rseed: 6})
outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
kit.AssertFilesEqual(t, inPath, outPath)
t.Run("post-info-all", run)
}

View File

@@ -7,34 +7,32 @@ import (
"time"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node/impl"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAPI(t *testing.T) {
t.Run("direct", func(t *testing.T) {
runAPITest(t, kit.Builder)
runAPITest(t)
})
t.Run("rpc", func(t *testing.T) {
runAPITest(t, kit.RPCBuilder)
runAPITest(t, kit.ThroughRPC())
})
}
type apiSuite struct {
makeNodes kit.APIBuilder
opts []interface{}
}
// runAPITest is the entry point to the API test suite
func runAPITest(t *testing.T, b kit.APIBuilder) {
ts := apiSuite{
makeNodes: b,
}
func runAPITest(t *testing.T, opts ...interface{}) {
ts := apiSuite{opts: opts}
t.Run("version", ts.testVersion)
t.Run("id", ts.testID)
@@ -51,145 +49,114 @@ func (ts *apiSuite) testVersion(t *testing.T) {
lapi.RunningNodeType = lapi.NodeUnknown
})
ctx := context.Background()
apis, _ := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
napi := apis[0]
full, _, _ := kit.EnsembleMinimal(t, ts.opts...)
v, err := full.Version(context.Background())
require.NoError(t, err)
v, err := napi.Version(ctx)
if err != nil {
t.Fatal(err)
}
versions := strings.Split(v.Version, "+")
if len(versions) <= 0 {
t.Fatal("empty version")
}
require.NotZero(t, len(versions), "empty version")
require.Equal(t, versions[0], build.BuildVersion)
}
func (ts *apiSuite) testSearchMsg(t *testing.T) {
apis, miners := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
func (ts *apiSuite) testID(t *testing.T) {
ctx := context.Background()
api := apis[0]
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
senderAddr, err := api.WalletDefaultAddress(ctx)
full, _, _ := kit.EnsembleMinimal(t, ts.opts...)
id, err := full.ID(ctx)
if err != nil {
t.Fatal(err)
}
require.Regexp(t, "^12", id.Pretty())
}
func (ts *apiSuite) testConnectTwo(t *testing.T) {
ctx := context.Background()
one, two, _, ens := kit.EnsembleTwoOne(t, ts.opts...)
p, err := one.NetPeers(ctx)
require.NoError(t, err)
require.Empty(t, p, "node one has peers")
p, err = two.NetPeers(ctx)
require.NoError(t, err)
require.Empty(t, p, "node two has peers")
ens.InterconnectAll()
peers, err := one.NetPeers(ctx)
require.NoError(t, err)
require.Lenf(t, peers, 2, "node one doesn't have 2 peers")
peers, err = two.NetPeers(ctx)
require.NoError(t, err)
require.Lenf(t, peers, 2, "node two doesn't have 2 peers")
}
func (ts *apiSuite) testSearchMsg(t *testing.T) {
ctx := context.Background()
full, _, ens := kit.EnsembleMinimal(t, ts.opts...)
senderAddr, err := full.WalletDefaultAddress(ctx)
require.NoError(t, err)
msg := &types.Message{
From: senderAddr,
To: senderAddr,
Value: big.Zero(),
}
bm := kit.NewBlockMiner(t, miners[0])
bm.MineBlocks(ctx, 100*time.Millisecond)
defer bm.Stop()
sm, err := api.MpoolPushMessage(ctx, msg, nil)
if err != nil {
t.Fatal(err)
}
res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send message")
}
ens.BeginMining(100 * time.Millisecond)
searchRes, err := api.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
sm, err := full.MpoolPushMessage(ctx, msg, nil)
require.NoError(t, err)
if searchRes.TipSet != res.TipSet {
t.Fatalf("search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet)
}
res, err := full.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
require.NoError(t, err)
}
require.Equal(t, exitcode.Ok, res.Receipt.ExitCode, "message not successful")
func (ts *apiSuite) testID(t *testing.T) {
ctx := context.Background()
apis, _ := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
api := apis[0]
searchRes, err := full.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true)
require.NoError(t, err)
id, err := api.ID(ctx)
if err != nil {
t.Fatal(err)
}
assert.Regexp(t, "^12", id.Pretty())
}
func (ts *apiSuite) testConnectTwo(t *testing.T) {
ctx := context.Background()
apis, _ := ts.makeNodes(t, kit.TwoFull, kit.OneMiner)
p, err := apis[0].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 0 {
t.Error("Node 0 has a peer")
}
p, err = apis[1].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 0 {
t.Error("Node 1 has a peer")
}
addrs, err := apis[1].NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := apis[0].NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
p, err = apis[0].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 1 {
t.Error("Node 0 doesn't have 1 peer")
}
p, err = apis[1].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 1 {
t.Error("Node 0 doesn't have 1 peer")
}
require.Equalf(t, res.TipSet, searchRes.TipSet, "search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet)
}
func (ts *apiSuite) testMining(t *testing.T) {
ctx := context.Background()
fulls, miners := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
api := fulls[0]
newHeads, err := api.ChainNotify(ctx)
full, miner, _ := kit.EnsembleMinimal(t, ts.opts...)
newHeads, err := full.ChainNotify(ctx)
require.NoError(t, err)
initHead := (<-newHeads)[0]
baseHeight := initHead.Val.Height()
h1, err := api.ChainHead(ctx)
h1, err := full.ChainHead(ctx)
require.NoError(t, err)
require.Equal(t, int64(h1.Height()), int64(baseHeight))
bm := kit.NewBlockMiner(t, miners[0])
bm.MineUntilBlock(ctx, fulls[0], nil)
bm := kit.NewBlockMiner(t, miner)
bm.MineUntilBlock(ctx, full, nil)
require.NoError(t, err)
<-newHeads
h2, err := api.ChainHead(ctx)
h2, err := full.ChainHead(ctx)
require.NoError(t, err)
require.Greater(t, int64(h2.Height()), int64(h1.Height()))
bm.MineUntilBlock(ctx, full, nil)
require.NoError(t, err)
<-newHeads
h3, err := full.ChainHead(ctx)
require.NoError(t, err)
require.Greater(t, int64(h3.Height()), int64(h2.Height()))
}
func (ts *apiSuite) testMiningReal(t *testing.T) {
@@ -198,66 +165,30 @@ func (ts *apiSuite) testMiningReal(t *testing.T) {
build.InsecurePoStValidation = true
}()
ctx := context.Background()
fulls, miners := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
api := fulls[0]
newHeads, err := api.ChainNotify(ctx)
require.NoError(t, err)
at := (<-newHeads)[0].Val.Height()
h1, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Equal(t, int64(at), int64(h1.Height()))
bm := kit.NewBlockMiner(t, miners[0])
bm.MineUntilBlock(ctx, fulls[0], nil)
require.NoError(t, err)
<-newHeads
h2, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Greater(t, int64(h2.Height()), int64(h1.Height()))
bm.MineUntilBlock(ctx, fulls[0], nil)
require.NoError(t, err)
<-newHeads
h3, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Greater(t, int64(h3.Height()), int64(h2.Height()))
ts.testMining(t)
}
func (ts *apiSuite) testNonGenesisMiner(t *testing.T) {
ctx := context.Background()
n, sn := ts.makeNodes(t,
[]kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)},
[]kit.StorageMiner{{Full: 0, Preseal: kit.PresealGenesis}},
)
full, ok := n[0].FullNode.(*impl.FullNodeAPI)
if !ok {
t.Skip("not testing with a full node")
return
}
genesisMiner := sn[0]
full, genesisMiner, ens := kit.EnsembleMinimal(t, append(ts.opts, kit.MockProofs())...)
ens.InterconnectAll().BeginMining(4 * time.Millisecond)
bm := kit.NewBlockMiner(t, genesisMiner)
bm.MineBlocks(ctx, 4*time.Millisecond)
t.Cleanup(bm.Stop)
time.Sleep(1 * time.Second)
gaa, err := genesisMiner.ActorAddress(ctx)
require.NoError(t, err)
gmi, err := full.StateMinerInfo(ctx, gaa, types.EmptyTSK)
_, err = full.StateMinerInfo(ctx, gaa, types.EmptyTSK)
require.NoError(t, err)
testm := n[0].Stb(ctx, t, kit.TestSpt, gmi.Owner)
var newMiner kit.TestMiner
ens.Miner(&newMiner, full,
kit.OwnerAddr(full.DefaultKey),
kit.ProofType(abi.RegisteredSealProof_StackedDrg2KiBV1), // we're using v0 actors with old proofs.
).Start().InterconnectAll()
ta, err := testm.ActorAddress(ctx)
ta, err := newMiner.ActorAddress(ctx)
require.NoError(t, err)
tid, err := address.IDFromAddress(ta)

View File

@@ -13,7 +13,6 @@ import (
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/stretchr/testify/require"
)
@@ -32,50 +31,40 @@ func TestBatchDealInput(t *testing.T) {
run := func(piece, deals, expectSectors int) func(t *testing.T) {
return func(t *testing.T) {
ctx := context.Background()
publishPeriod := 10 * time.Second
maxDealsPerMsg := uint64(deals)
// Set max deals per publish deals message to maxDealsPerMsg
minerDef := []kit.StorageMiner{{
Full: 0,
Opts: node.Options(
node.Override(
new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
})),
node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
return func() (sealiface.Config, error) {
return sealiface.Config{
MaxWaitDealsSectors: 2,
MaxSealingSectors: 1,
MaxSealingSectorsForDeals: 3,
AlwaysKeepUnsealedCopy: true,
WaitDealsDelay: time.Hour,
}, nil
opts := kit.ConstructorOpts(node.Options(
node.Override(
new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
})),
node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
return func() (sealiface.Config, error) {
return sealiface.Config{
MaxWaitDealsSectors: 2,
MaxSealingSectors: 1,
MaxSealingSectorsForDeals: 3,
AlwaysKeepUnsealedCopy: true,
WaitDealsDelay: time.Hour,
}, nil
}),
),
Preseal: kit.PresealGenesis,
}}
// Create and connect a client and a miner node
n, sn := kit.MockMinerBuilder(t, kit.OneFull, minerDef)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
blockMiner := kit.ConnectAndStartMining(t, blockTime, miner, client)
t.Cleanup(blockMiner.Stop)
}, nil
}),
))
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
ens.InterconnectAll().BeginMining(blockTime)
dh := kit.NewDealHarness(t, client, miner)
ctx := context.Background()
err := miner.MarketSetAsk(ctx, big.Zero(), big.Zero(), 200, 128, 32<<30)
require.NoError(t, err)
checkNoPadding := func() {
sl, err := sn[0].SectorsList(ctx)
sl, err := miner.SectorsList(ctx)
require.NoError(t, err)
sort.Slice(sl, func(i, j int) bool {
@@ -83,7 +72,7 @@ func TestBatchDealInput(t *testing.T) {
})
for _, snum := range sl {
si, err := sn[0].SectorsStatus(ctx, snum, false)
si, err := miner.SectorsStatus(ctx, snum, false)
require.NoError(t, err)
// fmt.Printf("S %d: %+v %s\n", snum, si.Deals, si.State)
@@ -122,7 +111,7 @@ func TestBatchDealInput(t *testing.T) {
checkNoPadding()
sl, err := sn[0].SectorsList(ctx)
sl, err := miner.SectorsList(ctx)
require.NoError(t, err)
require.Equal(t, len(sl), expectSectors)
}

View File

@@ -3,17 +3,14 @@ package itests
import (
"context"
"fmt"
"sync/atomic"
"testing"
"time"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/impl"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/stretchr/testify/require"
)
func TestCCUpgrade(t *testing.T) {
@@ -27,60 +24,33 @@ func TestCCUpgrade(t *testing.T) {
} {
height := height // make linters happy by copying
t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
runTestCCUpgrade(t, kit.MockMinerBuilder, 5*time.Millisecond, height)
runTestCCUpgrade(t, height)
})
}
}
func runTestCCUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) {
func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) {
ctx := context.Background()
n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(upgradeHeight)}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
blockTime := 5 * time.Millisecond
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) == 1 {
time.Sleep(blocktime)
if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
t.Error(err)
}
}
}()
opts := kit.ConstructorOpts(kit.LatestActorsAt(upgradeHeight))
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
ens.InterconnectAll().BeginMining(blockTime)
maddr, err := miner.ActorAddress(ctx)
if err != nil {
t.Fatal(err)
}
CC := abi.SectorNumber(kit.GenesisPreseals + 1)
CC := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1)
Upgraded := CC + 1
kit.PledgeSectors(t, ctx, miner, 1, 0, nil)
miner.PledgeSectors(ctx, 1, 0, nil)
sl, err := miner.SectorsList(ctx)
if err != nil {
t.Fatal(err)
}
if len(sl) != 1 {
t.Fatal("expected 1 sector")
}
if sl[0] != CC {
t.Fatal("bad")
}
require.NoError(t, err)
require.Len(t, sl, 1, "expected 1 sector")
require.Equal(t, CC, sl[0], "unexpected sector number")
{
si, err := client.StateSectorGetInfo(ctx, maddr, CC, types.EmptyTSK)
@@ -88,20 +58,16 @@ func runTestCCUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, u
require.Less(t, 50000, int(si.Expiration))
}
if err := miner.SectorMarkForUpgrade(ctx, sl[0]); err != nil {
t.Fatal(err)
}
err = miner.SectorMarkForUpgrade(ctx, sl[0])
require.NoError(t, err)
dh := kit.NewDealHarness(t, client, miner)
_, _, _ = dh.MakeFullDeal(kit.MakeFullDealParams{
Ctx: context.Background(),
Rseed: 6,
CarExport: false,
FastRet: false,
StartEpoch: 0,
DoRetrieval: true,
deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{
Rseed: 6,
SuspendUntilCryptoeconStable: true,
})
outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
kit.AssertFilesEqual(t, inPath, outPath)
// Validate upgrade
@@ -130,10 +96,6 @@ func runTestCCUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, u
}
t.Log("waiting for sector to expire")
// wait one deadline per loop.
time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blocktime)
time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blockTime)
}
fmt.Println("shutting down mining")
atomic.AddInt64(&mine, -1)
<-done
}

View File

@@ -1,7 +1,6 @@
package itests
import (
"context"
"os"
"testing"
"time"
@@ -15,8 +14,8 @@ func TestClient(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
clientNode, _ := kit.StartOneNodeOneMiner(ctx, t, blocktime)
kit.RunClientTest(t, cli.Commands, clientNode)
blockTime := 5 * time.Millisecond
client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC())
ens.InterconnectAll().BeginMining(blockTime)
kit.RunClientTest(t, cli.Commands, client)
}

View File

@@ -3,7 +3,6 @@ package itests
import (
"bytes"
"context"
"fmt"
"testing"
"time"
@@ -26,7 +25,6 @@ import (
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require"
)
@@ -56,11 +54,7 @@ import (
func TestDeadlineToggling(t *testing.T) {
kit.Expensive(t)
_ = logging.SetLogLevel("miner", "ERROR")
_ = logging.SetLogLevel("chainstore", "ERROR")
_ = logging.SetLogLevel("chain", "ERROR")
_ = logging.SetLogLevel("sub", "ERROR")
_ = logging.SetLogLevel("storageminer", "FATAL")
kit.QuietMiningLogs()
const sectorsC, sectorsD, sectorsB = 10, 9, 8
@@ -73,21 +67,26 @@ func TestDeadlineToggling(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithNetworkUpgradeAt(network.Version12, upgradeH)}, kit.OneMiner)
var (
client kit.TestFullNode
minerA kit.TestMiner
minerB kit.TestMiner
minerC kit.TestMiner
minerD kit.TestMiner
minerE kit.TestMiner
)
opts := []kit.NodeOpt{kit.ConstructorOpts(kit.NetworkUpgradeAt(network.Version12, upgradeH))}
ens := kit.NewEnsemble(t, kit.MockProofs()).
FullNode(&client, opts...).
Miner(&minerA, &client, opts...).
Start().
InterconnectAll()
ens.BeginMining(blocktime)
client := n[0].FullNode.(*impl.FullNodeAPI)
minerA := sn[0]
{
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := minerA.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
}
opts = append(opts, kit.OwnerAddr(client.DefaultKey))
ens.Miner(&minerB, &client, opts...).
Miner(&minerC, &client, opts...).
Start()
defaultFrom, err := client.WalletDefaultAddress(ctx)
require.NoError(t, err)
@@ -97,28 +96,6 @@ func TestDeadlineToggling(t *testing.T) {
build.Clock.Sleep(time.Second)
done := make(chan struct{})
go func() {
defer close(done)
for ctx.Err() == nil {
build.Clock.Sleep(blocktime)
if err := minerA.MineOne(ctx, kit.MineNext); err != nil {
if ctx.Err() != nil {
// context was canceled, ignore the error.
return
}
t.Error(err)
}
}
}()
defer func() {
cancel()
<-done
}()
minerB := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
minerC := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
maddrB, err := minerB.ActorAddress(ctx)
require.NoError(t, err)
maddrC, err := minerC.ActorAddress(ctx)
@@ -129,20 +106,20 @@ func TestDeadlineToggling(t *testing.T) {
// pledge sectors on C, go through a PP, check for power
{
kit.PledgeSectors(t, ctx, minerC, sectorsC, 0, nil)
minerC.PledgeSectors(ctx, sectorsC, 0, nil)
di, err := client.StateMinerProvingDeadline(ctx, maddrC, types.EmptyTSK)
require.NoError(t, err)
fmt.Printf("Running one proving period (miner C)\n")
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2)
t.Log("Running one proving period (miner C)")
t.Logf("End for head.Height > %d", di.PeriodStart+di.WPoStProvingPeriod*2)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > di.PeriodStart+provingPeriod*2 {
fmt.Printf("Now head.Height = %d\n", head.Height())
t.Logf("Now head.Height = %d", head.Height())
break
}
build.Clock.Sleep(blocktime)
@@ -163,7 +140,7 @@ func TestDeadlineToggling(t *testing.T) {
require.NoError(t, err)
if head.Height() > upgradeH+provingPeriod {
fmt.Printf("Now head.Height = %d\n", head.Height())
t.Logf("Now head.Height = %d", head.Height())
break
}
build.Clock.Sleep(blocktime)
@@ -214,8 +191,9 @@ func TestDeadlineToggling(t *testing.T) {
require.NoError(t, err)
require.GreaterOrEqual(t, nv, network.Version12)
minerD := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
minerE := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
ens.Miner(&minerD, &client, opts...).
Miner(&minerE, &client, opts...).
Start()
maddrD, err := minerD.ActorAddress(ctx)
require.NoError(t, err)
@@ -223,7 +201,7 @@ func TestDeadlineToggling(t *testing.T) {
require.NoError(t, err)
// first round of miner checks
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.GenesisPreseals), true, true, types.EmptyTSK)
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.DefaultPresealsPerBootstrapMiner), true, true, types.EmptyTSK)
checkMiner(maddrC, types.NewInt(uint64(ssz)*sectorsC), true, true, types.EmptyTSK)
checkMiner(maddrB, types.NewInt(0), false, false, types.EmptyTSK)
@@ -231,10 +209,10 @@ func TestDeadlineToggling(t *testing.T) {
checkMiner(maddrE, types.NewInt(0), false, false, types.EmptyTSK)
// pledge sectors on minerB/minerD, stop post on minerC
kit.PledgeSectors(t, ctx, minerB, sectorsB, 0, nil)
minerB.PledgeSectors(ctx, sectorsB, 0, nil)
checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK)
kit.PledgeSectors(t, ctx, minerD, sectorsD, 0, nil)
minerD.PledgeSectors(ctx, sectorsD, 0, nil)
checkMiner(maddrD, types.NewInt(0), true, true, types.EmptyTSK)
minerC.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).Fail()
@@ -279,7 +257,7 @@ func TestDeadlineToggling(t *testing.T) {
require.NoError(t, err)
if head.Height() > upgradeH+provingPeriod+(provingPeriod/2) {
fmt.Printf("Now head.Height = %d\n", head.Height())
t.Logf("Now head.Height = %d", head.Height())
break
}
build.Clock.Sleep(blocktime)
@@ -293,14 +271,14 @@ func TestDeadlineToggling(t *testing.T) {
require.NoError(t, err)
if head.Height() > upgradeH+(provingPeriod*3) {
fmt.Printf("Now head.Height = %d\n", head.Height())
t.Logf("Now head.Height = %d", head.Height())
break
}
build.Clock.Sleep(blocktime)
}
// second round of miner checks
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.GenesisPreseals), true, true, types.EmptyTSK)
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.DefaultPresealsPerBootstrapMiner), true, true, types.EmptyTSK)
checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK)
checkMiner(maddrB, types.NewInt(uint64(ssz)*sectorsB), true, true, types.EmptyTSK)
checkMiner(maddrD, types.NewInt(uint64(ssz)*sectorsD), true, true, types.EmptyTSK)
@@ -349,7 +327,7 @@ func TestDeadlineToggling(t *testing.T) {
}, nil)
require.NoError(t, err)
fmt.Println("sent termination message:", smsg.Cid())
t.Log("sent termination message:", smsg.Cid())
r, err := client.StateWaitMsg(ctx, smsg.Cid(), 2, api.LookbackNoLimit, true)
require.NoError(t, err)
@@ -365,13 +343,13 @@ func TestDeadlineToggling(t *testing.T) {
require.NoError(t, err)
if head.Height() > upgradeH+(provingPeriod*5) {
fmt.Printf("Now head.Height = %d\n", head.Height())
t.Logf("Now head.Height = %d", head.Height())
break
}
build.Clock.Sleep(blocktime)
}
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.GenesisPreseals), true, true, types.EmptyTSK)
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.DefaultPresealsPerBootstrapMiner), true, true, types.EmptyTSK)
checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK)
checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK)
checkMiner(maddrD, types.NewInt(0), false, false, types.EmptyTSK)
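For tests that need more than the minimal one-node/one-miner pair, the deadline-toggling test above builds its ensemble explicitly. A condensed, self-contained sketch of that pattern, using only constructors that appear in this diff (the test name and block time are illustrative); note that extra miners can be attached to an ensemble that is already running:

package itests

import (
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit"
)

func TestEnsembleTopologySketch(t *testing.T) {
	var (
		client kit.TestFullNode
		minerA kit.TestMiner
		minerB kit.TestMiner
	)

	// Declare the initial topology, start it, and interconnect the nodes.
	ens := kit.NewEnsemble(t, kit.MockProofs()).
		FullNode(&client).
		Miner(&minerA, &client).
		Start().
		InterconnectAll()
	ens.BeginMining(2 * time.Millisecond)

	// Additional miners, owned by the client's default key, can join the
	// already-running ensemble.
	ens.Miner(&minerB, &client, kit.OwnerAddr(client.DefaultKey)).Start()
}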

View File

@@ -0,0 +1,49 @@
package itests
import (
"fmt"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/itests/kit"
)
func TestDealCyclesConcurrent(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
kit.QuietMiningLogs()
blockTime := 10 * time.Millisecond
// For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
startEpoch := abi.ChainEpoch(2 << 12)
runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) {
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
ens.InterconnectAll().BeginMining(blockTime)
dh := kit.NewDealHarness(t, client, miner)
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{
N: n,
FastRetrieval: fastRetrieval,
CarExport: carExport,
StartEpoch: startEpoch,
})
}
// TODO: add 2, 4, 8, more when this graphsync issue is fixed: https://github.com/ipfs/go-graphsync/issues/175#
cycles := []int{1}
for _, n := range cycles {
n := n
ns := fmt.Sprintf("%d", n)
t.Run(ns+"-fastretrieval-CAR", func(t *testing.T) { runTest(t, n, true, true) })
t.Run(ns+"-fastretrieval-NoCAR", func(t *testing.T) { runTest(t, n, true, false) })
t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, true, false) })
t.Run(ns+"-stdretrieval-NoCAR", func(t *testing.T) { runTest(t, n, false, false) })
}
}

View File

@@ -0,0 +1,101 @@
package itests
import (
"context"
"path/filepath"
"testing"
"time"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/stretchr/testify/require"
)
func TestOfflineDealFlow(t *testing.T) {
blocktime := 10 * time.Millisecond
// For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
startEpoch := abi.ChainEpoch(2 << 12)
runTest := func(t *testing.T, fastRet bool) {
ctx := context.Background()
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
ens.InterconnectAll().BeginMining(blocktime)
dh := kit.NewDealHarness(t, client, miner)
// Create a random file and import on the client.
res, inFile := client.CreateImportFile(ctx, 1, 0)
// Get the piece size and commP
rootCid := res.Root
pieceInfo, err := client.ClientDealPieceCID(ctx, rootCid)
require.NoError(t, err)
t.Log("FILE CID:", rootCid)
// Create a storage deal with the miner
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
addr, err := client.WalletDefaultAddress(ctx)
require.NoError(t, err)
// Manual storage deal (offline deal)
dataRef := &storagemarket.DataRef{
TransferType: storagemarket.TTManual,
Root: rootCid,
PieceCid: &pieceInfo.PieceCID,
PieceSize: pieceInfo.PieceSize.Unpadded(),
}
proposalCid, err := client.ClientStartDeal(ctx, &api.StartDealParams{
Data: dataRef,
Wallet: addr,
Miner: maddr,
EpochPrice: types.NewInt(1000000),
DealStartEpoch: startEpoch,
MinBlocksDuration: uint64(build.MinDealDuration),
FastRetrieval: fastRet,
})
require.NoError(t, err)
// Wait for the deal to reach StorageDealCheckForAcceptance on the client
cd, err := client.ClientGetDealInfo(ctx, *proposalCid)
require.NoError(t, err)
require.Eventually(t, func() bool {
cd, _ := client.ClientGetDealInfo(ctx, *proposalCid)
return cd.State == storagemarket.StorageDealCheckForAcceptance
}, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State])
// Create a CAR file from the raw file
carFileDir := t.TempDir()
carFilePath := filepath.Join(carFileDir, "out.car")
err = client.ClientGenCar(ctx, api.FileRef{Path: inFile}, carFilePath)
require.NoError(t, err)
// Import the CAR file on the miner - this is the equivalent of
// transferring the file across the wire in a normal (non-offline) deal
err = miner.DealsImportData(ctx, *proposalCid, carFilePath)
require.NoError(t, err)
// Wait for the deal to be published
dh.WaitDealPublished(ctx, proposalCid)
t.Logf("deal published, retrieving")
// Retrieve the deal
outFile := dh.PerformRetrieval(ctx, proposalCid, rootCid, false)
kit.AssertFilesEqual(t, inFile, outFile)
}
t.Run("stdretrieval", func(t *testing.T) { runTest(t, false) })
t.Run("fastretrieval", func(t *testing.T) { runTest(t, true) })
}

View File

@@ -0,0 +1,61 @@
package itests
import (
"context"
"testing"
"time"
"github.com/filecoin-project/lotus/itests/kit"
)
func TestFirstDealEnablesMining(t *testing.T) {
// test making a deal with a fresh miner, and see if it starts to mine.
if testing.Short() {
t.Skip("skipping test in short mode")
}
kit.QuietMiningLogs()
var (
client kit.TestFullNode
genMiner kit.TestMiner // bootstrap
provider kit.TestMiner // no sectors, will need to create one
)
ens := kit.NewEnsemble(t, kit.MockProofs())
ens.FullNode(&client)
ens.Miner(&genMiner, &client)
ens.Miner(&provider, &client, kit.PresealSectors(0))
ens.Start().InterconnectAll().BeginMining(50 * time.Millisecond)
ctx := context.Background()
dh := kit.NewDealHarness(t, &client, &provider)
ref, _ := client.CreateImportFile(ctx, 5, 0)
t.Log("FILE CID:", ref.Root)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// start a goroutine to monitor head changes from the client
// once the provider has mined a block, thanks to the power acquired from the deal,
// we pass the test.
providerMined := make(chan struct{})
go func() {
_ = client.WaitTillChain(ctx, kit.BlockMinedBy(provider.ActorAddr))
close(providerMined)
}()
// now perform the deal.
deal := dh.StartDeal(ctx, ref.Root, false, 0)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
dh.WaitDealSealed(ctx, deal, false, false, nil)
<-providerMined
}

View File

@@ -0,0 +1,131 @@
package itests
import (
"context"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/stretchr/testify/require"
)
func TestQuotePriceForUnsealedRetrieval(t *testing.T) {
var (
ctx = context.Background()
blocktime = time.Second
)
kit.QuietMiningLogs()
client, miner, ens := kit.EnsembleMinimal(t)
ens.InterconnectAll().BeginMining(blocktime)
var (
ppb = int64(1)
unsealPrice = int64(77)
)
// Set unsealed price to non-zero
ask, err := miner.MarketGetRetrievalAsk(ctx)
require.NoError(t, err)
ask.PricePerByte = abi.NewTokenAmount(ppb)
ask.UnsealPrice = abi.NewTokenAmount(unsealPrice)
err = miner.MarketSetRetrievalAsk(ctx, ask)
require.NoError(t, err)
dh := kit.NewDealHarness(t, client, miner)
deal1, res1, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6})
// one more storage deal for the same data
_, res2, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6})
require.Equal(t, res1.Root, res2.Root)
// Retrieval
dealInfo, err := client.ClientGetDealInfo(ctx, *deal1)
require.NoError(t, err)
// fetch quote -> zero for unsealed price since unsealed file already exists.
offers, err := client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID)
require.NoError(t, err)
require.Len(t, offers, 2)
require.Equal(t, offers[0], offers[1])
require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64())
require.Equal(t, dealInfo.Size*uint64(ppb), offers[0].MinPrice.Uint64())
// remove ONLY one unsealed file
ss, err := miner.StorageList(context.Background())
require.NoError(t, err)
_, err = miner.SectorsList(ctx)
require.NoError(t, err)
iLoop:
for storeID, sd := range ss {
for _, sector := range sd {
err := miner.StorageDropSector(ctx, storeID, sector.SectorID, storiface.FTUnsealed)
require.NoError(t, err)
break iLoop // remove ONLY one
}
}
// get retrieval quote -> zero for unsealed price as unsealed file exists.
offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID)
require.NoError(t, err)
require.Len(t, offers, 2)
require.Equal(t, offers[0], offers[1])
require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64())
require.Equal(t, dealInfo.Size*uint64(ppb), offers[0].MinPrice.Uint64())
// remove the other unsealed file as well
ss, err = miner.StorageList(context.Background())
require.NoError(t, err)
_, err = miner.SectorsList(ctx)
require.NoError(t, err)
for storeID, sd := range ss {
for _, sector := range sd {
require.NoError(t, miner.StorageDropSector(ctx, storeID, sector.SectorID, storiface.FTUnsealed))
}
}
// fetch quote -> non-zero for unseal price as there are no more unsealed files.
offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID)
require.NoError(t, err)
require.Len(t, offers, 2)
require.Equal(t, offers[0], offers[1])
require.Equal(t, uint64(unsealPrice), offers[0].UnsealPrice.Uint64())
total := (dealInfo.Size * uint64(ppb)) + uint64(unsealPrice)
require.Equal(t, total, offers[0].MinPrice.Uint64())
}
func TestZeroPricePerByteRetrieval(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
kit.QuietMiningLogs()
var (
blockTime = 10 * time.Millisecond
startEpoch = abi.ChainEpoch(2 << 12)
)
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
ens.InterconnectAll().BeginMining(blockTime)
ctx := context.Background()
ask, err := miner.MarketGetRetrievalAsk(ctx)
require.NoError(t, err)
ask.PricePerByte = abi.NewTokenAmount(0)
err = miner.MarketSetRetrievalAsk(ctx, ask)
require.NoError(t, err)
dh := kit.NewDealHarness(t, client, miner)
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{
N: 1,
StartEpoch: startEpoch,
})
}

View File

@@ -0,0 +1,108 @@
package itests
import (
"bytes"
"context"
"testing"
"time"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/node"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
"github.com/stretchr/testify/require"
)
func TestPublishDealsBatching(t *testing.T) {
var (
ctx = context.Background()
publishPeriod = 10 * time.Second
maxDealsPerMsg = uint64(2) // Set max deals per publish deals message to 2
startEpoch = abi.ChainEpoch(2 << 12)
)
kit.QuietMiningLogs()
opts := node.Override(new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
}),
)
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts(opts))
ens.InterconnectAll().BeginMining(10 * time.Millisecond)
dh := kit.NewDealHarness(t, client, miner)
// Starts a deal and waits until it's published
runDealTillPublish := func(rseed int) {
res, _ := client.CreateImportFile(ctx, rseed, 0)
upds, err := client.ClientGetDealUpdates(ctx)
require.NoError(t, err)
dh.StartDeal(ctx, res.Root, false, startEpoch)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
done := make(chan struct{})
go func() {
for upd := range upds {
if upd.DataRef.Root == res.Root && upd.State == storagemarket.StorageDealAwaitingPreCommit {
done <- struct{}{}
}
}
}()
<-done
}
// Run three deals in parallel
done := make(chan struct{}, maxDealsPerMsg+1)
for rseed := 1; rseed <= 3; rseed++ {
rseed := rseed
go func() {
runDealTillPublish(rseed)
done <- struct{}{}
}()
}
// Wait for two of the deals to be published
for i := 0; i < int(maxDealsPerMsg); i++ {
<-done
}
// Expect a single PublishStorageDeals message that includes the first two deals
msgCids, err := client.StateListMessages(ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1)
require.NoError(t, err)
count := 0
for _, msgCid := range msgCids {
msg, err := client.ChainGetMessage(ctx, msgCid)
require.NoError(t, err)
if msg.Method == market.Methods.PublishStorageDeals {
count++
var pubDealsParams market2.PublishStorageDealsParams
err = pubDealsParams.UnmarshalCBOR(bytes.NewReader(msg.Params))
require.NoError(t, err)
require.Len(t, pubDealsParams.Deals, int(maxDealsPerMsg))
}
}
require.Equal(t, 1, count)
// The third deal should be published once the publish period expires.
// Allow a little padding as it takes a moment for the state change to
// be noticed by the client.
padding := 10 * time.Second
select {
case <-time.After(publishPeriod + padding):
require.Fail(t, "Expected 3rd deal to be published once publish period elapsed")
case <-done: // Success
}
}

View File

@@ -1,633 +1,35 @@
package itests
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"sync/atomic"
"testing"
"time"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
"github.com/stretchr/testify/require"
)
func TestDealCycle(t *testing.T) {
kit.QuietMiningLogs()
blockTime := 10 * time.Millisecond
// For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
dealStartEpoch := abi.ChainEpoch(2 << 12)
t.Run("TestFullDealCycle_Single", func(t *testing.T) {
runFullDealCycles(t, 1, kit.MockMinerBuilder, blockTime, false, false, dealStartEpoch)
})
t.Run("TestFullDealCycle_Two", func(t *testing.T) {
runFullDealCycles(t, 2, kit.MockMinerBuilder, blockTime, false, false, dealStartEpoch)
})
t.Run("WithExportedCAR", func(t *testing.T) {
runFullDealCycles(t, 1, kit.MockMinerBuilder, blockTime, true, false, dealStartEpoch)
})
t.Run("TestFastRetrievalDealCycle", func(t *testing.T) {
runFastRetrievalDealFlowT(t, kit.MockMinerBuilder, blockTime, dealStartEpoch)
})
t.Run("TestZeroPricePerByteRetrievalDealFlow", func(t *testing.T) {
runZeroPricePerByteRetrievalDealFlow(t, kit.MockMinerBuilder, blockTime, dealStartEpoch)
})
}
func TestAPIDealFlowReal(t *testing.T) {
func TestDealsWithSealingAndRPC(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
kit.QuietMiningLogs()
// TODO: just set this globally?
oldDelay := policy.GetPreCommitChallengeDelay()
policy.SetPreCommitChallengeDelay(5)
t.Cleanup(func() {
policy.SetPreCommitChallengeDelay(oldDelay)
})
t.Run("basic", func(t *testing.T) {
runFullDealCycles(t, 1, kit.Builder, time.Second, false, false, 0)
})
t.Run("fast-retrieval", func(t *testing.T) {
runFullDealCycles(t, 1, kit.Builder, time.Second, false, true, 0)
})
t.Run("retrieval-second", func(t *testing.T) {
runSecondDealRetrievalTest(t, kit.Builder, time.Second)
})
t.Run("quote-price-for-non-unsealed-retrieval", func(t *testing.T) {
runQuotePriceForUnsealedRetrieval(t, kit.Builder, time.Second, 0)
})
}
func runQuotePriceForUnsealedRetrieval(t *testing.T, b kit.APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
ctx := context.Background()
fulls, miners := b(t, kit.OneFull, kit.OneMiner)
client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
ppb := int64(1)
unsealPrice := int64(77)
// Set unsealed price to non-zero
ask, err := miner.MarketGetRetrievalAsk(ctx)
require.NoError(t, err)
ask.PricePerByte = abi.NewTokenAmount(ppb)
ask.UnsealPrice = abi.NewTokenAmount(unsealPrice)
err = miner.MarketSetRetrievalAsk(ctx, ask)
require.NoError(t, err)
var blockTime = 1 * time.Second
client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC()) // no mock proofs.
ens.InterconnectAll().BeginMining(blockTime)
dh := kit.NewDealHarness(t, client, miner)
_, info, fcid := dh.MakeFullDeal(kit.MakeFullDealParams{
Ctx: ctx,
Rseed: 6,
CarExport: false,
FastRet: false,
StartEpoch: startEpoch,
DoRetrieval: false,
t.Run("stdretrieval", func(t *testing.T) {
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1})
})
// one more storage deal for the same data
_, _, fcid2 := dh.MakeFullDeal(kit.MakeFullDealParams{
Ctx: ctx,
Rseed: 6,
CarExport: false,
FastRet: false,
StartEpoch: startEpoch,
DoRetrieval: false,
})
require.Equal(t, fcid, fcid2)
// fetch quote -> zero for unsealed price since unsealed file already exists.
offers, err := client.ClientFindData(ctx, fcid, &info.PieceCID)
require.NoError(t, err)
require.Len(t, offers, 2)
require.Equal(t, offers[0], offers[1])
require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64())
require.Equal(t, info.Size*uint64(ppb), offers[0].MinPrice.Uint64())
// remove ONLY one unsealed file
ss, err := miner.StorageList(context.Background())
require.NoError(t, err)
_, err = miner.SectorsList(ctx)
require.NoError(t, err)
iLoop:
for storeID, sd := range ss {
for _, sector := range sd {
require.NoError(t, miner.StorageDropSector(ctx, storeID, sector.SectorID, storiface.FTUnsealed))
// remove ONLY one
break iLoop
}
}
// get retrieval quote -> zero for unsealed price as unsealed file exists.
offers, err = client.ClientFindData(ctx, fcid, &info.PieceCID)
require.NoError(t, err)
require.Len(t, offers, 2)
require.Equal(t, offers[0], offers[1])
require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64())
require.Equal(t, info.Size*uint64(ppb), offers[0].MinPrice.Uint64())
// remove the other unsealed file as well
ss, err = miner.StorageList(context.Background())
require.NoError(t, err)
_, err = miner.SectorsList(ctx)
require.NoError(t, err)
for storeID, sd := range ss {
for _, sector := range sd {
require.NoError(t, miner.StorageDropSector(ctx, storeID, sector.SectorID, storiface.FTUnsealed))
}
}
// fetch quote -> non-zero for unseal price as there are no more unsealed files.
offers, err = client.ClientFindData(ctx, fcid, &info.PieceCID)
require.NoError(t, err)
require.Len(t, offers, 2)
require.Equal(t, offers[0], offers[1])
require.Equal(t, uint64(unsealPrice), offers[0].UnsealPrice.Uint64())
total := (info.Size * uint64(ppb)) + uint64(unsealPrice)
require.Equal(t, total, offers[0].MinPrice.Uint64())
}
func TestPublishDealsBatching(t *testing.T) {
ctx := context.Background()
kit.QuietMiningLogs()
b := kit.MockMinerBuilder
blocktime := 10 * time.Millisecond
startEpoch := abi.ChainEpoch(2 << 12)
publishPeriod := 10 * time.Second
maxDealsPerMsg := uint64(2)
// Set max deals per publish deals message to 2
minerDef := []kit.StorageMiner{{
Full: 0,
Opts: node.Override(
new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
})),
Preseal: kit.PresealGenesis,
}}
// Create and connect a client and a miner node
n, sn := b(t, kit.OneFull, minerDef)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
dh := kit.NewDealHarness(t, client, miner)
// Starts a deal and waits until it's published
runDealTillPublish := func(rseed int) {
res, _, _, err := kit.CreateImportFile(ctx, client, rseed, 0)
require.NoError(t, err)
upds, err := client.ClientGetDealUpdates(ctx)
require.NoError(t, err)
dh.StartDeal(ctx, res.Root, false, startEpoch)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
done := make(chan struct{})
go func() {
for upd := range upds {
if upd.DataRef.Root == res.Root && upd.State == storagemarket.StorageDealAwaitingPreCommit {
done <- struct{}{}
}
}
}()
<-done
}
// Run three deals in parallel
done := make(chan struct{}, maxDealsPerMsg+1)
for rseed := 1; rseed <= 3; rseed++ {
rseed := rseed
go func() {
runDealTillPublish(rseed)
done <- struct{}{}
}()
}
// Wait for two of the deals to be published
for i := 0; i < int(maxDealsPerMsg); i++ {
<-done
}
// Expect a single PublishStorageDeals message that includes the first two deals
msgCids, err := client.StateListMessages(ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1)
require.NoError(t, err)
count := 0
for _, msgCid := range msgCids {
msg, err := client.ChainGetMessage(ctx, msgCid)
require.NoError(t, err)
if msg.Method == market.Methods.PublishStorageDeals {
count++
var pubDealsParams market2.PublishStorageDealsParams
err = pubDealsParams.UnmarshalCBOR(bytes.NewReader(msg.Params))
require.NoError(t, err)
require.Len(t, pubDealsParams.Deals, int(maxDealsPerMsg))
}
}
require.Equal(t, 1, count)
// The third deal should be published once the publish period expires.
// Allow a little padding as it takes a moment for the state change to
// be noticed by the client.
padding := 10 * time.Second
select {
case <-time.After(publishPeriod + padding):
require.Fail(t, "Expected 3rd deal to be published once publish period elapsed")
case <-done: // Success
}
}
func TestDealMining(t *testing.T) {
// test making a deal with a fresh miner, and see if it starts to mine.
if testing.Short() {
t.Skip("skipping test in short mode")
}
kit.QuietMiningLogs()
b := kit.MockMinerBuilder
blocktime := 50 * time.Millisecond
ctx := context.Background()
fulls, miners := b(t,
kit.OneFull,
[]kit.StorageMiner{
{Full: 0, Preseal: kit.PresealGenesis},
{Full: 0, Preseal: 0}, // TODO: Add support for miners on non-first full node
})
client := fulls[0].FullNode.(*impl.FullNodeAPI)
genesisMiner := miners[0]
provider := miners[1]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := provider.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
if err := genesisMiner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
data := make([]byte, 600)
rand.New(rand.NewSource(5)).Read(data)
r := bytes.NewReader(data)
fcid, err := client.ClientImportLocal(ctx, r)
if err != nil {
t.Fatal(err)
}
fmt.Println("FILE CID: ", fcid)
var mine int32 = 1
done := make(chan struct{})
minedTwo := make(chan struct{})
m2addr, err := miners[1].ActorAddress(context.TODO())
if err != nil {
t.Fatal(err)
}
go func() {
defer close(done)
complChan := minedTwo
for atomic.LoadInt32(&mine) != 0 {
wait := make(chan int)
mdone := func(mined bool, _ abi.ChainEpoch, err error) {
n := 0
if mined {
n = 1
}
wait <- n
}
if err := miners[0].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
t.Error(err)
}
if err := miners[1].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
t.Error(err)
}
expect := <-wait
expect += <-wait
time.Sleep(blocktime)
if expect == 0 {
// null block
continue
}
var nodeOneMined bool
for _, node := range miners {
mb, err := node.MiningBase(ctx)
if err != nil {
t.Error(err)
return
}
for _, b := range mb.Blocks() {
if b.Miner == m2addr {
nodeOneMined = true
break
}
}
}
if nodeOneMined && complChan != nil {
close(complChan)
complChan = nil
}
}
}()
dh := kit.NewDealHarness(t, client, provider)
deal := dh.StartDeal(ctx, fcid, false, 0)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
dh.WaitDealSealed(ctx, deal, false, false, nil)
<-minedTwo
atomic.StoreInt32(&mine, 0)
fmt.Println("shutting down mining")
<-done
}
func TestOfflineDealFlow(t *testing.T) {
blocktime := 10 * time.Millisecond
// For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
startEpoch := abi.ChainEpoch(2 << 12)
runTest := func(t *testing.T, fastRet bool) {
ctx := context.Background()
fulls, miners := kit.MockMinerBuilder(t, kit.OneFull, kit.OneMiner)
client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
dh := kit.NewDealHarness(t, client, miner)
// Create a random file and import on the client.
res, path, data, err := kit.CreateImportFile(ctx, client, 1, 0)
require.NoError(t, err)
// Get the piece size and commP
fcid := res.Root
pieceInfo, err := client.ClientDealPieceCID(ctx, fcid)
require.NoError(t, err)
fmt.Println("FILE CID: ", fcid)
// Create a storage deal with the miner
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
addr, err := client.WalletDefaultAddress(ctx)
require.NoError(t, err)
// Manual storage deal (offline deal)
dataRef := &storagemarket.DataRef{
TransferType: storagemarket.TTManual,
Root: fcid,
PieceCid: &pieceInfo.PieceCID,
PieceSize: pieceInfo.PieceSize.Unpadded(),
}
proposalCid, err := client.ClientStartDeal(ctx, &api.StartDealParams{
Data: dataRef,
Wallet: addr,
Miner: maddr,
EpochPrice: types.NewInt(1000000),
DealStartEpoch: startEpoch,
MinBlocksDuration: uint64(build.MinDealDuration),
FastRetrieval: fastRet,
})
require.NoError(t, err)
// Wait for the deal to reach StorageDealCheckForAcceptance on the client
cd, err := client.ClientGetDealInfo(ctx, *proposalCid)
require.NoError(t, err)
require.Eventually(t, func() bool {
cd, _ := client.ClientGetDealInfo(ctx, *proposalCid)
return cd.State == storagemarket.StorageDealCheckForAcceptance
}, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State])
// Create a CAR file from the raw file
carFileDir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-car")
require.NoError(t, err)
carFilePath := filepath.Join(carFileDir, "out.car")
err = client.ClientGenCar(ctx, api.FileRef{Path: path}, carFilePath)
require.NoError(t, err)
// Import the CAR file on the miner - this is the equivalent of
// transferring the file across the wire in a normal (non-offline) deal
err = miner.DealsImportData(ctx, *proposalCid, carFilePath)
require.NoError(t, err)
// Wait for the deal to be published
dh.WaitDealPublished(ctx, proposalCid)
t.Logf("deal published, retrieving")
// Retrieve the deal
dh.TestRetrieval(ctx, fcid, &pieceInfo.PieceCID, false, data)
}
t.Run("NormalRetrieval", func(t *testing.T) {
runTest(t, false)
})
t.Run("FastRetrieval", func(t *testing.T) {
runTest(t, true)
t.Run("fastretrieval", func(t *testing.T) {
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true})
})
}
func runFullDealCycles(t *testing.T, n int, b kit.APIBuilder, blocktime time.Duration, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
fulls, miners := b(t, kit.OneFull, kit.OneMiner)
client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
dh := kit.NewDealHarness(t, client, miner)
baseseed := 6
for i := 0; i < n; i++ {
_, _, _ = dh.MakeFullDeal(kit.MakeFullDealParams{
Ctx: context.Background(),
Rseed: baseseed + i,
CarExport: carExport,
FastRet: fastRet,
StartEpoch: startEpoch,
DoRetrieval: true,
})
}
}
func runFastRetrievalDealFlowT(t *testing.T, b kit.APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
ctx := context.Background()
fulls, miners := b(t, kit.OneFull, kit.OneMiner)
client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
dh := kit.NewDealHarness(t, client, miner)
data := make([]byte, 1600)
rand.New(rand.NewSource(int64(8))).Read(data)
r := bytes.NewReader(data)
fcid, err := client.ClientImportLocal(ctx, r)
if err != nil {
t.Fatal(err)
}
fmt.Println("FILE CID: ", fcid)
deal := dh.StartDeal(ctx, fcid, true, startEpoch)
dh.WaitDealPublished(ctx, deal)
fmt.Println("deal published, retrieving")
// Retrieval
info, err := client.ClientGetDealInfo(ctx, *deal)
require.NoError(t, err)
dh.TestRetrieval(ctx, fcid, &info.PieceCID, false, data)
}
func runSecondDealRetrievalTest(t *testing.T, b kit.APIBuilder, blocktime time.Duration) {
ctx := context.Background()
fulls, miners := b(t, kit.OneFull, kit.OneMiner)
client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
dh := kit.NewDealHarness(t, client, miner)
{
data1 := make([]byte, 800)
rand.New(rand.NewSource(int64(3))).Read(data1)
r := bytes.NewReader(data1)
fcid1, err := client.ClientImportLocal(ctx, r)
if err != nil {
t.Fatal(err)
}
data2 := make([]byte, 800)
rand.New(rand.NewSource(int64(9))).Read(data2)
r2 := bytes.NewReader(data2)
fcid2, err := client.ClientImportLocal(ctx, r2)
if err != nil {
t.Fatal(err)
}
deal1 := dh.StartDeal(ctx, fcid1, true, 0)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
dh.WaitDealSealed(ctx, deal1, true, false, nil)
deal2 := dh.StartDeal(ctx, fcid2, true, 0)
time.Sleep(time.Second)
dh.WaitDealSealed(ctx, deal2, false, false, nil)
// Retrieval
info, err := client.ClientGetDealInfo(ctx, *deal2)
require.NoError(t, err)
rf, _ := miner.SectorsRefs(ctx)
fmt.Printf("refs: %+v\n", rf)
dh.TestRetrieval(ctx, fcid2, &info.PieceCID, false, data2)
}
}
func runZeroPricePerByteRetrievalDealFlow(t *testing.T, b kit.APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
ctx := context.Background()
fulls, miners := b(t, kit.OneFull, kit.OneMiner)
client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
kit.ConnectAndStartMining(t, blocktime, miner, client)
dh := kit.NewDealHarness(t, client, miner)
// Set price-per-byte to zero
ask, err := miner.MarketGetRetrievalAsk(ctx)
require.NoError(t, err)
ask.PricePerByte = abi.NewTokenAmount(0)
err = miner.MarketSetRetrievalAsk(ctx, ask)
require.NoError(t, err)
_, _, _ = dh.MakeFullDeal(kit.MakeFullDealParams{
Ctx: ctx,
Rseed: 6,
CarExport: false,
FastRet: false,
StartEpoch: startEpoch,
DoRetrieval: true,
})
t.Run("fastretrieval-twodeals-sequential", func(t *testing.T) {
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true})
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true})
})
}

View File

@ -5,31 +5,28 @@ import (
"context"
"fmt"
"math"
"os"
"testing"
"time"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/itests/multisig"
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/client"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/gateway"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/itests/multisig"
"github.com/filecoin-project/lotus/node"
init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init"
multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
"github.com/ipfs/go-cid"
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"
)
const (
@ -37,22 +34,14 @@ const (
maxStateWaitLookbackLimit = stmgr.LookbackNoLimit
)
func init() {
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
}
// TestGatewayWalletMsig tests that API calls to wallet and msig can be made on a lite
// node that is connected through a gateway to a full API node
func TestGatewayWalletMsig(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes := startNodes(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit)
defer nodes.closer()
lite := nodes.lite
full := nodes.full
@ -112,7 +101,6 @@ func TestGatewayWalletMsig(t *testing.T) {
if err != nil {
return cid.Undef, err
}
return lite.MpoolPush(ctx, sm)
}
@ -180,26 +168,24 @@ func TestGatewayWalletMsig(t *testing.T) {
// TestGatewayMsigCLI tests that msig CLI calls can be made
// on a lite node that is connected through a gateway to a full API node
func TestGatewayMsigCLI(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit)
defer nodes.closer()
lite := nodes.lite
multisig.RunMultisigTests(t, lite)
}
func TestGatewayDealFlow(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit)
defer nodes.closer()
time.Sleep(5 * time.Second)
// For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
@ -207,33 +193,27 @@ func TestGatewayDealFlow(t *testing.T) {
dealStartEpoch := abi.ChainEpoch(2 << 12)
dh := kit.NewDealHarness(t, nodes.lite, nodes.miner)
dh.MakeFullDeal(kit.MakeFullDealParams{
Ctx: ctx,
Rseed: 6,
CarExport: false,
FastRet: false,
StartEpoch: dealStartEpoch,
DoRetrieval: true,
})
dealCid, res, _ := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{
Rseed: 6,
StartEpoch: dealStartEpoch,
})
dh.PerformRetrieval(ctx, dealCid, res.Root, false)
}
func TestGatewayCLIDealFlow(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit)
defer nodes.closer()
kit.RunClientTest(t, cli.Commands, nodes.lite)
}
type testNodes struct {
lite kit.TestFullNode
full kit.TestFullNode
miner kit.TestMiner
closer jsonrpc.ClientCloser
lite *kit.TestFullNode
full *kit.TestFullNode
miner *kit.TestMiner
}
func startNodesWithFunds(
@ -249,8 +229,8 @@ func startNodesWithFunds(
fullWalletAddr, err := nodes.full.WalletDefaultAddress(ctx)
require.NoError(t, err)
// Create a wallet on the lite node
liteWalletAddr, err := nodes.lite.WalletNew(ctx, types.KTSecp256k1)
// Get the lite node default wallet address.
liteWalletAddr, err := nodes.lite.WalletDefaultAddress(ctx)
require.NoError(t, err)
// Send some funds from the full node to the lite node
@ -269,66 +249,47 @@ func startNodes(
) *testNodes {
var closer jsonrpc.ClientCloser
// Create one miner and two full nodes.
var (
full *kit.TestFullNode
miner *kit.TestMiner
lite kit.TestFullNode
)
// - Create one full node and one lite node
// - Put a gateway server in front of full node 1
// - Start full node 2 in lite mode
// - Connect lite node -> gateway server -> full node
opts := append(
// Full node
kit.OneFull,
// Lite node
kit.FullNodeOpts{
Lite: true,
Opts: func(nodes []kit.TestFullNode) node.Option {
fullNode := nodes[0]
// Create a gateway server in front of the full node
gwapi := gateway.NewNode(fullNode, lookbackCap, stateWaitLookbackLimit)
handler, err := gateway.Handler(gwapi)
require.NoError(t, err)
// create the full node and the miner.
var ens *kit.Ensemble
full, miner, ens = kit.EnsembleMinimal(t, kit.MockProofs())
ens.InterconnectAll().BeginMining(blocktime)
srv, _ := kit.CreateRPCServer(t, handler)
// Create a gateway client API that connects to the gateway server
var gapi api.Gateway
gapi, closer, err = client.NewGatewayRPCV1(ctx, "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
require.NoError(t, err)
// Provide the gateway API to dependency injection
return node.Override(new(api.Gateway), gapi)
},
},
)
n, sn := kit.RPCMockMinerBuilder(t, opts, kit.OneMiner)
full := n[0]
lite := n[1]
miner := sn[0]
// Get the listener address for the full node
fullAddr, err := full.NetAddrsListen(ctx)
// Create a gateway server in front of the full node
gwapi := gateway.NewNode(full, lookbackCap, stateWaitLookbackLimit)
handler, err := gateway.Handler(gwapi)
require.NoError(t, err)
// Connect the miner and the full node
err = miner.NetConnect(ctx, fullAddr)
require.NoError(t, err)
srv, _ := kit.CreateRPCServer(t, handler)
// Connect the miner and the lite node (so that the lite node can send
// data to the miner)
liteAddr, err := lite.NetAddrsListen(ctx)
require.NoError(t, err)
err = miner.NetConnect(ctx, liteAddr)
// Create a gateway client API that connects to the gateway server
var gapi api.Gateway
gapi, closer, err = client.NewGatewayRPCV1(ctx, "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
require.NoError(t, err)
t.Cleanup(closer)
// Start mining blocks
bm := kit.NewBlockMiner(t, miner)
bm.MineBlocks(ctx, blocktime)
t.Cleanup(bm.Stop)
ens.FullNode(&lite,
kit.LiteNode(),
kit.ThroughRPC(),
kit.ConstructorOpts(
node.Override(new(api.Gateway), gapi),
),
).Start().InterconnectAll()
return &testNodes{lite: lite, full: full, miner: miner, closer: closer}
return &testNodes{lite: &lite, full: full, miner: miner}
}
func sendFunds(ctx context.Context, fromNode kit.TestFullNode, fromAddr address.Address, toAddr address.Address, amt types.BigInt) error {
func sendFunds(ctx context.Context, fromNode *kit.TestFullNode, fromAddr address.Address, toAddr address.Address, amt types.BigInt) error {
msg := &types.Message{
From: fromAddr,
To: toAddr,

View File

@ -15,14 +15,14 @@ import (
// BlockMiner is a utility that makes a test miner mine blocks on a timer.
type BlockMiner struct {
t *testing.T
miner TestMiner
miner *TestMiner
nextNulls int64
wg sync.WaitGroup
cancel context.CancelFunc
}
func NewBlockMiner(t *testing.T, miner TestMiner) *BlockMiner {
func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner {
return &BlockMiner{
t: t,
miner: miner,
@ -69,7 +69,7 @@ func (bm *BlockMiner) InjectNulls(rounds abi.ChainEpoch) {
atomic.AddInt64(&bm.nextNulls, int64(rounds))
}
func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn TestFullNode, cb func(abi.ChainEpoch)) {
func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn *TestFullNode, cb func(abi.ChainEpoch)) {
for i := 0; i < 1000; i++ {
var (
success bool
@ -93,7 +93,7 @@ func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn TestFullNode, cb fu
if success {
// Wait until it shows up on the given full nodes ChainHead
nloops := 50
nloops := 200
for i := 0; i < nloops; i++ {
ts, err := fn.ChainHead(ctx)
require.NoError(bm.t, err)

View File

@ -21,7 +21,7 @@ import (
)
// RunClientTest exercises some of the Client CLI commands
func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode TestFullNode) {
func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode *TestFullNode) {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()

View File

@ -1,50 +1,68 @@
package kit
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/ipfs/go-cid"
files "github.com/ipfs/go-ipfs-files"
"github.com/ipld/go-car"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/node/impl"
"github.com/ipfs/go-cid"
files "github.com/ipfs/go-ipfs-files"
ipld "github.com/ipfs/go-ipld-format"
dag "github.com/ipfs/go-merkledag"
dstest "github.com/ipfs/go-merkledag/test"
unixfile "github.com/ipfs/go-unixfs/file"
"github.com/ipld/go-car"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
type DealHarness struct {
t *testing.T
client api.FullNode
miner TestMiner
client *TestFullNode
miner *TestMiner
}
type MakeFullDealParams struct {
Ctx context.Context
Rseed int
CarExport bool
FastRet bool
StartEpoch abi.ChainEpoch
DoRetrieval bool
Rseed int
FastRet bool
StartEpoch abi.ChainEpoch
// SuspendUntilCryptoeconStable suspends deal-making until cryptoecon
// parameters have stabilised. This affects projected collateral, and tests
// will fail in network version 13 and higher if deals are started too soon
// after network birth.
//
// The reason is that the formula for collateral calculation takes
// circulating supply into account:
//
// [portion of power this deal will be] * [~1% of tokens].
//
// In the first epochs after genesis, the total circulating supply is
// changing dramatically in percentage terms. Therefore, if the deal is
// proposed too soon, by the time it gets published on chain, the quoted
// provider collateral will no longer be valid.
//
// The observation is that deals fail with:
//
// GasEstimateMessageGas error: estimating gas used: message execution
// failed: exit 16, reason: Provider collateral out of bounds. (RetCode=16)
//
// Enabling this will suspend deal-making until the network has reached a
// height of 300.
SuspendUntilCryptoeconStable bool
}
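// Illustrative only, not part of this diff: a test that needs to run against
// realistic cryptoecon parameters could set the flag above roughly like this
// (assuming `ctx`, `t`, `client` and `miner` come from the surrounding test):
//
//   dh := kit.NewDealHarness(t, client, miner)
//   deal, res, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{
//       Rseed:                        7,
//       SuspendUntilCryptoeconStable: true,
//   })
//   dh.PerformRetrieval(ctx, deal, res.Root, false)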
// NewDealHarness creates a test harness that contains testing utilities for deals.
func NewDealHarness(t *testing.T, client api.FullNode, miner TestMiner) *DealHarness {
func NewDealHarness(t *testing.T, client *TestFullNode, miner *TestMiner) *DealHarness {
return &DealHarness{
t: t,
client: client,
@ -52,43 +70,39 @@ func NewDealHarness(t *testing.T, client api.FullNode, miner TestMiner) *DealHar
}
}
func (dh *DealHarness) MakeFullDeal(params MakeFullDealParams) ([]byte,
*api.DealInfo, cid.Cid) {
res, _, data, err := CreateImportFile(params.Ctx, dh.client, params.Rseed, 0)
if err != nil {
dh.t.Fatal(err)
// MakeOnlineDeal makes an online deal, generating a random file with the
// supplied seed, and setting the specified fast retrieval flag and start epoch
// on the storage deal. It returns when the deal is sealed.
//
// TODO: convert input parameters to struct, and add size as an input param.
func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealParams) (deal *cid.Cid, res *api.ImportRes, path string) {
res, path = dh.client.CreateImportFile(ctx, params.Rseed, 0)
dh.t.Logf("FILE CID: %s", res.Root)
if params.SuspendUntilCryptoeconStable {
dh.t.Logf("deal-making suspending until cryptecon parameters have stabilised")
ts := dh.client.WaitTillChain(ctx, HeightAtLeast(300))
dh.t.Logf("deal-making continuing; current height is %d", ts.Height())
}
fcid := res.Root
fmt.Println("FILE CID: ", fcid)
deal := dh.StartDeal(params.Ctx, fcid, params.FastRet, params.StartEpoch)
deal = dh.StartDeal(ctx, res.Root, params.FastRet, params.StartEpoch)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
dh.WaitDealSealed(params.Ctx, deal, false, false, nil)
dh.WaitDealSealed(ctx, deal, false, false, nil)
// Retrieval
info, err := dh.client.ClientGetDealInfo(params.Ctx, *deal)
require.NoError(dh.t, err)
if params.DoRetrieval {
dh.TestRetrieval(params.Ctx, fcid, &info.PieceCID, params.CarExport, data)
}
return data, info, fcid
return deal, res, path
}
// StartDeal starts a storage deal between the client and the miner.
func (dh *DealHarness) StartDeal(ctx context.Context, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid {
maddr, err := dh.miner.ActorAddress(ctx)
if err != nil {
dh.t.Fatal(err)
}
require.NoError(dh.t, err)
addr, err := dh.client.WalletDefaultAddress(ctx)
if err != nil {
dh.t.Fatal(err)
}
require.NoError(dh.t, err)
deal, err := dh.client.ClientStartDeal(ctx, &api.StartDealParams{
Data: &storagemarket.DataRef{
TransferType: storagemarket.TTGraphsync,
@ -101,12 +115,12 @@ func (dh *DealHarness) StartDeal(ctx context.Context, fcid cid.Cid, fastRet bool
MinBlocksDuration: uint64(build.MinDealDuration),
FastRetrieval: fastRet,
})
if err != nil {
dh.t.Fatalf("%+v", err)
}
require.NoError(dh.t, err)
return deal
}
// WaitDealSealed waits until the deal is sealed.
func (dh *DealHarness) WaitDealSealed(ctx context.Context, deal *cid.Cid, noseal, noSealStart bool, cb func()) {
loop:
for {
@ -128,7 +142,7 @@ loop:
case storagemarket.StorageDealError:
dh.t.Fatal("deal errored", di.Message)
case storagemarket.StorageDealActive:
fmt.Println("COMPLETE", di)
dh.t.Log("COMPLETE", di)
break loop
}
@ -143,7 +157,7 @@ loop:
}
}
fmt.Printf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState])
dh.t.Logf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState])
time.Sleep(time.Second / 2)
if cb != nil {
cb()
@ -151,13 +165,14 @@ loop:
}
}
// WaitDealPublished waits until the deal is published.
func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) {
subCtx, cancel := context.WithCancel(ctx)
defer cancel()
updates, err := dh.miner.MarketGetDealUpdates(subCtx)
if err != nil {
dh.t.Fatal(err)
}
require.NoError(dh.t, err)
for {
select {
case <-ctx.Done():
@ -172,10 +187,10 @@ func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) {
case storagemarket.StorageDealError:
dh.t.Fatal("deal errored", di.Message)
case storagemarket.StorageDealFinalizing, storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
fmt.Println("COMPLETE", di)
dh.t.Log("COMPLETE", di)
return
}
fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
dh.t.Log("Deal state: ", storagemarket.DealStates[di.State])
}
}
}
@ -194,133 +209,103 @@ func (dh *DealHarness) StartSealingWaiting(ctx context.Context) {
require.NoError(dh.t, dh.miner.SectorStartSealing(ctx, snum))
}
flushSealingBatches(dh.t, ctx, dh.miner)
dh.miner.FlushSealingBatches(ctx)
}
}
func (dh *DealHarness) TestRetrieval(ctx context.Context, fcid cid.Cid, piece *cid.Cid, carExport bool, expect []byte) {
offers, err := dh.client.ClientFindData(ctx, fcid, piece)
if err != nil {
dh.t.Fatal(err)
}
func (dh *DealHarness) PerformRetrieval(ctx context.Context, deal *cid.Cid, root cid.Cid, carExport bool) (path string) {
// perform retrieval.
info, err := dh.client.ClientGetDealInfo(ctx, *deal)
require.NoError(dh.t, err)
if len(offers) < 1 {
dh.t.Fatal("no offers")
}
offers, err := dh.client.ClientFindData(ctx, root, &info.PieceCID)
require.NoError(dh.t, err)
require.NotEmpty(dh.t, offers, "no offers")
rpath, err := ioutil.TempDir("", "lotus-retrieve-test-")
if err != nil {
dh.t.Fatal(err)
}
defer os.RemoveAll(rpath) //nolint:errcheck
carFile, err := ioutil.TempFile(dh.t.TempDir(), "ret-car")
require.NoError(dh.t, err)
defer carFile.Close() //nolint:errcheck
caddr, err := dh.client.WalletDefaultAddress(ctx)
if err != nil {
dh.t.Fatal(err)
}
require.NoError(dh.t, err)
ref := &api.FileRef{
Path: filepath.Join(rpath, "ret"),
Path: carFile.Name(),
IsCAR: carExport,
}
updates, err := dh.client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref)
if err != nil {
dh.t.Fatal(err)
}
require.NoError(dh.t, err)
for update := range updates {
if update.Err != "" {
dh.t.Fatalf("retrieval failed: %s", update.Err)
}
}
rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret"))
if err != nil {
dh.t.Fatal(err)
require.Emptyf(dh.t, update.Err, "retrieval failed: %s", update.Err)
}
ret := carFile.Name()
if carExport {
rdata = dh.ExtractCarData(ctx, rdata, rpath)
actualFile := dh.ExtractFileFromCAR(ctx, carFile)
ret = actualFile.Name()
_ = actualFile.Close() //nolint:errcheck
}
if !bytes.Equal(rdata, expect) {
dh.t.Fatal("wrong expect retrieved")
}
return ret
}
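// Illustrative only, not part of this diff: a minimal end-to-end deal cycle
// using the harness above, comparing the imported file against the retrieved
// copy (assuming `ctx`, `t`, `client` and `miner` come from the calling test):
//
//   dh := kit.NewDealHarness(t, client, miner)
//   deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6})
//   outPath := dh.PerformRetrieval(ctx, deal, res.Root, false)
//   kit.AssertFilesEqual(t, inPath, outPath)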
func (dh *DealHarness) ExtractCarData(ctx context.Context, rdata []byte, rpath string) []byte {
func (dh *DealHarness) ExtractFileFromCAR(ctx context.Context, file *os.File) (out *os.File) {
bserv := dstest.Bserv()
ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata))
if err != nil {
dh.t.Fatal(err)
}
ch, err := car.LoadCar(bserv.Blockstore(), file)
require.NoError(dh.t, err)
b, err := bserv.GetBlock(ctx, ch.Roots[0])
if err != nil {
dh.t.Fatal(err)
}
require.NoError(dh.t, err)
nd, err := ipld.Decode(b)
if err != nil {
dh.t.Fatal(err)
}
require.NoError(dh.t, err)
dserv := dag.NewDAGService(bserv)
fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
if err != nil {
dh.t.Fatal(err)
}
outPath := filepath.Join(rpath, "retLoadedCAR")
if err := files.WriteTo(fil, outPath); err != nil {
dh.t.Fatal(err)
}
rdata, err = ioutil.ReadFile(outPath)
if err != nil {
dh.t.Fatal(err)
}
return rdata
require.NoError(dh.t, err)
tmpfile, err := ioutil.TempFile(dh.t.TempDir(), "file-in-car")
require.NoError(dh.t, err)
defer tmpfile.Close() //nolint:errcheck
err = files.WriteTo(fil, tmpfile.Name())
require.NoError(dh.t, err)
return tmpfile
}
type DealsScaffold struct {
Ctx context.Context
Client *impl.FullNodeAPI
Miner TestMiner
BlockMiner *BlockMiner
type RunConcurrentDealsOpts struct {
N int
FastRetrieval bool
CarExport bool
StartEpoch abi.ChainEpoch
}
func ConnectAndStartMining(t *testing.T, blocktime time.Duration, miner TestMiner, clients ...api.FullNode) *BlockMiner {
ctx := context.Background()
for _, c := range clients {
addrinfo, err := c.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
func (dh *DealHarness) RunConcurrentDeals(opts RunConcurrentDealsOpts) {
errgrp, _ := errgroup.WithContext(context.Background())
for i := 0; i < opts.N; i++ {
i := i
errgrp.Go(func() (err error) {
defer func() {
// This is necessary because Go can't deal with test
// failures being reported from child goroutines ¯\_(ツ)_/¯
if r := recover(); r != nil {
err = fmt.Errorf("deal failed: %s", r)
}
}()
deal, res, inPath := dh.MakeOnlineDeal(context.Background(), MakeFullDealParams{
Rseed: 5 + i,
FastRet: opts.FastRetrieval,
StartEpoch: opts.StartEpoch,
})
outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, opts.CarExport)
AssertFilesEqual(dh.t, inPath, outPath)
return nil
})
}
time.Sleep(time.Second)
blockMiner := NewBlockMiner(t, miner)
blockMiner.MineBlocks(ctx, blocktime)
t.Cleanup(blockMiner.Stop)
return blockMiner
}
type TestDealState int
const (
TestDealStateFailed = TestDealState(-1)
TestDealStateInProgress = TestDealState(0)
TestDealStateComplete = TestDealState(1)
)
// CategorizeDealState categorizes deal states into one of three states:
// Complete, InProgress, Failed.
func CategorizeDealState(dealStatus string) TestDealState {
switch dealStatus {
case "StorageDealFailing", "StorageDealError":
return TestDealStateFailed
case "StorageDealStaged", "StorageDealAwaitingPreCommit", "StorageDealSealing", "StorageDealActive", "StorageDealExpired", "StorageDealSlashed":
return TestDealStateComplete
}
return TestDealStateInProgress
require.NoError(dh.t, errgrp.Wait())
}

itests/kit/deals_state.go Normal file
View File

@ -0,0 +1,21 @@
package kit
type TestDealState int
const (
TestDealStateFailed = TestDealState(-1)
TestDealStateInProgress = TestDealState(0)
TestDealStateComplete = TestDealState(1)
)
// CategorizeDealState categorizes deal states into one of three states:
// Complete, InProgress, Failed.
func CategorizeDealState(dealStatus string) TestDealState {
switch dealStatus {
case "StorageDealFailing", "StorageDealError":
return TestDealStateFailed
case "StorageDealStaged", "StorageDealAwaitingPreCommit", "StorageDealSealing", "StorageDealActive", "StorageDealExpired", "StorageDealSlashed":
return TestDealStateComplete
}
return TestDealStateInProgress
}
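// Illustrative only, not part of this diff: callers typically feed the string
// form of a storage market state into CategorizeDealState and branch on the
// result, e.g. while polling a deal (assuming `di` is an *api.DealInfo):
//
//   switch kit.CategorizeDealState(storagemarket.DealStates[di.State]) {
//   case kit.TestDealStateComplete:
//       // terminal success; stop polling
//   case kit.TestDealStateFailed:
//       t.Fatal("deal failed")
//   default:
//       // still in progress; keep polling
//   }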

itests/kit/ensemble.go Normal file
View File

@ -0,0 +1,646 @@
package kit
import (
"bytes"
"context"
"crypto/rand"
"io/ioutil"
"sync"
"testing"
"time"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/go-storedcounter"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/gen"
genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
"github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet"
"github.com/filecoin-project/lotus/cmd/lotus-seed/seed"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
"github.com/filecoin-project/lotus/genesis"
lotusminer "github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/modules/dtypes"
testing2 "github.com/filecoin-project/lotus/node/modules/testing"
"github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/lotus/storage/mockstorage"
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power"
"github.com/ipfs/go-datastore"
libp2pcrypto "github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/peer"
mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
"github.com/stretchr/testify/require"
)
func init() {
chain.BootstrapPeerThreshold = 1
messagepool.HeadChangeCoalesceMinDelay = time.Microsecond
messagepool.HeadChangeCoalesceMaxDelay = 2 * time.Microsecond
messagepool.HeadChangeCoalesceMergeInterval = 100 * time.Nanosecond
}
// Ensemble is a collection of nodes instantiated within a test.
//
// Create a new ensemble with:
//
// ens := kit.NewEnsemble()
//
// Create full nodes and miners:
//
// var full TestFullNode
// var miner TestMiner
// ens.FullNode(&full, opts...) // populates a full node
// ens.Miner(&miner, &full, opts...) // populates a miner, using the full node as its chain daemon
//
// It is possible to pass functional options to set initial balances,
// presealed sectors, owner keys, etc.
//
// After the initial nodes are added, call `ens.Start()` to forge genesis
// and start the network. Mining will NOT be started automatically. It needs
// to be started explicitly by calling `BeginMining`.
//
// Nodes also need to be connected with one another, either via `ens.Connect()`
// or `ens.InterconnectAll()`. A common incantation for simple tests is:
//
// ens.InterconnectAll().BeginMining(blocktime)
//
// You can continue to add more nodes, but you must always follow with
// `ens.Start()` to activate the new nodes.
//
// The API is chainable, so it's possible to do a lot in a very succinct way:
//
// kit.NewEnsemble().FullNode(&full).Miner(&miner, &full).Start().InterconnectAll().BeginMining()
//
// You can also find convenient fullnode:miner presets, such as 1:1, 1:2,
// and 2:1, e.g.:
//
// kit.EnsembleMinimal()
// kit.EnsembleOneTwo()
// kit.EnsembleTwoOne()
//
type Ensemble struct {
t *testing.T
bootstrapped bool
genesisBlock bytes.Buffer
mn mocknet.Mocknet
options *ensembleOpts
inactive struct {
fullnodes []*TestFullNode
miners []*TestMiner
}
active struct {
fullnodes []*TestFullNode
miners []*TestMiner
bms map[*TestMiner]*BlockMiner
}
genesis struct {
miners []genesis.Miner
accounts []genesis.Actor
}
}
// NewEnsemble instantiates a new blank Ensemble.
func NewEnsemble(t *testing.T, opts ...EnsembleOpt) *Ensemble {
options := DefaultEnsembleOpts
for _, o := range opts {
err := o(&options)
require.NoError(t, err)
}
n := &Ensemble{t: t, options: &options}
n.active.bms = make(map[*TestMiner]*BlockMiner)
// add accounts from ensemble options to genesis.
for _, acc := range options.accounts {
n.genesis.accounts = append(n.genesis.accounts, genesis.Actor{
Type: genesis.TAccount,
Balance: acc.initialBalance,
Meta: (&genesis.AccountMeta{Owner: acc.key.Address}).ActorMeta(),
})
}
return n
}
// FullNode enrolls a new full node.
func (n *Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble {
options := DefaultNodeOpts
for _, o := range opts {
err := o(&options)
require.NoError(n.t, err)
}
key, err := wallet.GenerateKey(types.KTBLS)
require.NoError(n.t, err)
if !n.bootstrapped && !options.balance.IsZero() {
// if we still haven't forged genesis, create a key+address, and assign
// it some FIL; this will be set as the default wallet when the node is
// started.
genacc := genesis.Actor{
Type: genesis.TAccount,
Balance: options.balance,
Meta: (&genesis.AccountMeta{Owner: key.Address}).ActorMeta(),
}
n.genesis.accounts = append(n.genesis.accounts, genacc)
}
*full = TestFullNode{t: n.t, options: options, DefaultKey: key}
n.inactive.fullnodes = append(n.inactive.fullnodes, full)
return n
}
// Miner enrolls a new miner, using the provided full node for chain
// interactions.
func (n *Ensemble) Miner(miner *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble {
require.NotNil(n.t, full, "full node required when instantiating miner")
options := DefaultNodeOpts
for _, o := range opts {
err := o(&options)
require.NoError(n.t, err)
}
privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader)
require.NoError(n.t, err)
peerId, err := peer.IDFromPrivateKey(privkey)
require.NoError(n.t, err)
tdir, err := ioutil.TempDir("", "preseal-memgen")
require.NoError(n.t, err)
minerCnt := len(n.inactive.miners) + len(n.active.miners)
actorAddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(minerCnt))
require.NoError(n.t, err)
ownerKey := options.ownerKey
if !n.bootstrapped {
var (
sectors = options.sectors
k *types.KeyInfo
genm *genesis.Miner
)
// create the preseal commitment.
if n.options.mockProofs {
genm, k, err = mockstorage.PreSeal(abi.RegisteredSealProof_StackedDrg2KiBV1, actorAddr, sectors)
} else {
genm, k, err = seed.PreSeal(actorAddr, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, sectors, tdir, []byte("make genesis mem random"), nil, true)
}
require.NoError(n.t, err)
genm.PeerId = peerId
// create an owner key, and assign it some FIL.
ownerKey, err = wallet.NewKey(*k)
require.NoError(n.t, err)
genacc := genesis.Actor{
Type: genesis.TAccount,
Balance: options.balance,
Meta: (&genesis.AccountMeta{Owner: ownerKey.Address}).ActorMeta(),
}
n.genesis.miners = append(n.genesis.miners, *genm)
n.genesis.accounts = append(n.genesis.accounts, genacc)
} else {
require.NotNil(n.t, ownerKey, "owner key can't be nil when initializing a miner after genesis")
}
*miner = TestMiner{
t: n.t,
ActorAddr: actorAddr,
OwnerKey: ownerKey,
FullNode: full,
PresealDir: tdir,
options: options,
}
miner.Libp2p.PeerID = peerId
miner.Libp2p.PrivKey = privkey
n.inactive.miners = append(n.inactive.miners, miner)
return n
}
// Start starts all enrolled nodes.
func (n *Ensemble) Start() *Ensemble {
ctx := context.Background()
var gtempl *genesis.Template
if !n.bootstrapped {
// We haven't been bootstrapped yet, we need to generate genesis and
// create the networking backbone.
gtempl = n.generateGenesis()
n.mn = mocknet.New(ctx)
}
// ---------------------
// FULL NODES
// ---------------------
// Create all inactive full nodes.
for i, full := range n.inactive.fullnodes {
opts := []node.Option{
node.FullAPI(&full.FullNode, node.Lite(full.options.lite)),
node.Online(),
node.Repo(repo.NewMemory(nil)),
node.MockHost(n.mn),
node.Test(),
// so that we subscribe to pubsub topics immediately
node.Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(true)),
}
// append any node builder options.
opts = append(opts, full.options.extraNodeOpts...)
// Either generate the genesis or inject it.
if i == 0 && !n.bootstrapped {
opts = append(opts, node.Override(new(modules.Genesis), testing2.MakeGenesisMem(&n.genesisBlock, *gtempl)))
} else {
opts = append(opts, node.Override(new(modules.Genesis), modules.LoadGenesis(n.genesisBlock.Bytes())))
}
// Are we mocking proofs?
if n.options.mockProofs {
opts = append(opts,
node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
node.Override(new(ffiwrapper.Prover), mock.MockProver),
)
}
// Call option builders, passing active nodes as the parameter
for _, bopt := range full.options.optBuilders {
opts = append(opts, bopt(n.active.fullnodes))
}
// Construct the full node.
stop, err := node.New(ctx, opts...)
require.NoError(n.t, err)
addr, err := full.WalletImport(context.Background(), &full.DefaultKey.KeyInfo)
require.NoError(n.t, err)
err = full.WalletSetDefault(context.Background(), addr)
require.NoError(n.t, err)
// Are we hitting this node through its RPC?
if full.options.rpc {
withRPC := fullRpc(n.t, full)
n.inactive.fullnodes[i] = withRPC
}
n.t.Cleanup(func() { _ = stop(context.Background()) })
n.active.fullnodes = append(n.active.fullnodes, full)
}
// If we are here, we have processed all inactive fullnodes and moved them
// to active, so clear the slice.
n.inactive.fullnodes = n.inactive.fullnodes[:0]
// Link all the nodes.
err := n.mn.LinkAll()
require.NoError(n.t, err)
// ---------------------
// MINERS
// ---------------------
// Create all inactive miners.
for i, m := range n.inactive.miners {
if n.bootstrapped {
// this is a miner created after genesis, so it won't have a preseal.
// we need to create it on chain.
params, aerr := actors.SerializeParams(&power2.CreateMinerParams{
Owner: m.OwnerKey.Address,
Worker: m.OwnerKey.Address,
SealProofType: m.options.proofType,
Peer: abi.PeerID(m.Libp2p.PeerID),
})
require.NoError(n.t, aerr)
createStorageMinerMsg := &types.Message{
From: m.OwnerKey.Address,
To: power.Address,
Value: big.Zero(),
Method: power.Methods.CreateMiner,
Params: params,
GasLimit: 0,
GasPremium: big.NewInt(5252),
}
signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, nil)
require.NoError(n.t, err)
mw, err := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
require.NoError(n.t, err)
require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode)
var retval power2.CreateMinerReturn
err = retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return))
require.NoError(n.t, err, "failed to create miner")
m.ActorAddr = retval.IDAddress
}
has, err := m.FullNode.WalletHas(ctx, m.OwnerKey.Address)
require.NoError(n.t, err)
// Only import the owner's full key into our companion full node if we
// don't already have it.
if !has {
_, err = m.FullNode.WalletImport(ctx, &m.OwnerKey.KeyInfo)
require.NoError(n.t, err)
}
// // Set it as the default address.
// err = m.FullNode.WalletSetDefault(ctx, m.OwnerAddr.Address)
// require.NoError(n.t, err)
r := repo.NewMemory(nil)
lr, err := r.Lock(repo.StorageMiner)
require.NoError(n.t, err)
ks, err := lr.KeyStore()
require.NoError(n.t, err)
pk, err := m.Libp2p.PrivKey.Bytes()
require.NoError(n.t, err)
err = ks.Put("libp2p-host", types.KeyInfo{
Type: "libp2p-host",
PrivateKey: pk,
})
require.NoError(n.t, err)
ds, err := lr.Datastore(context.TODO(), "/metadata")
require.NoError(n.t, err)
err = ds.Put(datastore.NewKey("miner-address"), m.ActorAddr.Bytes())
require.NoError(n.t, err)
nic := storedcounter.New(ds, datastore.NewKey(modules.StorageCounterDSPrefix))
for i := 0; i < m.options.sectors; i++ {
_, err := nic.Next()
require.NoError(n.t, err)
}
_, err = nic.Next()
require.NoError(n.t, err)
err = lr.Close()
require.NoError(n.t, err)
enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)})
require.NoError(n.t, err)
msg := &types.Message{
From: m.OwnerKey.Address,
To: m.ActorAddr,
Method: miner.Methods.ChangePeerID,
Params: enc,
Value: types.NewInt(0),
}
_, err = m.FullNode.MpoolPushMessage(ctx, msg, nil)
require.NoError(n.t, err)
var mineBlock = make(chan lotusminer.MineReq)
opts := []node.Option{
node.StorageMiner(&m.StorageMiner),
node.Online(),
node.Repo(r),
node.Test(),
node.MockHost(n.mn),
node.Override(new(v1api.FullNode), m.FullNode.FullNode),
node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, m.ActorAddr)),
// disable resource filtering so that local worker gets assigned tasks
// regardless of system pressure.
node.Override(new(sectorstorage.SealerConfig), func() sectorstorage.SealerConfig {
scfg := config.DefaultStorageMiner()
scfg.Storage.ResourceFiltering = sectorstorage.ResourceFilteringDisabled
return scfg.Storage
}),
}
// append any node builder options.
opts = append(opts, m.options.extraNodeOpts...)
idAddr, err := address.IDFromAddress(m.ActorAddr)
require.NoError(n.t, err)
// preload preseals if the network still hasn't bootstrapped.
var presealSectors []abi.SectorID
if !n.bootstrapped {
sectors := n.genesis.miners[i].Sectors
for _, sector := range sectors {
presealSectors = append(presealSectors, abi.SectorID{
Miner: abi.ActorID(idAddr),
Number: sector.SectorID,
})
}
}
if n.options.mockProofs {
opts = append(opts,
node.Override(new(*mock.SectorMgr), func() (*mock.SectorMgr, error) {
return mock.NewMockSectorMgr(presealSectors), nil
}),
node.Override(new(sectorstorage.SectorManager), node.From(new(*mock.SectorMgr))),
node.Override(new(sectorstorage.Unsealer), node.From(new(*mock.SectorMgr))),
node.Override(new(sectorstorage.PieceProvider), node.From(new(*mock.SectorMgr))),
node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
node.Override(new(ffiwrapper.Prover), mock.MockProver),
node.Unset(new(*sectorstorage.Manager)),
)
}
// start node
stop, err := node.New(ctx, opts...)
require.NoError(n.t, err)
// using real proofs, therefore need real sectors.
if !n.bootstrapped && !n.options.mockProofs {
err := m.StorageAddLocal(ctx, m.PresealDir)
require.NoError(n.t, err)
}
n.t.Cleanup(func() { _ = stop(context.Background()) })
// Are we hitting this node through its RPC?
if m.options.rpc {
withRPC := minerRpc(n.t, m)
n.inactive.miners[i] = withRPC
}
mineOne := func(ctx context.Context, req lotusminer.MineReq) error {
select {
case mineBlock <- req:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
m.MineOne = mineOne
m.Stop = stop
n.active.miners = append(n.active.miners, m)
}
// If we are here, we have processed all inactive miners and moved them
// to active, so clear the slice.
n.inactive.miners = n.inactive.miners[:0]
// Link all the nodes.
err = n.mn.LinkAll()
require.NoError(n.t, err)
if !n.bootstrapped && len(n.active.miners) > 0 {
// We have *just* bootstrapped, so mine 2 blocks to set up some CE stuff in some actors
var wait sync.Mutex
wait.Lock()
observer := n.active.fullnodes[0]
bm := NewBlockMiner(n.t, n.active.miners[0])
n.t.Cleanup(bm.Stop)
bm.MineUntilBlock(ctx, observer, func(epoch abi.ChainEpoch) {
wait.Unlock()
})
wait.Lock()
bm.MineUntilBlock(ctx, observer, func(epoch abi.ChainEpoch) {
wait.Unlock()
})
wait.Lock()
}
n.bootstrapped = true
return n
}
// InterconnectAll connects all miners and full nodes to one another.
func (n *Ensemble) InterconnectAll() *Ensemble {
// connect full nodes to miners.
for _, from := range n.active.fullnodes {
for _, to := range n.active.miners {
// []*TestMiner to []api.CommonAPI type coercion not possible
// so cannot use variadic form.
n.Connect(from, to)
}
}
// connect full nodes between each other, skipping ourselves.
last := len(n.active.fullnodes) - 1
for i, from := range n.active.fullnodes {
if i == last {
continue
}
for _, to := range n.active.fullnodes[i+1:] {
n.Connect(from, to)
}
}
return n
}
// Connect connects one full node to the provided full nodes.
func (n *Ensemble) Connect(from api.Common, to ...api.Common) *Ensemble {
addr, err := from.NetAddrsListen(context.Background())
require.NoError(n.t, err)
for _, other := range to {
err = other.NetConnect(context.Background(), addr)
require.NoError(n.t, err)
}
return n
}
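// Illustrative only, not part of this diff: a node added after bootstrap must
// be started and wired up explicitly (assuming `client` is an already-active
// *kit.TestFullNode):
//
//   var late kit.TestFullNode
//   ens.FullNode(&late).Start()
//   ens.Connect(&late, client)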
// BeginMining kicks off mining for the specified miners. If nil or 0-length,
// it will kick off mining for all enrolled and active miners. It also adds a
// cleanup function to stop all mining operations on test teardown.
func (n *Ensemble) BeginMining(blocktime time.Duration, miners ...*TestMiner) []*BlockMiner {
ctx := context.Background()
// wait one second to make sure that nodes are connected and have handshaken.
// TODO make this deterministic by listening to identify events on the
// libp2p eventbus instead (or something else).
time.Sleep(1 * time.Second)
var bms []*BlockMiner
if len(miners) == 0 {
// no miners have been provided explicitly, instantiate block miners
// for all active miners that aren't still mining.
for _, m := range n.active.miners {
if _, ok := n.active.bms[m]; ok {
continue // skip, already have a block miner
}
miners = append(miners, m)
}
}
for _, m := range miners {
bm := NewBlockMiner(n.t, m)
bm.MineBlocks(ctx, blocktime)
n.t.Cleanup(bm.Stop)
bms = append(bms, bm)
n.active.bms[m] = bm
}
return bms
}
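// Illustrative only, not part of this diff: typical usage is to interconnect
// and begin mining in one chain, then drive the returned block miners directly,
// e.g. to inject null rounds:
//
//   bms := ens.InterconnectAll().BeginMining(10 * time.Millisecond)
//   bms[0].InjectNulls(5)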
func (n *Ensemble) generateGenesis() *genesis.Template {
var verifRoot = gen.DefaultVerifregRootkeyActor
if k := n.options.verifiedRoot.key; k != nil {
verifRoot = genesis.Actor{
Type: genesis.TAccount,
Balance: n.options.verifiedRoot.initialBalance,
Meta: (&genesis.AccountMeta{Owner: k.Address}).ActorMeta(),
}
}
templ := &genesis.Template{
NetworkVersion: network.Version0,
Accounts: n.genesis.accounts,
Miners: n.genesis.miners,
NetworkName: "test",
Timestamp: uint64(time.Now().Unix() - int64(n.options.pastOffset.Seconds())),
VerifregRootKey: verifRoot,
RemainderAccount: gen.DefaultRemainderAccountActor,
}
return templ
}

View File

@ -0,0 +1,55 @@
package kit
import (
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/wallet"
)
type EnsembleOpt func(opts *ensembleOpts) error
type genesisAccount struct {
key *wallet.Key
initialBalance abi.TokenAmount
}
type ensembleOpts struct {
pastOffset time.Duration
verifiedRoot genesisAccount
accounts []genesisAccount
mockProofs bool
}
var DefaultEnsembleOpts = ensembleOpts{
pastOffset: 10000000 * time.Second, // time sufficiently in the past to trigger catch-up mining.
}
// MockProofs activates mock proofs for the entire ensemble.
func MockProofs() EnsembleOpt {
return func(opts *ensembleOpts) error {
opts.mockProofs = true
return nil
}
}
// RootVerifier specifies the key to be enlisted as the verified registry root,
// as well as the initial balance to be attributed during genesis.
func RootVerifier(key *wallet.Key, balance abi.TokenAmount) EnsembleOpt {
return func(opts *ensembleOpts) error {
opts.verifiedRoot.key = key
opts.verifiedRoot.initialBalance = balance
return nil
}
}
// Account sets up an account at genesis with the specified key and balance.
func Account(key *wallet.Key, balance abi.TokenAmount) EnsembleOpt {
return func(opts *ensembleOpts) error {
opts.accounts = append(opts.accounts, genesisAccount{
key: key,
initialBalance: balance,
})
return nil
}
}
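// Illustrative only, not part of this diff: endowing a known key at genesis via
// the Account option (the balance arithmetic mirrors patterns used elsewhere in
// this kit; `t` comes from the calling test):
//
//   key, err := wallet.GenerateKey(types.KTSecp256k1)
//   require.NoError(t, err)
//   bal := big.Mul(big.NewInt(100), big.NewInt(int64(build.FilecoinPrecision)))
//   client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.Account(key, bal))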

View File

@ -0,0 +1,70 @@
package kit
import "testing"
// EnsembleMinimal creates and starts an Ensemble with a single full node and a single miner.
// It does not interconnect nodes nor does it begin mining.
//
// This function supports passing both ensemble and node functional options.
// Functional options are applied to all nodes.
func EnsembleMinimal(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *Ensemble) {
eopts, nopts := siftOptions(t, opts)
var (
full TestFullNode
miner TestMiner
)
ens := NewEnsemble(t, eopts...).FullNode(&full, nopts...).Miner(&miner, &full, nopts...).Start()
return &full, &miner, ens
}
// EnsembleTwoOne creates and starts an Ensemble with two full nodes and one miner.
// It does not interconnect nodes nor does it begin mining.
//
// This function supports passing both ensemble and node functional options.
// Functional options are applied to all nodes.
func EnsembleTwoOne(t *testing.T, opts ...interface{}) (*TestFullNode, *TestFullNode, *TestMiner, *Ensemble) {
eopts, nopts := siftOptions(t, opts)
var (
one, two TestFullNode
miner TestMiner
)
ens := NewEnsemble(t, eopts...).FullNode(&one, nopts...).FullNode(&two, nopts...).Miner(&miner, &one, nopts...).Start()
return &one, &two, &miner, ens
}
// EnsembleOneTwo creates and starts an Ensemble with one full node and two miners.
// It does not interconnect nodes nor does it begin mining.
//
// This function supports passing both ensemble and node functional options.
// Functional options are applied to all nodes.
func EnsembleOneTwo(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *TestMiner, *Ensemble) {
eopts, nopts := siftOptions(t, opts)
var (
full TestFullNode
one, two TestMiner
)
ens := NewEnsemble(t, eopts...).
FullNode(&full, nopts...).
Miner(&one, &full, nopts...).
Miner(&two, &full, nopts...).
Start()
return &full, &one, &two, ens
}
func siftOptions(t *testing.T, opts []interface{}) (eopts []EnsembleOpt, nopts []NodeOpt) {
for _, v := range opts {
switch o := v.(type) {
case EnsembleOpt:
eopts = append(eopts, o)
case NodeOpt:
nopts = append(nopts, o)
default:
t.Fatalf("invalid option type: %T", o)
}
}
return eopts, nopts
}
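// Illustrative only, not part of this diff: the presets accept ensemble-level
// and node-level options in a single variadic list, e.g.:
//
//   client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC())
//   ens.InterconnectAll().BeginMining(10 * time.Millisecond)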

itests/kit/files.go Normal file
View File

@ -0,0 +1,58 @@
package kit
import (
"bytes"
"io"
"math/rand"
"os"
"testing"
"github.com/minio/blake2b-simd"
"github.com/stretchr/testify/require"
)
// CreateRandomFile creates a random file with the provided seed and the
// provided size.
func CreateRandomFile(t *testing.T, rseed, size int) (path string) {
if size == 0 {
size = 1600
}
source := io.LimitReader(rand.New(rand.NewSource(int64(rseed))), int64(size))
file, err := os.CreateTemp(t.TempDir(), "sourcefile.dat")
require.NoError(t, err)
n, err := io.Copy(file, source)
require.NoError(t, err)
require.EqualValues(t, n, size)
return file.Name()
}
// AssertFilesEqual compares two files by blake2b hash equality and
// fails the test if unequal.
func AssertFilesEqual(t *testing.T, left, right string) {
// initialize hashes.
leftH, rightH := blake2b.New256(), blake2b.New256()
// open files.
leftF, err := os.Open(left)
require.NoError(t, err)
rightF, err := os.Open(right)
require.NoError(t, err)
// feed hash functions.
_, err = io.Copy(leftH, leftF)
require.NoError(t, err)
_, err = io.Copy(rightH, rightF)
require.NoError(t, err)
// compute digests.
leftD, rightD := leftH.Sum(nil), rightH.Sum(nil)
require.True(t, bytes.Equal(leftD, rightD))
}
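// Illustrative only, not part of this diff: CreateRandomFile is deterministic in
// the seed and size, so two files generated with the same arguments compare equal:
//
//   left := kit.CreateRandomFile(t, 42, 2048)
//   right := kit.CreateRandomFile(t, 42, 2048)
//   kit.AssertFilesEqual(t, left, right)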

View File

@ -5,6 +5,7 @@ import (
"testing"
"github.com/filecoin-project/go-state-types/abi"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/api"
@ -13,11 +14,9 @@ import (
// SendFunds sends funds from the default wallet of the specified sender node
// to the recipient address.
func SendFunds(ctx context.Context, t *testing.T, sender TestFullNode, recipient address.Address, amount abi.TokenAmount) {
func SendFunds(ctx context.Context, t *testing.T, sender *TestFullNode, recipient address.Address, amount abi.TokenAmount) {
senderAddr, err := sender.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
msg := &types.Message{
From: senderAddr,
@ -26,14 +25,10 @@ func SendFunds(ctx context.Context, t *testing.T, sender TestFullNode, recipient
}
sm, err := sender.MpoolPushMessage(ctx, msg, nil)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
res, err := sender.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send money")
}
require.NoError(t, err)
require.EqualValues(t, 0, res.Receipt.ExitCode, "did not successfully send funds")
}
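// Illustrative only, not part of this diff: funding a fresh wallet on a second
// node from the first node's default wallet (assuming `ctx`, `t`, `full1` and
// `full2` come from the calling test):
//
//   addr, err := full2.WalletNew(ctx, types.KTSecp256k1)
//   require.NoError(t, err)
//   kit.SendFunds(ctx, t, full1, addr, abi.NewTokenAmount(1e18))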

View File

@ -3,7 +3,6 @@ package kit
import (
"fmt"
"os"
"strings"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/build"
@ -12,21 +11,19 @@ import (
)
func init() {
bin := os.Args[0]
if !strings.HasSuffix(bin, ".test") {
panic("package itests/kit must only be imported from tests")
}
_ = logging.SetLogLevel("*", "INFO")
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
err := os.Setenv("BELLMAN_NO_GPU", "1")
if err != nil {
panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
}
build.InsecurePoStValidation = true
if err := os.Setenv("BELLMAN_NO_GPU", "1"); err != nil {
panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
}
if err := os.Setenv("LOTUS_DISABLE_WATCHDOG", "1"); err != nil {
panic(fmt.Sprintf("failed to set LOTUS_DISABLE_WATCHDOG env variable: %s", err))
}
}

View File

@ -8,7 +8,7 @@ import (
func QuietMiningLogs() {
lotuslog.SetupLogLevels()
_ = logging.SetLogLevel("miner", "ERROR")
_ = logging.SetLogLevel("miner", "ERROR") // set this to INFO to watch mining happen.
_ = logging.SetLogLevel("chainstore", "ERROR")
_ = logging.SetLogLevel("chain", "ERROR")
_ = logging.SetLogLevel("sub", "ERROR")

View File

@ -1,87 +0,0 @@
package kit
import (
"context"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/go-address"
)
func StartOneNodeOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) (TestFullNode, address.Address) {
n, sn := RPCMockMinerBuilder(t, OneFull, OneMiner)
full := n[0]
miner := sn[0]
// Get everyone connected
addrs, err := full.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
// Start mining blocks
bm := NewBlockMiner(t, miner)
bm.MineBlocks(ctx, blocktime)
t.Cleanup(bm.Stop)
// Get the full node's wallet address
fullAddr, err := full.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
// Create mock CLI
return full, fullAddr
}
func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) ([]TestFullNode, []address.Address) {
n, sn := RPCMockMinerBuilder(t, TwoFull, OneMiner)
fullNode1 := n[0]
fullNode2 := n[1]
miner := sn[0]
// Get everyone connected
addrs, err := fullNode1.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := fullNode2.NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
// Start mining blocks
bm := NewBlockMiner(t, miner)
bm.MineBlocks(ctx, blocktime)
t.Cleanup(bm.Stop)
// Send some funds to register the second node
fullNodeAddr2, err := fullNode2.WalletNew(ctx, types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
SendFunds(ctx, t, fullNode1, fullNodeAddr2, abi.NewTokenAmount(1e18))
// Get the first node's address
fullNodeAddr1, err := fullNode1.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
// Create mock CLI
return n, []address.Address{fullNodeAddr1, fullNodeAddr2}
}

View File

@ -1,670 +0,0 @@
package kit
import (
"bytes"
"context"
"crypto/rand"
"io/ioutil"
"net/http"
"net/http/httptest"
"sync"
"testing"
"time"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/go-storedcounter"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/client"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/gen"
genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
"github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet"
"github.com/filecoin-project/lotus/cmd/lotus-seed/seed"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
"github.com/filecoin-project/lotus/genesis"
lotusminer "github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/modules/dtypes"
testing2 "github.com/filecoin-project/lotus/node/modules/testing"
"github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/lotus/storage/mockstorage"
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power"
"github.com/ipfs/go-datastore"
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/peer"
mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
"github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/stretchr/testify/require"
)
func init() {
chain.BootstrapPeerThreshold = 1
messagepool.HeadChangeCoalesceMinDelay = time.Microsecond
messagepool.HeadChangeCoalesceMaxDelay = 2 * time.Microsecond
messagepool.HeadChangeCoalesceMergeInterval = 100 * time.Nanosecond
}
func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Address, act address.Address, pk crypto.PrivKey, tnd TestFullNode, mn mocknet.Mocknet, opts node.Option) TestMiner {
r := repo.NewMemory(nil)
lr, err := r.Lock(repo.StorageMiner)
require.NoError(t, err)
ks, err := lr.KeyStore()
require.NoError(t, err)
kbytes, err := pk.Bytes()
require.NoError(t, err)
err = ks.Put("libp2p-host", types.KeyInfo{
Type: "libp2p-host",
PrivateKey: kbytes,
})
require.NoError(t, err)
ds, err := lr.Datastore(context.TODO(), "/metadata")
require.NoError(t, err)
err = ds.Put(datastore.NewKey("miner-address"), act.Bytes())
require.NoError(t, err)
nic := storedcounter.New(ds, datastore.NewKey(modules.StorageCounterDSPrefix))
for i := 0; i < GenesisPreseals; i++ {
_, err := nic.Next()
require.NoError(t, err)
}
_, err = nic.Next()
require.NoError(t, err)
err = lr.Close()
require.NoError(t, err)
peerid, err := peer.IDFromPrivateKey(pk)
require.NoError(t, err)
enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(peerid)})
require.NoError(t, err)
msg := &types.Message{
To: act,
From: waddr,
Method: miner.Methods.ChangePeerID,
Params: enc,
Value: types.NewInt(0),
}
_, err = tnd.MpoolPushMessage(ctx, msg, nil)
require.NoError(t, err)
// start node
var minerapi api.StorageMiner
mineBlock := make(chan lotusminer.MineReq)
stop, err := node.New(ctx,
node.StorageMiner(&minerapi),
node.Online(),
node.Repo(r),
node.Test(),
node.MockHost(mn),
node.Override(new(v1api.FullNode), tnd),
node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, act)),
node.Override(new(*sectorstorage.SealerConfig), func() *sectorstorage.SealerConfig {
scfg := config.DefaultStorageMiner()
scfg.Storage.ResourceFiltering = sectorstorage.ResourceFilteringDisabled
return &scfg.Storage
}),
opts,
)
if err != nil {
t.Fatalf("failed to construct node: %v", err)
}
t.Cleanup(func() { _ = stop(context.Background()) })
/*// Bootstrap with full node
remoteAddrs, err := tnd.NetAddrsListen(Ctx)
require.NoError(t, err)
err = minerapi.NetConnect(Ctx, remoteAddrs)
require.NoError(t, err)*/
mineOne := func(ctx context.Context, req lotusminer.MineReq) error {
select {
case mineBlock <- req:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
return TestMiner{StorageMiner: minerapi, MineOne: mineOne, Stop: stop}
}
func storageBuilder(parentNode TestFullNode, mn mocknet.Mocknet, opts node.Option) MinerBuilder {
return func(ctx context.Context, t *testing.T, spt abi.RegisteredSealProof, owner address.Address) TestMiner {
pk, _, err := crypto.GenerateEd25519Key(rand.Reader)
require.NoError(t, err)
minerPid, err := peer.IDFromPrivateKey(pk)
require.NoError(t, err)
params, serr := actors.SerializeParams(&power2.CreateMinerParams{
Owner: owner,
Worker: owner,
SealProofType: spt,
Peer: abi.PeerID(minerPid),
})
require.NoError(t, serr)
createStorageMinerMsg := &types.Message{
To: power.Address,
From: owner,
Value: big.Zero(),
Method: power.Methods.CreateMiner,
Params: params,
GasLimit: 0,
GasPremium: big.NewInt(5252),
}
signed, err := parentNode.MpoolPushMessage(ctx, createStorageMinerMsg, nil)
require.NoError(t, err)
mw, err := parentNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Equal(t, exitcode.Ok, mw.Receipt.ExitCode)
var retval power2.CreateMinerReturn
err = retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return))
require.NoError(t, err)
return CreateTestStorageNode(ctx, t, owner, retval.IDAddress, pk, parentNode, mn, opts)
}
}
func Builder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
return mockBuilderOpts(t, fullOpts, storage, false)
}
func RPCBuilder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
return mockBuilderOpts(t, fullOpts, storage, true)
}
func MockMinerBuilder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
return mockMinerBuilderOpts(t, fullOpts, storage, false)
}
func RPCMockMinerBuilder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
return mockMinerBuilderOpts(t, fullOpts, storage, true)
}
func mockBuilderOpts(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner, rpc bool) ([]TestFullNode, []TestMiner) {
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
mn := mocknet.New(ctx)
fulls := make([]TestFullNode, len(fullOpts))
miners := make([]TestMiner, len(storage))
// *****
pk, _, err := crypto.GenerateEd25519Key(rand.Reader)
require.NoError(t, err)
minerPid, err := peer.IDFromPrivateKey(pk)
require.NoError(t, err)
var genbuf bytes.Buffer
if len(storage) > 1 {
panic("need more peer IDs")
}
// *****
// PRESEAL SECTION, TRY TO REPLACE WITH BETTER IN THE FUTURE
// TODO: would be great if there was a better way to fake the preseals
var (
genms []genesis.Miner
maddrs []address.Address
genaccs []genesis.Actor
keys []*wallet.Key
)
var presealDirs []string
for i := 0; i < len(storage); i++ {
maddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(i))
if err != nil {
t.Fatal(err)
}
tdir, err := ioutil.TempDir("", "preseal-memgen")
if err != nil {
t.Fatal(err)
}
genm, k, err := seed.PreSeal(maddr, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, GenesisPreseals, tdir, []byte("make genesis mem random"), nil, true)
if err != nil {
t.Fatal(err)
}
genm.PeerId = minerPid
wk, err := wallet.NewKey(*k)
if err != nil {
t.Fatal(err)
}
genaccs = append(genaccs, genesis.Actor{
Type: genesis.TAccount,
Balance: big.Mul(big.NewInt(400000000), types.NewInt(build.FilecoinPrecision)),
Meta: (&genesis.AccountMeta{Owner: wk.Address}).ActorMeta(),
})
keys = append(keys, wk)
presealDirs = append(presealDirs, tdir)
maddrs = append(maddrs, maddr)
genms = append(genms, *genm)
}
rkhKey, err := wallet.GenerateKey(types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
vrk := genesis.Actor{
Type: genesis.TAccount,
Balance: big.Mul(big.Div(big.NewInt(int64(build.FilBase)), big.NewInt(100)), big.NewInt(int64(build.FilecoinPrecision))),
Meta: (&genesis.AccountMeta{Owner: rkhKey.Address}).ActorMeta(),
}
keys = append(keys, rkhKey)
templ := &genesis.Template{
NetworkVersion: network.Version0,
Accounts: genaccs,
Miners: genms,
NetworkName: "test",
Timestamp: uint64(time.Now().Unix() - 10000), // some time sufficiently far in the past
VerifregRootKey: vrk,
RemainderAccount: gen.DefaultRemainderAccountActor,
}
// END PRESEAL SECTION
for i := 0; i < len(fullOpts); i++ {
var genesis node.Option
if i == 0 {
genesis = node.Override(new(modules.Genesis), testing2.MakeGenesisMem(&genbuf, *templ))
} else {
genesis = node.Override(new(modules.Genesis), modules.LoadGenesis(genbuf.Bytes()))
}
stop, err := node.New(ctx,
node.FullAPI(&fulls[i].FullNode, node.Lite(fullOpts[i].Lite)),
node.Online(),
node.Repo(repo.NewMemory(nil)),
node.MockHost(mn),
node.Test(),
genesis,
fullOpts[i].Opts(fulls),
)
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() { _ = stop(context.Background()) })
if rpc {
fulls[i] = fullRpc(t, fulls[i])
}
fulls[i].Stb = storageBuilder(fulls[i], mn, node.Options())
}
if _, err := fulls[0].FullNode.WalletImport(ctx, &rkhKey.KeyInfo); err != nil {
t.Fatal(err)
}
for i, def := range storage {
// TODO: support non-bootstrap miners
if i != 0 {
t.Fatal("only one storage node supported")
}
if def.Full != 0 {
t.Fatal("storage nodes only supported on the first full node")
}
f := fulls[def.Full]
if _, err := f.FullNode.WalletImport(ctx, &keys[i].KeyInfo); err != nil {
t.Fatal(err)
}
if err := f.FullNode.WalletSetDefault(ctx, keys[i].Address); err != nil {
t.Fatal(err)
}
genMiner := maddrs[i]
wa := genms[i].Worker
opts := def.Opts
if opts == nil {
opts = node.Options()
}
miners[i] = CreateTestStorageNode(ctx, t, wa, genMiner, pk, f, mn, opts)
if err := miners[i].StorageAddLocal(ctx, presealDirs[i]); err != nil {
t.Fatalf("%+v", err)
}
/*
sma := miners[i].StorageMiner.(*impl.StorageMinerAPI)
psd := presealDirs[i]
*/
if rpc {
miners[i] = storerRpc(t, miners[i])
}
}
if err := mn.LinkAll(); err != nil {
t.Fatal(err)
}
if len(miners) > 0 {
// Mine 2 blocks to set up some CE stuff in some actors
var wait sync.Mutex
wait.Lock()
bm := NewBlockMiner(t, miners[0])
t.Cleanup(bm.Stop)
bm.MineUntilBlock(ctx, fulls[0], func(epoch abi.ChainEpoch) {
wait.Unlock()
})
wait.Lock()
bm.MineUntilBlock(ctx, fulls[0], func(epoch abi.ChainEpoch) {
wait.Unlock()
})
wait.Lock()
}
return fulls, miners
}
func mockMinerBuilderOpts(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner, rpc bool) ([]TestFullNode, []TestMiner) {
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
mn := mocknet.New(ctx)
fulls := make([]TestFullNode, len(fullOpts))
miners := make([]TestMiner, len(storage))
var genbuf bytes.Buffer
// PRESEAL SECTION, TRY TO REPLACE WITH BETTER IN THE FUTURE
// TODO: would be great if there was a better way to fake the preseals
var (
genms []genesis.Miner
genaccs []genesis.Actor
maddrs []address.Address
keys []*wallet.Key
pidKeys []crypto.PrivKey
)
for i := 0; i < len(storage); i++ {
maddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(i))
if err != nil {
t.Fatal(err)
}
preseals := storage[i].Preseal
if preseals == PresealGenesis {
preseals = GenesisPreseals
}
genm, k, err := mockstorage.PreSeal(abi.RegisteredSealProof_StackedDrg2KiBV1, maddr, preseals)
if err != nil {
t.Fatal(err)
}
pk, _, err := crypto.GenerateEd25519Key(rand.Reader)
require.NoError(t, err)
minerPid, err := peer.IDFromPrivateKey(pk)
require.NoError(t, err)
genm.PeerId = minerPid
wk, err := wallet.NewKey(*k)
if err != nil {
t.Fatal(err)
}
genaccs = append(genaccs, genesis.Actor{
Type: genesis.TAccount,
Balance: big.Mul(big.NewInt(400000000), types.NewInt(build.FilecoinPrecision)),
Meta: (&genesis.AccountMeta{Owner: wk.Address}).ActorMeta(),
})
keys = append(keys, wk)
pidKeys = append(pidKeys, pk)
maddrs = append(maddrs, maddr)
genms = append(genms, *genm)
}
rkhKey, err := wallet.GenerateKey(types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
vrk := genesis.Actor{
Type: genesis.TAccount,
Balance: big.Mul(big.Div(big.NewInt(int64(build.FilBase)), big.NewInt(100)), big.NewInt(int64(build.FilecoinPrecision))),
Meta: (&genesis.AccountMeta{Owner: rkhKey.Address}).ActorMeta(),
}
keys = append(keys, rkhKey)
templ := &genesis.Template{
NetworkVersion: network.Version0,
Accounts: genaccs,
Miners: genms,
NetworkName: "test",
Timestamp: uint64(time.Now().Unix()) - (build.BlockDelaySecs * 20000),
VerifregRootKey: vrk,
RemainderAccount: gen.DefaultRemainderAccountActor,
}
// END PRESEAL SECTION
for i := 0; i < len(fullOpts); i++ {
var genesis node.Option
if i == 0 {
genesis = node.Override(new(modules.Genesis), testing2.MakeGenesisMem(&genbuf, *templ))
} else {
genesis = node.Override(new(modules.Genesis), modules.LoadGenesis(genbuf.Bytes()))
}
stop, err := node.New(ctx,
node.FullAPI(&fulls[i].FullNode, node.Lite(fullOpts[i].Lite)),
node.Online(),
node.Repo(repo.NewMemory(nil)),
node.MockHost(mn),
node.Test(),
node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
node.Override(new(ffiwrapper.Prover), mock.MockProver),
// so that we subscribe to pubsub topics immediately
node.Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(true)),
genesis,
fullOpts[i].Opts(fulls),
)
if err != nil {
t.Fatalf("%+v", err)
}
t.Cleanup(func() { _ = stop(context.Background()) })
if rpc {
fulls[i] = fullRpc(t, fulls[i])
}
fulls[i].Stb = storageBuilder(fulls[i], mn, node.Options(
node.Override(new(*mock.SectorMgr), func() (*mock.SectorMgr, error) {
return mock.NewMockSectorMgr(nil), nil
}),
node.Override(new(sectorstorage.SectorManager), node.From(new(*mock.SectorMgr))),
node.Override(new(sectorstorage.Unsealer), node.From(new(*mock.SectorMgr))),
node.Override(new(sectorstorage.PieceProvider), node.From(new(*mock.SectorMgr))),
node.Override(new(*sectorstorage.SealerConfig), func() *sectorstorage.SealerConfig {
scfg := config.DefaultStorageMiner()
scfg.Storage.ResourceFiltering = sectorstorage.ResourceFilteringDisabled
return &scfg.Storage
}),
node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
node.Override(new(ffiwrapper.Prover), mock.MockProver),
node.Unset(new(*sectorstorage.Manager)),
))
}
if _, err := fulls[0].FullNode.WalletImport(ctx, &rkhKey.KeyInfo); err != nil {
t.Fatal(err)
}
for i, def := range storage {
// TODO: support non-bootstrap miners
minerID := abi.ActorID(genesis2.MinerStart + uint64(i))
if def.Full != 0 {
t.Fatal("storage nodes only supported on the first full node")
}
f := fulls[def.Full]
if _, err := f.FullNode.WalletImport(ctx, &keys[i].KeyInfo); err != nil {
t.Fatal(err)
}
if err := f.FullNode.WalletSetDefault(ctx, keys[i].Address); err != nil {
t.Fatal(err)
}
sectors := make([]abi.SectorID, len(genms[i].Sectors))
for i, sector := range genms[i].Sectors {
sectors[i] = abi.SectorID{
Miner: minerID,
Number: sector.SectorID,
}
}
opts := def.Opts
if opts == nil {
opts = node.Options()
}
miners[i] = CreateTestStorageNode(ctx, t, genms[i].Worker, maddrs[i], pidKeys[i], f, mn, node.Options(
node.Override(new(*mock.SectorMgr), func() (*mock.SectorMgr, error) {
return mock.NewMockSectorMgr(sectors), nil
}),
node.Override(new(sectorstorage.SectorManager), node.From(new(*mock.SectorMgr))),
node.Override(new(sectorstorage.Unsealer), node.From(new(*mock.SectorMgr))),
node.Override(new(sectorstorage.PieceProvider), node.From(new(*mock.SectorMgr))),
node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
node.Override(new(ffiwrapper.Prover), mock.MockProver),
node.Unset(new(*sectorstorage.Manager)),
opts,
))
if rpc {
miners[i] = storerRpc(t, miners[i])
}
}
if err := mn.LinkAll(); err != nil {
t.Fatal(err)
}
if len(miners) > 0 {
// Mine 2 blocks to set up some CE stuff in some actors
bm := NewBlockMiner(t, miners[0])
var wait sync.Mutex
wait.Lock()
bm.MineUntilBlock(ctx, fulls[0], func(abi.ChainEpoch) {
wait.Unlock()
})
wait.Lock()
bm.MineUntilBlock(ctx, fulls[0], func(abi.ChainEpoch) {
wait.Unlock()
})
wait.Lock()
}
return fulls, miners
}
func CreateRPCServer(t *testing.T, handler http.Handler) (*httptest.Server, multiaddr.Multiaddr) {
testServ := httptest.NewServer(handler)
t.Cleanup(testServ.Close)
t.Cleanup(testServ.CloseClientConnections)
addr := testServ.Listener.Addr()
maddr, err := manet.FromNetAddr(addr)
require.NoError(t, err)
return testServ, maddr
}
func fullRpc(t *testing.T, nd TestFullNode) TestFullNode {
handler, err := node.FullNodeHandler(nd.FullNode, false)
require.NoError(t, err)
srv, maddr := CreateRPCServer(t, handler)
var ret TestFullNode
cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
require.NoError(t, err)
t.Cleanup(stop)
ret.ListenAddr, ret.FullNode = maddr, cl
return ret
}
func storerRpc(t *testing.T, nd TestMiner) TestMiner {
handler, err := node.MinerHandler(nd.StorageMiner, false)
require.NoError(t, err)
srv, maddr := CreateRPCServer(t, handler)
var ret TestMiner
cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v0", nil)
require.NoError(t, err)
t.Cleanup(stop)
ret.ListenAddr, ret.StorageMiner, ret.MineOne = maddr, cl, nd.MineOne
return ret
}

85
itests/kit/node_full.go Normal file
View File

@ -0,0 +1,85 @@
package kit
import (
"context"
"testing"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet"
"github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/require"
)
// TestFullNode represents a full node enrolled in an Ensemble.
type TestFullNode struct {
v1api.FullNode
t *testing.T
// ListenAddr is the address on which an API server is listening, if an
// API server is created for this Node.
ListenAddr multiaddr.Multiaddr
DefaultKey *wallet.Key
options nodeOpts
}
// CreateImportFile creates a random file with the specified seed and size, and
// imports it into the full node.
func (f *TestFullNode) CreateImportFile(ctx context.Context, rseed int, size int) (res *api.ImportRes, path string) {
path = CreateRandomFile(f.t, rseed, size)
res, err := f.ClientImport(ctx, api.FileRef{Path: path})
require.NoError(f.t, err)
return res, path
}
// WaitTillChain waits until a specified chain condition is met. It returns
// the first tipset where the condition is met.
func (f *TestFullNode) WaitTillChain(ctx context.Context, pred ChainPredicate) *types.TipSet {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
heads, err := f.ChainNotify(ctx)
require.NoError(f.t, err)
for chg := range heads {
for _, c := range chg {
if c.Type != "apply" {
continue
}
if ts := c.Val; pred(ts) {
return ts
}
}
}
require.Fail(f.t, "chain condition not met")
return nil
}
// ChainPredicate encapsulates a chain condition.
type ChainPredicate func(set *types.TipSet) bool
// HeightAtLeast returns a ChainPredicate that is satisfied when the chain
// height is equal to or higher than the target.
func HeightAtLeast(target abi.ChainEpoch) ChainPredicate {
return func(ts *types.TipSet) bool {
return ts.Height() >= target
}
}
// BlockMinedBy returns a ChainPredicate that is satisfied when we observe the
// first block mined by the specified miner.
func BlockMinedBy(miner address.Address) ChainPredicate {
return func(ts *types.TipSet) bool {
for _, b := range ts.Blocks() {
if b.Miner == miner {
return true
}
}
return false
}
}
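The helpers above are consumed from the refactored tests further down in this diff. The following is a minimal illustrative sketch, not part of this PR; EnsembleMinimal, MockProofs and the ensemble methods are defined elsewhere in this change set.

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/itests/kit"
)

// Illustrative sketch: exercising the TestFullNode helpers defined above.
func TestWaitTillChainSketch(t *testing.T) {
	ctx := context.Background()

	// EnsembleMinimal and MockProofs come from elsewhere in this change set.
	client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
	ens.InterconnectAll().BeginMining(5 * time.Millisecond)

	// Block until the chain reaches epoch 10; WaitTillChain returns the first
	// tipset satisfying the predicate.
	ts := client.WaitTillChain(ctx, kit.HeightAtLeast(abi.ChainEpoch(10)))
	t.Logf("reached height %d", ts.Height())

	// Block until we observe a block mined by our miner.
	client.WaitTillChain(ctx, kit.BlockMinedBy(miner.ActorAddr))
}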

121
itests/kit/node_miner.go Normal file
View File

@ -0,0 +1,121 @@
package kit
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/wallet"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/miner"
libp2pcrypto "github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/require"
)
// TestMiner represents a miner enrolled in an Ensemble.
type TestMiner struct {
api.StorageMiner
t *testing.T
// ListenAddr is the address on which an API server is listening, if an
// API server is created for this Node
ListenAddr multiaddr.Multiaddr
ActorAddr address.Address
OwnerKey *wallet.Key
MineOne func(context.Context, miner.MineReq) error
Stop func(context.Context) error
FullNode *TestFullNode
PresealDir string
Libp2p struct {
PeerID peer.ID
PrivKey libp2pcrypto.PrivKey
}
options nodeOpts
}
func (tm *TestMiner) PledgeSectors(ctx context.Context, n, existing int, blockNotif <-chan struct{}) {
toCheck := tm.StartPledge(ctx, n, existing, blockNotif)
for len(toCheck) > 0 {
tm.FlushSealingBatches(ctx)
states := map[api.SectorState]int{}
for n := range toCheck {
st, err := tm.StorageMiner.SectorsStatus(ctx, n, false)
require.NoError(tm.t, err)
states[st.State]++
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
tm.t.Fatal("sector in a failed state", st.State)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
}
func (tm *TestMiner) StartPledge(ctx context.Context, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} {
for i := 0; i < n; i++ {
if i%3 == 0 && blockNotif != nil {
<-blockNotif
tm.t.Log("WAIT")
}
tm.t.Logf("PLEDGING %d", i)
_, err := tm.StorageMiner.PledgeSector(ctx)
require.NoError(tm.t, err)
}
for {
s, err := tm.StorageMiner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
require.NoError(tm.t, err)
fmt.Printf("Sectors: %d\n", len(s))
if len(s) >= n+existing {
break
}
build.Clock.Sleep(100 * time.Millisecond)
}
fmt.Printf("All sectors is fsm\n")
s, err := tm.StorageMiner.SectorsList(ctx)
require.NoError(tm.t, err)
toCheck := map[abi.SectorNumber]struct{}{}
for _, number := range s {
toCheck[number] = struct{}{}
}
return toCheck
}
func (tm *TestMiner) FlushSealingBatches(ctx context.Context) {
pcb, err := tm.StorageMiner.SectorPreCommitFlush(ctx)
require.NoError(tm.t, err)
if pcb != nil {
fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb)
}
cb, err := tm.StorageMiner.SectorCommitFlush(ctx)
require.NoError(tm.t, err)
if cb != nil {
fmt.Printf("COMMIT BATCH: %+v\n", cb)
}
}
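A minimal illustrative sketch (not part of this PR) of driving the pledge helpers above; the ensemble constructor comes from elsewhere in this change set.

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit"
)

// Illustrative sketch: pledging sectors through the TestMiner helpers above.
func TestPledgeSketch(t *testing.T) {
	ctx := context.Background()

	_, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
	ens.InterconnectAll().BeginMining(50 * time.Millisecond)

	// Pledge 4 new sectors (the sealing FSM starts empty, hence existing = 0)
	// and block until all of them reach the Proving state.
	miner.PledgeSectors(ctx, 4, 0, nil)

	// For finer-grained control, StartPledge returns the sector numbers to
	// watch, and FlushSealingBatches pushes out pending precommit/commit batches.
	toCheck := miner.StartPledge(ctx, 2, 4, nil)
	miner.FlushSealingBatches(ctx)
	t.Logf("waiting on %d sectors", len(toCheck))
}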

107
itests/kit/node_opts.go Normal file
View File

@ -0,0 +1,107 @@
package kit
import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet"
"github.com/filecoin-project/lotus/node"
)
// DefaultPresealsPerBootstrapMiner is the number of preseals that every
// bootstrap miner has by default. It can be overridden through the
// PresealSectors option.
const DefaultPresealsPerBootstrapMiner = 2
const TestSpt = abi.RegisteredSealProof_StackedDrg2KiBV1_1
// nodeOpts is an options accumulating struct, where functional options are
// merged into.
type nodeOpts struct {
balance abi.TokenAmount
lite bool
sectors int
rpc bool
ownerKey *wallet.Key
extraNodeOpts []node.Option
optBuilders []OptBuilder
proofType abi.RegisteredSealProof
}
// DefaultNodeOpts are the default options that will be applied to test nodes.
var DefaultNodeOpts = nodeOpts{
balance: big.Mul(big.NewInt(100000000), types.NewInt(build.FilecoinPrecision)),
sectors: DefaultPresealsPerBootstrapMiner,
proofType: abi.RegisteredSealProof_StackedDrg2KiBV1_1, // default _concrete_ proof type for non-genesis miners (notice the _1) for new actor versions.
}
// OptBuilder is used to create an option after some other node is already
// active. Takes all active nodes as a parameter.
type OptBuilder func(activeNodes []*TestFullNode) node.Option
// NodeOpt is a functional option for test nodes.
type NodeOpt func(opts *nodeOpts) error
// OwnerBalance specifies the balance to be attributed to a miner's owner
// account. Only relevant when creating a miner.
func OwnerBalance(balance abi.TokenAmount) NodeOpt {
return func(opts *nodeOpts) error {
opts.balance = balance
return nil
}
}
// LiteNode specifies that this node will be a lite node. Only relevant when
// creating a fullnode.
func LiteNode() NodeOpt {
return func(opts *nodeOpts) error {
opts.lite = true
return nil
}
}
// PresealSectors specifies the number of preseal sectors to give to a miner
// at genesis. Only relevant when creating a miner.
func PresealSectors(sectors int) NodeOpt {
return func(opts *nodeOpts) error {
opts.sectors = sectors
return nil
}
}
// ThroughRPC makes all interactions with this node during the test flow
// through the JSON-RPC API.
func ThroughRPC() NodeOpt {
return func(opts *nodeOpts) error {
opts.rpc = true
return nil
}
}
// OwnerAddr sets the owner address of a miner. Only relevant when creating
// a miner.
func OwnerAddr(wk *wallet.Key) NodeOpt {
return func(opts *nodeOpts) error {
opts.ownerKey = wk
return nil
}
}
// ConstructorOpts are Lotus node constructor options that are passed as-is to
// the node.
func ConstructorOpts(extra ...node.Option) NodeOpt {
return func(opts *nodeOpts) error {
opts.extraNodeOpts = extra
return nil
}
}
// ProofType sets the proof type for this node. If you're using new actor
// versions, this should be a _1 proof type.
func ProofType(proofType abi.RegisteredSealProof) NodeOpt {
return func(opts *nodeOpts) error {
opts.proofType = proofType
return nil
}
}
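A minimal illustrative sketch (not part of this PR) of composing these options. Passing node options directly to EnsembleMinimal follows the tests further down in this diff; whether a shared option ends up applying to the full node, the miner, or both is an assumption here and depends on each option's documented scope.

package itests

import (
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/itests/kit"
)

// Illustrative sketch: composing functional node options.
func TestNodeOptsSketch(t *testing.T) {
	_, miner, ens := kit.EnsembleMinimal(t,
		kit.MockProofs(),
		kit.ThroughRPC(),      // route interactions over the JSON-RPC API
		kit.PresealSectors(8), // 8 genesis preseals instead of the default 2
		kit.OwnerBalance(abi.NewTokenAmount(1e18)), // custom owner balance, in attoFIL
	)
	ens.InterconnectAll().BeginMining(10 * time.Millisecond)

	t.Log("miner actor:", miner.ActorAddr)
}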

View File

@ -0,0 +1,90 @@
package kit
import (
"context"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node"
"github.com/ipfs/go-cid"
)
// DefaultTestUpgradeSchedule is the network upgrade schedule applied to test networks by default.
var DefaultTestUpgradeSchedule = stmgr.UpgradeSchedule{{
Network: network.Version9,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version10,
Height: 2,
Migration: stmgr.UpgradeActorsV3,
}, {
Network: network.Version12,
Height: 3,
Migration: stmgr.UpgradeActorsV4,
}, {
Network: network.Version13,
Height: 4,
Migration: stmgr.UpgradeActorsV5,
}}
func LatestActorsAt(upgradeHeight abi.ChainEpoch) node.Option {
// Attention: Update this when introducing new actor versions or your tests will be sad
return NetworkUpgradeAt(network.Version13, upgradeHeight)
}
// InstantaneousNetworkVersion starts the network instantaneously at the
// specified version at height 1.
func InstantaneousNetworkVersion(version network.Version) node.Option {
// composes all migration functions
var mf stmgr.MigrationFunc = func(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, oldState cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (newState cid.Cid, err error) {
var state = oldState
for _, u := range DefaultTestUpgradeSchedule {
if u.Network > version {
break
}
state, err = u.Migration(ctx, sm, cache, cb, state, height, ts)
if err != nil {
return cid.Undef, err
}
}
return state, nil
}
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{
{Network: version, Height: 1, Migration: mf},
})
}
func NetworkUpgradeAt(version network.Version, upgradeHeight abi.ChainEpoch) node.Option {
schedule := stmgr.UpgradeSchedule{}
for _, upgrade := range DefaultTestUpgradeSchedule {
if upgrade.Network > version {
break
}
schedule = append(schedule, upgrade)
}
if upgradeHeight > 0 {
schedule[len(schedule)-1].Height = upgradeHeight
}
return node.Override(new(stmgr.UpgradeSchedule), schedule)
}
func SDRUpgradeAt(calico, persian abi.ChainEpoch) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
Network: network.Version6,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version7,
Height: calico,
Migration: stmgr.UpgradeCalico,
}, {
Network: network.Version8,
Height: persian,
}})
}
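A minimal illustrative sketch (not part of this PR) of wiring these upgrade schedules into a test through ConstructorOpts, mirroring how the refactored tests below use them.

package itests

import (
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/itests/kit"
)

// Illustrative sketch: selecting an upgrade schedule for a test network.
func TestUpgradeScheduleSketch(t *testing.T) {
	// Jump straight to network version 13 at height 1.
	opts := kit.ConstructorOpts(kit.InstantaneousNetworkVersion(network.Version13))

	_, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
	ens.InterconnectAll().BeginMining(5 * time.Millisecond)

	// Schedules used elsewhere in this change set:
	//   kit.ConstructorOpts(kit.LatestActorsAt(-1))      // latest actors at the default test heights
	//   kit.ConstructorOpts(kit.SDRUpgradeAt(500, 1000)) // Calico at height 500, Persian at height 1000
}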

View File

@ -1,153 +0,0 @@
package kit
import (
"context"
"testing"
"github.com/multiformats/go-multiaddr"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
)
type MinerBuilder func(context.Context, *testing.T, abi.RegisteredSealProof, address.Address) TestMiner
type TestFullNode struct {
v1api.FullNode
// ListenAddr is the address on which an API server is listening, if an
// API server is created for this Node
ListenAddr multiaddr.Multiaddr
Stb MinerBuilder
}
type TestMiner struct {
lapi.StorageMiner
// ListenAddr is the address on which an API server is listening, if an
// API server is created for this Node
ListenAddr multiaddr.Multiaddr
MineOne func(context.Context, miner.MineReq) error
Stop func(context.Context) error
}
var PresealGenesis = -1
const GenesisPreseals = 2
const TestSpt = abi.RegisteredSealProof_StackedDrg2KiBV1_1
// Options for setting up a mock storage Miner
type StorageMiner struct {
Full int
Opts node.Option
Preseal int
}
type OptionGenerator func([]TestFullNode) node.Option
// Options for setting up a mock full node
type FullNodeOpts struct {
Lite bool // run node in "lite" mode
Opts OptionGenerator // generate dependency injection options
}
// APIBuilder is a function which is invoked in the test suite to provide
// test nodes and networks.
//
// The fullOpts array defines options for each full node; the storage array
// defines storage nodes, where the numbers in the array specify the index of
// the full node each storage node 'belongs' to.
type APIBuilder func(t *testing.T, full []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner)
func DefaultFullOpts(nFull int) []FullNodeOpts {
full := make([]FullNodeOpts, nFull)
for i := range full {
full[i] = FullNodeOpts{
Opts: func(nodes []TestFullNode) node.Option {
return node.Options()
},
}
}
return full
}
var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
var OneFull = DefaultFullOpts(1)
var TwoFull = DefaultFullOpts(2)
var FullNodeWithLatestActorsAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
// Attention: Update this when introducing new actor versions or your tests will be sad
return FullNodeWithNetworkUpgradeAt(network.Version13, upgradeHeight)
}
var FullNodeWithNetworkUpgradeAt = func(version network.Version, upgradeHeight abi.ChainEpoch) FullNodeOpts {
fullSchedule := stmgr.UpgradeSchedule{{
// prepare for upgrade.
Network: network.Version9,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version10,
Height: 2,
Migration: stmgr.UpgradeActorsV3,
}, {
Network: network.Version12,
Height: 3,
Migration: stmgr.UpgradeActorsV4,
}, {
Network: network.Version13,
Height: 4,
Migration: stmgr.UpgradeActorsV5,
}}
schedule := stmgr.UpgradeSchedule{}
for _, upgrade := range fullSchedule {
if upgrade.Network > version {
break
}
schedule = append(schedule, upgrade)
}
if upgradeHeight > 0 {
schedule[len(schedule)-1].Height = upgradeHeight
}
return FullNodeOpts{
Opts: func(nodes []TestFullNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), schedule)
},
}
}
var FullNodeWithSDRAt = func(calico, persian abi.ChainEpoch) FullNodeOpts {
return FullNodeOpts{
Opts: func(nodes []TestFullNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
Network: network.Version6,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version7,
Height: calico,
Migration: stmgr.UpgradeCalico,
}, {
Network: network.Version8,
Height: persian,
}})
},
}
}
var MineNext = miner.MineReq{
InjectNulls: 0,
Done: func(bool, abi.ChainEpoch, error) {},
}

View File

@ -1,88 +0,0 @@
package kit
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/stretchr/testify/require"
)
func PledgeSectors(t *testing.T, ctx context.Context, miner TestMiner, n, existing int, blockNotif <-chan struct{}) { //nolint:golint
toCheck := StartPledge(t, ctx, miner, n, existing, blockNotif)
for len(toCheck) > 0 {
flushSealingBatches(t, ctx, miner)
states := map[api.SectorState]int{}
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n, false)
require.NoError(t, err)
states[st.State]++
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
}
func flushSealingBatches(t *testing.T, ctx context.Context, miner TestMiner) { //nolint:golint
pcb, err := miner.SectorPreCommitFlush(ctx)
require.NoError(t, err)
if pcb != nil {
fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb)
}
cb, err := miner.SectorCommitFlush(ctx)
require.NoError(t, err)
if cb != nil {
fmt.Printf("COMMIT BATCH: %+v\n", cb)
}
}
func StartPledge(t *testing.T, ctx context.Context, miner TestMiner, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} { //nolint:golint
for i := 0; i < n; i++ {
if i%3 == 0 && blockNotif != nil {
<-blockNotif
t.Log("WAIT")
}
t.Logf("PLEDGING %d", i)
_, err := miner.PledgeSector(ctx)
require.NoError(t, err)
}
for {
s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
require.NoError(t, err)
fmt.Printf("Sectors: %d\n", len(s))
if len(s) >= n+existing {
break
}
build.Clock.Sleep(100 * time.Millisecond)
}
fmt.Printf("All sectors is fsm\n")
s, err := miner.SectorsList(ctx)
require.NoError(t, err)
toCheck := map[abi.SectorNumber]struct{}{}
for _, number := range s {
toCheck[number] = struct{}{}
}
return toCheck
}

53
itests/kit/rpc.go Normal file
View File

@ -0,0 +1,53 @@
package kit
import (
"context"
"net/http"
"net/http/httptest"
"testing"
"github.com/filecoin-project/lotus/api/client"
"github.com/filecoin-project/lotus/node"
"github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/stretchr/testify/require"
)
func CreateRPCServer(t *testing.T, handler http.Handler) (*httptest.Server, multiaddr.Multiaddr) {
testServ := httptest.NewServer(handler)
t.Cleanup(testServ.Close)
t.Cleanup(testServ.CloseClientConnections)
addr := testServ.Listener.Addr()
maddr, err := manet.FromNetAddr(addr)
require.NoError(t, err)
return testServ, maddr
}
func fullRpc(t *testing.T, f *TestFullNode) *TestFullNode {
handler, err := node.FullNodeHandler(f.FullNode, false)
require.NoError(t, err)
srv, maddr := CreateRPCServer(t, handler)
cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
require.NoError(t, err)
t.Cleanup(stop)
f.ListenAddr, f.FullNode = maddr, cl
return f
}
func minerRpc(t *testing.T, m *TestMiner) *TestMiner {
handler, err := node.MinerHandler(m.StorageMiner, false)
require.NoError(t, err)
srv, maddr := CreateRPCServer(t, handler)
cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v0", nil)
require.NoError(t, err)
t.Cleanup(stop)
m.ListenAddr, m.StorageMiner = maddr, cl
return m
}
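Tests are not expected to call fullRpc or minerRpc directly; they opt in through the ThroughRPC node option, as in this illustrative sketch (not part of this PR).

package itests

import (
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit"
)

// Illustrative sketch: requesting RPC-backed nodes from the ensemble.
func TestThroughRPCSketch(t *testing.T) {
	client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC())
	ens.InterconnectAll().BeginMining(5 * time.Millisecond)

	// ListenAddr is the multiaddr of the websocket endpoint recorded by fullRpc;
	// CLI-style tests dial it through a mock CLI client.
	t.Log("full node API listening on", client.ListenAddr)
}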

View File

@ -14,21 +14,21 @@ import (
"github.com/stretchr/testify/require"
)
func RunMultisigTests(t *testing.T, clientNode kit.TestFullNode) {
func RunMultisigTests(t *testing.T, client *kit.TestFullNode) {
// Create mock CLI
ctx := context.Background()
mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
clientCLI := mockCLI.Client(clientNode.ListenAddr)
clientCLI := mockCLI.Client(client.ListenAddr)
// Create some wallets on the node to use for testing multisig
var walletAddrs []address.Address
for i := 0; i < 4; i++ {
addr, err := clientNode.WalletNew(ctx, types.KTSecp256k1)
addr, err := client.WalletNew(ctx, types.KTSecp256k1)
require.NoError(t, err)
walletAddrs = append(walletAddrs, addr)
kit.SendFunds(ctx, t, clientNode, addr, types.NewInt(1e15))
kit.SendFunds(ctx, t, client, addr, types.NewInt(1e15))
}
// Create an msig with three of the addresses and threshold of two sigs

View File

@ -1,8 +1,6 @@
package itests
import (
"context"
"os"
"testing"
"time"
@ -12,12 +10,11 @@ import (
// TestMultisig does a basic test to exercise the multisig CLI commands
func TestMultisig(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
clientNode, _ := kit.StartOneNodeOneMiner(ctx, t, blocktime)
blockTime := 5 * time.Millisecond
client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC())
ens.InterconnectAll().BeginMining(blockTime)
multisig.RunMultisigTests(t, clientNode)
multisig.RunMultisigTests(t, client)
}

View File

@ -2,7 +2,6 @@ package itests
import (
"context"
"fmt"
"testing"
"time"
@ -10,6 +9,7 @@ import (
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/ipfs/go-cid"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
cbor "github.com/ipfs/go-ipld-cbor"
@ -30,63 +30,45 @@ func TestPaymentChannelsAPI(t *testing.T) {
kit.QuietMiningLogs()
ctx := context.Background()
n, sn := kit.MockMinerBuilder(t, kit.TwoFull, kit.OneMiner)
blockTime := 5 * time.Millisecond
paymentCreator := n[0]
paymentReceiver := n[1]
miner := sn[0]
var (
paymentCreator kit.TestFullNode
paymentReceiver kit.TestFullNode
miner kit.TestMiner
)
// get everyone connected
addrs, err := paymentCreator.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := paymentReceiver.NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
// start mining blocks
bm := kit.NewBlockMiner(t, miner)
bm.MineBlocks(ctx, 5*time.Millisecond)
t.Cleanup(bm.Stop)
ens := kit.NewEnsemble(t, kit.MockProofs()).
FullNode(&paymentCreator).
FullNode(&paymentReceiver).
Miner(&miner, &paymentCreator).
Start().
InterconnectAll()
bms := ens.BeginMining(blockTime)
bm := bms[0]
// send some funds to register the receiver
receiverAddr, err := paymentReceiver.WalletNew(ctx, types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
kit.SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))
kit.SendFunds(ctx, t, &paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))
// setup the payment channel
createrAddr, err := paymentCreator.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
channelAmt := int64(7000)
channelInfo, err := paymentCreator.PaychGet(ctx, createrAddr, receiverAddr, abi.NewTokenAmount(channelAmt))
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
channel, err := paymentCreator.PaychGetWaitReady(ctx, channelInfo.WaitSentinel)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
// allocate three lanes
var lanes []uint64
for i := 0; i < 3; i++ {
lane, err := paymentCreator.PaychAllocateLane(ctx, channel)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
lanes = append(lanes, lane)
}
@ -95,45 +77,28 @@ func TestPaymentChannelsAPI(t *testing.T) {
// supersedes the voucher with a value of 1000
for _, lane := range lanes {
vouch1, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(1000), lane)
if err != nil {
t.Fatal(err)
}
if vouch1.Voucher == nil {
t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouch1.Shortfall))
}
require.NoError(t, err)
require.NotNil(t, vouch1.Voucher, "Not enough funds to create voucher: missing %d", vouch1.Shortfall)
vouch2, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(2000), lane)
if err != nil {
t.Fatal(err)
}
if vouch2.Voucher == nil {
t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouch2.Shortfall))
}
require.NoError(t, err)
require.NotNil(t, vouch2.Voucher, "Not enough funds to create voucher: missing %d", vouch2.Shortfall)
delta1, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch1.Voucher, nil, abi.NewTokenAmount(1000))
if err != nil {
t.Fatal(err)
}
if !delta1.Equals(abi.NewTokenAmount(1000)) {
t.Fatal("voucher didn't have the right amount")
}
require.NoError(t, err)
require.EqualValues(t, abi.NewTokenAmount(1000), delta1, "voucher didn't have the right amount")
delta2, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch2.Voucher, nil, abi.NewTokenAmount(1000))
if err != nil {
t.Fatal(err)
}
if !delta2.Equals(abi.NewTokenAmount(1000)) {
t.Fatal("voucher didn't have the right amount")
}
require.NoError(t, err)
require.EqualValues(t, abi.NewTokenAmount(1000), delta2, "voucher didn't have the right amount")
}
// settle the payment channel
settleMsgCid, err := paymentCreator.PaychSettle(ctx, channel)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
res := waitForMessage(ctx, t, paymentCreator, settleMsgCid, time.Second*10, "settle")
if res.Receipt.ExitCode != 0 {
t.Fatal("Unable to settle payment channel")
}
require.EqualValues(t, 0, res.Receipt.ExitCode, "Unable to settle payment channel")
creatorStore := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(paymentCreator)))
@ -170,9 +135,7 @@ func TestPaymentChannelsAPI(t *testing.T) {
}, int(build.MessageConfidence)+1, build.Finality, func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) {
return preds.OnPaymentChannelActorChanged(channel, preds.OnToSendAmountChanges())(ctx, oldTs.Key(), newTs.Key())
})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
select {
case <-finished:
@ -182,75 +145,49 @@ func TestPaymentChannelsAPI(t *testing.T) {
// Create a new voucher now that some vouchers have already been submitted
vouchRes, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(1000), 3)
if err != nil {
t.Fatal(err)
}
if vouchRes.Voucher == nil {
t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouchRes.Shortfall))
}
require.NoError(t, err)
require.NotNil(t, vouchRes.Voucher, "Not enough funds to create voucher: missing %d", vouchRes.Shortfall)
vdelta, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouchRes.Voucher, nil, abi.NewTokenAmount(1000))
if err != nil {
t.Fatal(err)
}
if !vdelta.Equals(abi.NewTokenAmount(1000)) {
t.Fatal("voucher didn't have the right amount")
}
require.NoError(t, err)
require.EqualValues(t, abi.NewTokenAmount(1000), vdelta, "voucher didn't have the right amount")
// Create a new voucher whose value would exceed the channel balance
excessAmt := abi.NewTokenAmount(1000)
vouchRes, err = paymentCreator.PaychVoucherCreate(ctx, channel, excessAmt, 4)
if err != nil {
t.Fatal(err)
}
if vouchRes.Voucher != nil {
t.Fatal("Expected not to be able to create voucher whose value would exceed channel balance")
}
if !vouchRes.Shortfall.Equals(excessAmt) {
t.Fatal(fmt.Errorf("Expected voucher shortfall of %d, got %d", excessAmt, vouchRes.Shortfall))
}
require.NoError(t, err)
require.Nil(t, vouchRes.Voucher, "Expected not to be able to create voucher whose value would exceed channel balance")
require.EqualValues(t, excessAmt, vouchRes.Shortfall, "Expected voucher shortfall of %d, got %d", excessAmt, vouchRes.Shortfall)
// Add a voucher whose value would exceed the channel balance
vouch := &paych.SignedVoucher{ChannelAddr: channel, Amount: excessAmt, Lane: 4, Nonce: 1}
vb, err := vouch.SigningBytes()
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
sig, err := paymentCreator.WalletSign(ctx, createrAddr, vb)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
vouch.Signature = sig
_, err = paymentReceiver.PaychVoucherAdd(ctx, channel, vouch, nil, abi.NewTokenAmount(1000))
if err == nil {
t.Fatal(fmt.Errorf("Expected shortfall error of %d", excessAmt))
}
require.Errorf(t, err, "Expected shortfall error of %d", excessAmt)
// wait for the settlement period to pass before collecting
waitForBlocks(ctx, t, bm, paymentReceiver, receiverAddr, policy.PaychSettleDelay)
creatorPreCollectBalance, err := paymentCreator.WalletBalance(ctx, createrAddr)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
// collect funds (from receiver, though either party can do it)
collectMsg, err := paymentReceiver.PaychCollect(ctx, channel)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
res, err = paymentReceiver.StateWaitMsg(ctx, collectMsg, 3, api.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("unable to collect on payment channel")
}
require.NoError(t, err)
require.EqualValues(t, 0, res.Receipt.ExitCode, "unable to collect on payment channel")
// Finally, check the balance for the creator
currentCreatorBalance, err := paymentCreator.WalletBalance(ctx, createrAddr)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
// The highest nonce voucher that the creator sent on each lane is 2000
totalVouchers := int64(len(lanes) * 2000)
@ -260,12 +197,7 @@ func TestPaymentChannelsAPI(t *testing.T) {
// channel amount - total voucher value
expectedRefund := channelAmt - totalVouchers
delta := big.Sub(currentCreatorBalance, creatorPreCollectBalance)
if !delta.Equals(abi.NewTokenAmount(expectedRefund)) {
t.Fatalf("did not send correct funds from creator: expected %d, got %d", expectedRefund, delta)
}
// shut down mining
bm.Stop()
require.EqualValues(t, abi.NewTokenAmount(expectedRefund), delta, "did not send correct funds from creator: expected %d, got %d", expectedRefund, delta)
}
func waitForBlocks(ctx context.Context, t *testing.T, bm *kit.BlockMiner, paymentReceiver kit.TestFullNode, receiverAddr address.Address, count int) {
@ -286,14 +218,10 @@ func waitForBlocks(ctx context.Context, t *testing.T, bm *kit.BlockMiner, paymen
From: receiverAddr,
Value: types.NewInt(0),
}, nil)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
_, err = paymentReceiver.StateWaitMsg(ctx, m.Cid(), 1, api.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
}
}
@ -301,15 +229,12 @@ func waitForMessage(ctx context.Context, t *testing.T, paymentCreator kit.TestFu
ctx, cancel := context.WithTimeout(ctx, duration)
defer cancel()
fmt.Println("Waiting for", desc)
t.Log("Waiting for", desc)
res, err := paymentCreator.StateWaitMsg(ctx, msgCid, 1, api.LookbackNoLimit, true)
if err != nil {
fmt.Println("Error waiting for", desc, err)
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatalf("did not successfully send %s", desc)
}
fmt.Println("Confirmed", desc)
require.NoError(t, err)
require.EqualValues(t, 0, res.Receipt.ExitCode, "did not successfully send %s", desc)
t.Log("Confirmed", desc)
return res
}

View File

@ -41,11 +41,12 @@ func TestPaymentChannelsBasic(t *testing.T) {
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
paymentCreator := nodes[0]
paymentReceiver := nodes[1]
creatorAddr := addrs[0]
receiverAddr := addrs[1]
var (
paymentCreator kit.TestFullNode
paymentReceiver kit.TestFullNode
)
creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime)
// Create mock CLI
mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
@ -70,12 +71,16 @@ func TestPaymentChannelsBasic(t *testing.T) {
// creator: paych settle <channel>
creatorCLI.RunCmd("paych", "settle", chAddr.String())
t.Log("wait for chain to reach settle height")
// Wait for the chain to reach the settle height
chState := getPaychState(ctx, t, paymentReceiver, chAddr)
sa, err := chState.SettlingAt()
require.NoError(t, err)
waitForHeight(ctx, t, paymentReceiver, sa)
t.Log("settle height reached")
// receiver: paych collect <channel>
receiverCLI.RunCmd("paych", "collect", chAddr.String())
}
@ -93,10 +98,11 @@ func TestPaymentChannelStatus(t *testing.T) {
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
paymentCreator := nodes[0]
creatorAddr := addrs[0]
receiverAddr := addrs[1]
var (
paymentCreator kit.TestFullNode
paymentReceiver kit.TestFullNode
)
creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime)
// Create mock CLI
mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
@ -172,11 +178,11 @@ func TestPaymentChannelVouchers(t *testing.T) {
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
paymentCreator := nodes[0]
paymentReceiver := nodes[1]
creatorAddr := addrs[0]
receiverAddr := addrs[1]
var (
paymentCreator kit.TestFullNode
paymentReceiver kit.TestFullNode
)
creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime)
// Create mock CLI
mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
@ -304,10 +310,11 @@ func TestPaymentChannelVoucherCreateShortfall(t *testing.T) {
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
paymentCreator := nodes[0]
creatorAddr := addrs[0]
receiverAddr := addrs[1]
var (
paymentCreator kit.TestFullNode
paymentReceiver kit.TestFullNode
)
creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime)
// Create mock CLI
mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
@ -406,3 +413,25 @@ func getPaychState(ctx context.Context, t *testing.T, node kit.TestFullNode, chA
return chState
}
func startPaychCreatorReceiverMiner(ctx context.Context, t *testing.T, paymentCreator *kit.TestFullNode, paymentReceiver *kit.TestFullNode, blocktime time.Duration) (address.Address, address.Address) {
var miner kit.TestMiner
opts := kit.ThroughRPC()
kit.NewEnsemble(t, kit.MockProofs()).
FullNode(paymentCreator, opts).
FullNode(paymentReceiver, opts).
Miner(&miner, paymentCreator).
Start().
InterconnectAll().
BeginMining(blocktime)
// Send some funds to the second node
receiverAddr, err := paymentReceiver.WalletDefaultAddress(ctx)
require.NoError(t, err)
kit.SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))
// Get the first node's address
creatorAddr, err := paymentCreator.WalletDefaultAddress(ctx)
require.NoError(t, err)
return creatorAddr, receiverAddr
}

View File

@ -12,7 +12,6 @@ import (
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/itests/kit"
bminer "github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node/impl"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -31,18 +30,10 @@ func TestSDRUpgrade(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithSDRAt(500, 1000)}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
opts := kit.ConstructorOpts(kit.SDRUpgradeAt(500, 1000))
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
ens.InterconnectAll()
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
pledge := make(chan struct{})
@ -53,7 +44,7 @@ func TestSDRUpgrade(t *testing.T) {
round := 0
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
if err := miner.MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
@ -88,7 +79,7 @@ func TestSDRUpgrade(t *testing.T) {
}()
// before.
kit.PledgeSectors(t, ctx, miner, 9, 0, pledge)
miner.PledgeSectors(ctx, 9, 0, pledge)
s, err := miner.SectorsList(ctx)
require.NoError(t, err)

View File

@ -4,70 +4,38 @@ import (
"context"
"fmt"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/stmgr"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/itests/kit"
bminer "github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
"github.com/stretchr/testify/require"
)
func TestPledgeSectors(t *testing.T) {
kit.QuietMiningLogs()
runTest := func(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int) {
blockTime := 50 * time.Millisecond
runTest := func(t *testing.T, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, kit.OneFull, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
_, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
ens.InterconnectAll().BeginMining(blockTime)
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
kit.PledgeSectors(t, ctx, miner, nSectors, 0, nil)
atomic.StoreInt64(&mine, 0)
<-done
miner.PledgeSectors(ctx, nSectors, 0, nil)
}
t.Run("1", func(t *testing.T) {
runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 1)
runTest(t, 1)
})
t.Run("100", func(t *testing.T) {
runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 100)
runTest(t, 100)
})
t.Run("1000", func(t *testing.T) {
@ -75,52 +43,24 @@ func TestPledgeSectors(t *testing.T) {
t.Skip("skipping test in short mode")
}
runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 1000)
runTest(t, 1000)
})
}
func TestPledgeBatching(t *testing.T) {
runTest := func(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int) {
blockTime := 50 * time.Millisecond
runTest := func(t *testing.T, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
opts := kit.ConstructorOpts(kit.LatestActorsAt(-1))
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
ens.InterconnectAll().BeginMining(blockTime)
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
client.WaitTillChain(ctx, kit.HeightAtLeast(10))
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
for {
h, err := client.ChainHead(ctx)
require.NoError(t, err)
if h.Height() > 10 {
break
}
}
toCheck := kit.StartPledge(t, ctx, miner, nSectors, 0, nil)
toCheck := miner.StartPledge(ctx, nSectors, 0, nil)
for len(toCheck) > 0 {
states := map[api.SectorState]int{}
@ -157,80 +97,27 @@ func TestPledgeBatching(t *testing.T) {
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
atomic.StoreInt64(&mine, 0)
<-done
}
t.Run("100", func(t *testing.T) {
runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 100)
runTest(t, 100)
})
}
func TestPledgeBeforeNv13(t *testing.T) {
runTest := func(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int) {
blocktime := 50 * time.Millisecond
runTest := func(t *testing.T, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []kit.FullNodeOpts{
{
Opts: func(nodes []kit.TestFullNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
Network: network.Version9,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version10,
Height: 2,
Migration: stmgr.UpgradeActorsV3,
}, {
Network: network.Version12,
Height: 3,
Migration: stmgr.UpgradeActorsV4,
}, {
Network: network.Version13,
Height: 1000000000,
Migration: stmgr.UpgradeActorsV5,
}})
},
},
}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
opts := kit.ConstructorOpts(kit.LatestActorsAt(1000000000))
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
ens.InterconnectAll().BeginMining(blocktime)
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
client.WaitTillChain(ctx, kit.HeightAtLeast(10))
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
for {
h, err := client.ChainHead(ctx)
require.NoError(t, err)
if h.Height() > 10 {
break
}
}
toCheck := kit.StartPledge(t, ctx, miner, nSectors, 0, nil)
toCheck := miner.StartPledge(ctx, nSectors, 0, nil)
for len(toCheck) > 0 {
states := map[api.SectorState]int{}
@ -250,12 +137,9 @@ func TestPledgeBeforeNv13(t *testing.T) {
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
atomic.StoreInt64(&mine, 0)
<-done
}
t.Run("100-before-nv13", func(t *testing.T) {
runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 100)
runTest(t, 100)
})
}

View File

@ -2,17 +2,14 @@ package itests
import (
"context"
"fmt"
"testing"
"time"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node/impl"
"github.com/stretchr/testify/require"
)
@ -21,49 +18,15 @@ func TestTerminate(t *testing.T) {
kit.QuietMiningLogs()
const blocktime = 2 * time.Millisecond
nSectors := uint64(2)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := kit.MockMinerBuilder(t,
[]kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)},
[]kit.StorageMiner{{Full: 0, Preseal: int(nSectors)}},
var (
blocktime = 2 * time.Millisecond
nSectors = 2
ctx = context.Background()
)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
done := make(chan struct{})
go func() {
defer close(done)
for ctx.Err() == nil {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
if ctx.Err() != nil {
// context was canceled, ignore the error.
return
}
t.Error(err)
}
}
}()
defer func() {
cancel()
<-done
}()
opts := kit.ConstructorOpts(kit.LatestActorsAt(-1))
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.PresealSectors(nSectors), opts)
ens.InterconnectAll().BeginMining(blocktime)
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
@ -74,31 +37,24 @@ func TestTerminate(t *testing.T) {
p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
require.Equal(t, p.MinerPower, p.TotalPower)
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*nSectors))
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors)))
fmt.Printf("Seal a sector\n")
t.Log("Seal a sector")
kit.PledgeSectors(t, ctx, miner, 1, 0, nil)
miner.PledgeSectors(ctx, 1, 0, nil)
fmt.Printf("wait for power\n")
t.Log("wait for power")
{
// Wait until proven.
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
fmt.Printf("End for head.Height > %d\n", waitUntil)
waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 20 // 20 is some slack for the proof to be submitted + applied
t.Logf("End for head.Height > %d", waitUntil)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > waitUntil {
fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
}
ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())
}
nSectors++
@ -106,9 +62,9 @@ func TestTerminate(t *testing.T) {
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
require.Equal(t, p.MinerPower, p.TotalPower)
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*nSectors))
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors)))
fmt.Println("Terminate a sector")
t.Log("Terminate a sector")
toTerminate := abi.SectorNumber(3)
@ -121,7 +77,7 @@ loop:
si, err := miner.SectorsStatus(ctx, toTerminate, false)
require.NoError(t, err)
fmt.Println("state: ", si.State, msgTriggerred)
t.Log("state: ", si.State, msgTriggerred)
switch sealing.SectorState(si.State) {
case sealing.Terminating:
@ -137,7 +93,7 @@ loop:
require.NoError(t, err)
if c != nil {
msgTriggerred = true
fmt.Println("terminate message:", c)
t.Log("terminate message:", c)
{
p, err := miner.SectorTerminatePending(ctx)
@ -153,11 +109,14 @@ loop:
time.Sleep(100 * time.Millisecond)
}
// need to wait for message to be mined and applied.
time.Sleep(5 * time.Second)
// check power decreased
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
require.Equal(t, p.MinerPower, p.TotalPower)
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*(nSectors-1)))
require.Equal(t, types.NewInt(uint64(ssz)*uint64(nSectors-1)), p.MinerPower.RawBytePower)
// check in terminated set
{
@ -177,22 +136,15 @@ loop:
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
build.Clock.Sleep(blocktime)
}
require.NoError(t, err)
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 20 // slack like above
t.Logf("End for head.Height > %d", waitUntil)
ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
require.Equal(t, p.MinerPower, p.TotalPower)
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*(nSectors-1)))
require.Equal(t, types.NewInt(uint64(ssz)*uint64(nSectors-1)), p.MinerPower.RawBytePower)
}

View File

@ -2,7 +2,6 @@ package itests
import (
"context"
"fmt"
"testing"
"time"
@ -13,7 +12,6 @@ import (
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
"github.com/stretchr/testify/require"
)
@ -25,10 +23,10 @@ func TestTapeFix(t *testing.T) {
// The "before" case is disabled, because we need the builder to mock 32 GiB sectors to accurately repro this case
// TODO: Make the mock sector size configurable and reenable this
// t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) })
t.Run("after", func(t *testing.T) { testTapeFix(t, kit.MockMinerBuilder, blocktime, true) })
t.Run("after", func(t *testing.T) { testTapeFix(t, blocktime, true) })
}
func testTapeFix(t *testing.T, b kit.APIBuilder, blocktime time.Duration, after bool) {
func testTapeFix(t *testing.T, blocktime time.Duration, after bool) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -44,46 +42,14 @@ func testTapeFix(t *testing.T, b kit.APIBuilder, blocktime time.Duration, after
})
}
n, sn := b(t, []kit.FullNodeOpts{{Opts: func(_ []kit.TestFullNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), upgradeSchedule)
}}}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
done := make(chan struct{})
go func() {
defer close(done)
for ctx.Err() == nil {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
if ctx.Err() != nil {
// context was canceled, ignore the error.
return
}
t.Error(err)
}
}
}()
defer func() {
cancel()
<-done
}()
nopts := kit.ConstructorOpts(node.Override(new(stmgr.UpgradeSchedule), upgradeSchedule))
_, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), nopts)
ens.InterconnectAll().BeginMining(blocktime)
sid, err := miner.PledgeSector(ctx)
require.NoError(t, err)
fmt.Printf("All sectors is fsm\n")
t.Log("All sectors is fsm")
// If before, we expect the precommit to fail
successState := api.SectorState(sealing.CommitFailed)
@ -101,6 +67,6 @@ func testTapeFix(t *testing.T, b kit.APIBuilder, blocktime time.Duration, after
}
require.NotEqual(t, failureState, st.State)
build.Clock.Sleep(100 * time.Millisecond)
fmt.Println("WaitSeal")
t.Log("WaitSeal")
}
}
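The net effect of the conversions in this file is that a test needs only the ensemble and its options. A hypothetical, self-contained smoke test (not part of this change) showing the minimal shape, built only from calls that appear elsewhere in this diff:

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/stretchr/testify/require"
)

// TestKitSmoke is a hypothetical example: one full node, one miner, mocked
// proofs, background mining handled by the ensemble.
func TestKitSmoke(t *testing.T) {
	kit.QuietMiningLogs()

	ctx := context.Background()
	blocktime := 2 * time.Millisecond

	client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
	ens.InterconnectAll().BeginMining(blocktime)

	maddr, err := miner.ActorAddress(ctx)
	require.NoError(t, err)

	head, err := client.ChainHead(ctx)
	require.NoError(t, err)
	t.Logf("miner %s is on a chain at height %d", maddr, head.Height())
}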

View File

@ -2,122 +2,127 @@ package itests
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/chain/wallet"
"github.com/filecoin-project/lotus/itests/kit"
verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"
"github.com/stretchr/testify/require"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/node/impl"
verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/impl"
)
func TestVerifiedClientTopUp(t *testing.T) {
blockTime := 100 * time.Millisecond
test := func(nv network.Version, shouldWork bool) func(*testing.T) {
return func(t *testing.T) {
nodes, miners := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithNetworkUpgradeAt(nv, -1)}, kit.OneMiner)
api := nodes[0].FullNode.(*impl.FullNodeAPI)
rootKey, err := wallet.GenerateKey(types.KTSecp256k1)
require.NoError(t, err)
verifierKey, err := wallet.GenerateKey(types.KTSecp256k1)
require.NoError(t, err)
verifiedClientKey, err := wallet.GenerateKey(types.KTBLS)
require.NoError(t, err)
bal, err := types.ParseFIL("100fil")
require.NoError(t, err)
node, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(),
kit.RootVerifier(rootKey, abi.NewTokenAmount(bal.Int64())),
kit.Account(verifierKey, abi.NewTokenAmount(bal.Int64())), // assign some balance to the verifier so they can send an AddClient message.
kit.ConstructorOpts(kit.InstantaneousNetworkVersion(nv)))
ens.InterconnectAll().BeginMining(blockTime)
api := node.FullNode.(*impl.FullNodeAPI)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
//Get VRH
// get VRH
vrh, err := api.StateVerifiedRegistryRootKey(ctx, types.TipSetKey{})
if err != nil {
t.Fatal(err)
}
fmt.Println(vrh.String())
require.NoError(t, err)
//Add verifier
verifier, err := api.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
// import the root key.
rootAddr, err := api.WalletImport(ctx, &rootKey.KeyInfo)
require.NoError(t, err)
// import the verifier's key.
verifierAddr, err := api.WalletImport(ctx, &verifierKey.KeyInfo)
require.NoError(t, err)
// import the verified client's key.
verifiedClientAddr, err := api.WalletImport(ctx, &verifiedClientKey.KeyInfo)
require.NoError(t, err)
params, err := actors.SerializeParams(&verifreg4.AddVerifierParams{Address: verifierAddr, Allowance: big.NewInt(100000000000)})
require.NoError(t, err)
params, err := actors.SerializeParams(&verifreg4.AddVerifierParams{Address: verifier, Allowance: big.NewInt(100000000000)})
if err != nil {
t.Fatal(err)
}
msg := &types.Message{
From: rootAddr,
To: verifreg.Address,
From: vrh,
Method: verifreg.Methods.AddVerifier,
Params: params,
Value: big.Zero(),
}
bm := kit.NewBlockMiner(t, miners[0])
bm.MineBlocks(ctx, 100*time.Millisecond)
t.Cleanup(bm.Stop)
sm, err := api.MpoolPushMessage(ctx, msg, nil)
if err != nil {
t.Fatal("AddVerifier failed: ", err)
}
require.NoError(t, err, "AddVerifier failed")
res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send message")
}
require.NoError(t, err)
require.EqualValues(t, 0, res.Receipt.ExitCode)
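Because the old and new lines are interleaved above, here is the new version of the AddVerifier hunk collected in one place: the message is now signed by the imported root key (rootAddr) rather than sent from the registry root address, and the push-and-wait pattern replaces the t.Fatal checks. Names (api, ctx, verifierAddr, rootAddr) come from the surrounding test.

// Serialize the actor params, push the message from the root key, and wait
// for it to land with a zero exit code.
params, err := actors.SerializeParams(&verifreg4.AddVerifierParams{Address: verifierAddr, Allowance: big.NewInt(100000000000)})
require.NoError(t, err)

msg := &types.Message{
	From:   rootAddr,
	To:     verifreg.Address,
	Method: verifreg.Methods.AddVerifier,
	Params: params,
	Value:  big.Zero(),
}

sm, err := api.MpoolPushMessage(ctx, msg, nil)
require.NoError(t, err, "AddVerifier failed")

res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
require.NoError(t, err)
require.EqualValues(t, 0, res.Receipt.ExitCode)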
//Assign datacap to a client
// assign datacap to a client
datacap := big.NewInt(10000)
clientAddress, err := api.WalletNew(ctx, types.KTBLS)
if err != nil {
t.Fatal(err)
}
params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: clientAddress, Allowance: datacap})
if err != nil {
t.Fatal(err)
}
params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: datacap})
require.NoError(t, err)
msg = &types.Message{
From: verifierAddr,
To: verifreg.Address,
From: verifier,
Method: verifreg.Methods.AddVerifiedClient,
Params: params,
Value: big.Zero(),
}
sm, err = api.MpoolPushMessage(ctx, msg, nil)
if err != nil {
t.Fatal("AddVerifiedClient faield: ", err)
}
res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send message")
}
require.NoError(t, err)
res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
require.NoError(t, err)
require.EqualValues(t, 0, res.Receipt.ExitCode)
// check datacap balance
dcap, err := api.StateVerifiedClientStatus(ctx, verifiedClientAddr, types.EmptyTSK)
require.NoError(t, err)
//check datacap balance
dcap, err := api.StateVerifiedClientStatus(ctx, clientAddress, types.EmptyTSK)
if err != nil {
t.Fatal(err)
}
if !dcap.Equals(datacap) {
t.Fatal("")
}
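The retry that follows is the interesting branch: re-granting datacap to the same client should be rejected on actors v4 and below and accepted once top-ups are allowed. The assertion itself falls outside the hunks shown; a sketch of how the per-network expectation (shouldWork, from the enclosing closure) might be applied once the second AddVerifiedClient message below is constructed. Whether the failure surfaces at push time or via the receipt is an assumption here.

// Hypothetical continuation, not shown in these hunks.
sm, err = api.MpoolPushMessage(ctx, msg, nil)
if shouldWork {
	require.NoError(t, err)
	res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
	require.NoError(t, err)
	require.EqualValues(t, 0, res.Receipt.ExitCode)
} else {
	require.Error(t, err, "re-granting datacap on actors v4 and below should fail")
}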
//try to assign datacap to the same client should fail for actor v4 and below
params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: clientAddress, Allowance: datacap})
// try to assign datacap to the same client should fail for actor v4 and below
params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: datacap})
if err != nil {
t.Fatal(err)
}
msg = &types.Message{
From: verifierAddr,
To: verifreg.Address,
From: verifier,
Method: verifreg.Methods.AddVerifiedClient,
Params: params,
Value: big.Zero(),

View File

@ -2,7 +2,6 @@ package itests
import (
"context"
"fmt"
"testing"
"time"
@ -16,7 +15,6 @@ import (
minerActor "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node/impl"
proof3 "github.com/filecoin-project/specs-actors/v3/actors/runtime/proof"
"github.com/stretchr/testify/require"
)
@ -26,71 +24,38 @@ func TestWindowPostDispute(t *testing.T) {
kit.QuietMiningLogs()
b := kit.MockMinerBuilder
blocktime := 2 * time.Millisecond
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var (
client kit.TestFullNode
chainMiner kit.TestMiner
evilMiner kit.TestMiner
)
// First, we configure two miners. After sealing, we're going to turn off the first miner so
// it doesn't submit proofs.
//
// Then we're going to manually submit bad proofs.
n, sn := b(t,
[]kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)},
[]kit.StorageMiner{
{Full: 0, Preseal: kit.PresealGenesis},
{Full: 0},
})
client := n[0].FullNode.(*impl.FullNodeAPI)
chainMiner := sn[0]
evilMiner := sn[1]
{
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := chainMiner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
if err := evilMiner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
}
opts := kit.ConstructorOpts(kit.LatestActorsAt(-1))
ens := kit.NewEnsemble(t, kit.MockProofs()).
FullNode(&client, opts).
Miner(&chainMiner, &client, opts).
Miner(&evilMiner, &client, opts, kit.PresealSectors(0)).
Start()
defaultFrom, err := client.WalletDefaultAddress(ctx)
require.NoError(t, err)
build.Clock.Sleep(time.Second)
// Mine with the _second_ node (the good one).
done := make(chan struct{})
go func() {
defer close(done)
for ctx.Err() == nil {
build.Clock.Sleep(blocktime)
if err := chainMiner.MineOne(ctx, kit.MineNext); err != nil {
if ctx.Err() != nil {
// context was canceled, ignore the error.
return
}
t.Error(err)
}
}
}()
defer func() {
cancel()
<-done
}()
ens.InterconnectAll().BeginMining(blocktime, &chainMiner)
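Since the removed and added setup lines are interleaved in this hunk, here is the new construction collected in one place: declared test handles, a builder-style ensemble with one full node and two miners (the evil one starting with zero preseals), and mining pinned to the honest chainMiner. All names are taken from the hunks above; blocktime is defined earlier in the test.

var (
	client     kit.TestFullNode
	chainMiner kit.TestMiner
	evilMiner  kit.TestMiner
)

opts := kit.ConstructorOpts(kit.LatestActorsAt(-1))
ens := kit.NewEnsemble(t, kit.MockProofs()).
	FullNode(&client, opts).
	Miner(&chainMiner, &client, opts).
	Miner(&evilMiner, &client, opts, kit.PresealSectors(0)).
	Start()

// Only the honest miner mines blocks; the evil miner later goes offline and
// has bad proofs submitted on its behalf.
ens.InterconnectAll().BeginMining(blocktime, &chainMiner)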
// Give the chain miner enough sectors to win every block.
kit.PledgeSectors(t, ctx, chainMiner, 10, 0, nil)
chainMiner.PledgeSectors(ctx, 10, 0, nil)
// And the evil one 1 sector. No cookie for you.
kit.PledgeSectors(t, ctx, evilMiner, 1, 0, nil)
evilMiner.PledgeSectors(ctx, 1, 0, nil)
// Let the evil miner's sectors gain power.
evilMinerAddr, err := evilMiner.ActorAddress(ctx)
@ -99,19 +64,13 @@ func TestWindowPostDispute(t *testing.T) {
di, err := client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
fmt.Printf("Running one proving period\n")
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2)
t.Logf("Running one proving period\n")
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
waitUntil := di.PeriodStart + di.WPoStProvingPeriod*2 + 1
t.Logf("End for head.Height > %d", waitUntil)
if head.Height() > di.PeriodStart+di.WPoStProvingPeriod*2 {
fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
build.Clock.Sleep(blocktime)
}
ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())
p, err := client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err)
@ -128,12 +87,12 @@ func TestWindowPostDispute(t *testing.T) {
evilSectorLoc, err := client.StateSectorPartition(ctx, evilMinerAddr, evilSectorNo, types.EmptyTSK)
require.NoError(t, err)
fmt.Println("evil miner stopping")
t.Log("evil miner stopping")
// Now stop the evil miner, and start manually submitting bad proofs.
require.NoError(t, evilMiner.Stop(ctx))
fmt.Println("evil miner stopped")
t.Log("evil miner stopped")
// Wait until we need to prove our sector.
for {
@ -145,7 +104,7 @@ func TestWindowPostDispute(t *testing.T) {
build.Clock.Sleep(blocktime)
}
err = submitBadProof(ctx, client, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
err = submitBadProof(ctx, client, evilMiner.OwnerKey.Address, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
require.NoError(t, err, "evil proof not accepted")
// Wait until after the proving period.
@ -158,7 +117,7 @@ func TestWindowPostDispute(t *testing.T) {
build.Clock.Sleep(blocktime)
}
fmt.Println("accepted evil proof")
t.Log("accepted evil proof")
// Make sure the evil node didn't lose any power.
p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
@ -185,7 +144,7 @@ func TestWindowPostDispute(t *testing.T) {
sm, err := client.MpoolPushMessage(ctx, msg, nil)
require.NoError(t, err)
fmt.Println("waiting dispute")
t.Log("waiting dispute")
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Zero(t, rec.Receipt.ExitCode, "dispute not accepted: %s", rec.Receipt.ExitCode.Error())
@ -243,7 +202,7 @@ func TestWindowPostDispute(t *testing.T) {
}
// Now try to be evil again
err = submitBadProof(ctx, client, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
err = submitBadProof(ctx, client, evilMiner.OwnerKey.Address, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
require.Error(t, err)
require.Contains(t, err.Error(), "message execution failed: exit 16, reason: window post failed: invalid PoSt")
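The extra owner parameter threaded into submitBadProof (see the hunks at the end of this file) is needed because SubmitWindowedPoSt may only be sent from one of the miner's control addresses; with the new ensemble the full node's default wallet is a separate genesis account, so the evil miner's owner key is passed in explicitly. A sketch of the relevant fragment of the helper, with maddr, enc, ctx and client from the function body; the To field is inferred here, and the helper's tail is outside the shown hunks.

// The bad proof has to come from a control address of the miner being framed,
// so the caller supplies the owner address (evilMiner.OwnerKey.Address above).
msg := &types.Message{
	To:     maddr,
	From:   owner,
	Method: minerActor.Methods.SubmitWindowedPoSt,
	Params: enc,
	Value:  types.NewInt(0),
}
if _, err := client.MpoolPushMessage(ctx, msg, nil); err != nil {
	return err
}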
@ -255,27 +214,14 @@ func TestWindowPostDisputeFails(t *testing.T) {
kit.QuietMiningLogs()
b := kit.MockMinerBuilder
blocktime := 2 * time.Millisecond
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
{
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
}
opts := kit.ConstructorOpts(kit.LatestActorsAt(-1))
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
ens.InterconnectAll().BeginMining(blocktime)
defaultFrom, err := client.WalletDefaultAddress(ctx)
require.NoError(t, err)
@ -285,48 +231,21 @@ func TestWindowPostDisputeFails(t *testing.T) {
build.Clock.Sleep(time.Second)
// Mine with the _second_ node (the good one).
done := make(chan struct{})
go func() {
defer close(done)
for ctx.Err() == nil {
build.Clock.Sleep(blocktime)
if err := miner.MineOne(ctx, kit.MineNext); err != nil {
if ctx.Err() != nil {
// context was canceled, ignore the error.
return
}
t.Error(err)
}
}
}()
defer func() {
cancel()
<-done
}()
kit.PledgeSectors(t, ctx, miner, 10, 0, nil)
miner.PledgeSectors(ctx, 10, 0, nil)
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
fmt.Printf("Running one proving period\n")
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2)
t.Log("Running one proving period")
waitUntil := di.PeriodStart + di.WPoStProvingPeriod*2 + 1
t.Logf("End for head.Height > %d", waitUntil)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > di.PeriodStart+di.WPoStProvingPeriod*2 {
fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
build.Clock.Sleep(blocktime)
}
ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())
ssz, err := miner.ActorSectorSize(ctx, maddr)
require.NoError(t, err)
expectedPower := types.NewInt(uint64(ssz) * (kit.GenesisPreseals + 10))
expectedPower := types.NewInt(uint64(ssz) * (kit.DefaultPresealsPerBootstrapMiner + 10))
p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
@ -388,7 +307,7 @@ waitForProof:
func submitBadProof(
ctx context.Context,
client api.FullNode, maddr address.Address,
client api.FullNode, owner address.Address, maddr address.Address,
di *dline.Info, dlIdx, partIdx uint64,
) error {
head, err := client.ChainHead(ctx)
@ -396,11 +315,6 @@ func submitBadProof(
return err
}
from, err := client.WalletDefaultAddress(ctx)
if err != nil {
return err
}
minerInfo, err := client.StateMinerInfo(ctx, maddr, head.Key())
if err != nil {
return err
@ -435,7 +349,7 @@ func submitBadProof(
Method: minerActor.Methods.SubmitWindowedPoSt,
Params: enc,
Value: types.NewInt(0),
From: from,
From: owner,
}
sm, err := client.MpoolPushMessage(ctx, msg, nil)
if err != nil {

View File

@ -6,18 +6,18 @@ import (
"testing"
"time"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node/impl"
)
@ -38,50 +38,20 @@ func TestWindowedPost(t *testing.T) {
} {
height := height // copy to satisfy lints
t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
testWindowPostUpgrade(t, kit.MockMinerBuilder, blocktime, nSectors, height)
testWindowPostUpgrade(t, blocktime, nSectors, height)
})
}
}
func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int, upgradeHeight abi.ChainEpoch) {
func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, upgradeHeight abi.ChainEpoch) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(upgradeHeight)}, kit.OneMiner)
opts := kit.ConstructorOpts(kit.LatestActorsAt(upgradeHeight))
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
ens.InterconnectAll().BeginMining(blocktime)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
done := make(chan struct{})
go func() {
defer close(done)
for ctx.Err() == nil {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
if ctx.Err() != nil {
// context was canceled, ignore the error.
return
}
t.Error(err)
}
}
}()
defer func() {
cancel()
<-done
}()
kit.PledgeSectors(t, ctx, miner, nSectors, 0, nil)
miner.PledgeSectors(ctx, nSectors, 0, nil)
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
@ -92,19 +62,12 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
mid, err := address.IDFromAddress(maddr)
require.NoError(t, err)
fmt.Printf("Running one proving period\n")
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
t.Log("Running one proving period")
waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
t.Logf("End for head.Height > %d", waitUntil)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
build.Clock.Sleep(blocktime)
}
ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())
p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
@ -113,9 +76,9 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
require.NoError(t, err)
require.Equal(t, p.MinerPower, p.TotalPower)
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+kit.GenesisPreseals)))
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+kit.DefaultPresealsPerBootstrapMiner)))
fmt.Printf("Drop some sectors\n")
t.Log("Drop some sectors")
// Drop 2 sectors from deadline 2 partition 0 (full partition / deadline)
{
@ -159,7 +122,7 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
all, err := secs.All(2)
require.NoError(t, err)
fmt.Println("the sectors", all)
t.Log("the sectors", all)
s = storage.SectorRef{
ID: abi.SectorID{
@ -175,20 +138,12 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
fmt.Printf("Go through another PP, wait for sectors to become faulty\n")
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
t.Log("Go through another PP, wait for sectors to become faulty")
waitUntil = di.PeriodStart + di.WPoStProvingPeriod + 2
t.Logf("End for head.Height > %d", waitUntil)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > di.PeriodStart+(di.WPoStProvingPeriod)+2 {
fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
build.Clock.Sleep(blocktime)
}
ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
@ -196,9 +151,9 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
require.Equal(t, p.MinerPower, p.TotalPower)
sectors := p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
require.Equal(t, nSectors+kit.GenesisPreseals-3, int(sectors)) // -3 just removed sectors
require.Equal(t, nSectors+kit.DefaultPresealsPerBootstrapMiner-3, int(sectors)) // -3 just removed sectors
fmt.Printf("Recover one sector\n")
t.Log("Recover one sector")
err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, false)
require.NoError(t, err)
@ -206,19 +161,11 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
waitUntil = di.PeriodStart + di.WPoStProvingPeriod + 2
t.Logf("End for head.Height > %d", waitUntil)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
build.Clock.Sleep(blocktime)
}
ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
@ -226,11 +173,11 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
require.Equal(t, p.MinerPower, p.TotalPower)
sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
require.Equal(t, nSectors+kit.GenesisPreseals-2, int(sectors)) // -2 not recovered sectors
require.Equal(t, nSectors+kit.DefaultPresealsPerBootstrapMiner-2, int(sectors)) // -2 not recovered sectors
// pledge a sector after recovery
kit.PledgeSectors(t, ctx, miner, 1, nSectors, nil)
miner.PledgeSectors(ctx, 1, nSectors, nil)
{
// Wait until proven.
@ -238,17 +185,10 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
require.NoError(t, err)
waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
fmt.Printf("End for head.Height > %d\n", waitUntil)
t.Logf("End for head.Height > %d\n", waitUntil)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > waitUntil {
fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
}
ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Logf("Now head.Height = %d", ts.Height())
}
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
@ -257,7 +197,7 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
require.Equal(t, p.MinerPower, p.TotalPower)
sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
require.Equal(t, nSectors+kit.GenesisPreseals-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged
require.Equal(t, nSectors+kit.DefaultPresealsPerBootstrapMiner-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged
}
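The only mock-specific piece in this test is how sectors are failed and recovered: the miner's storage manager is the mock SectorMgr, so MarkFailed toggles the fault directly and the next proving period does the rest. A compressed sketch of that flow, assuming MarkFailed(s, true) is the counterpart of the MarkFailed(s, false) call shown above; s and miner come from the surrounding test.

sm := miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr)

// Mark sector s as failed: after the next proving period it is reported
// faulty and its raw-byte power is deducted.
require.NoError(t, sm.MarkFailed(s, true))

// ... wait one proving period with WaitTillChain, as above ...

// Clear the failure so the sector can be recovered in a later deadline.
require.NoError(t, sm.MarkFailed(s, false))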
func TestWindowPostBaseFeeNoBurn(t *testing.T) {
@ -276,11 +216,8 @@ func TestWindowPostBaseFeeNoBurn(t *testing.T) {
och := build.UpgradeClausHeight
build.UpgradeClausHeight = 10
n, sn := kit.MockMinerBuilder(t, kit.DefaultFullOpts(1), kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
bm := kit.ConnectAndStartMining(t, blocktime, miner, client)
t.Cleanup(bm.Stop)
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
ens.InterconnectAll().BeginMining(blocktime)
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
@ -288,7 +225,7 @@ func TestWindowPostBaseFeeNoBurn(t *testing.T) {
mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
kit.PledgeSectors(t, ctx, miner, nSectors, 0, nil)
miner.PledgeSectors(ctx, nSectors, 0, nil)
wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
require.NoError(t, err)
en := wact.Nonce
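Both base-fee tests use the worker account's nonce to detect that the miner has submitted its WindowPoSt: record the nonce before pledging, then poll until it advances. A sketch of that wait, using only StateGetActor as above; the message inspection that follows is outside the shown hunks.

// en was captured before the miner had anything to prove. Once the worker's
// nonce moves past it, at least one message (the WindowPoSt) has been sent
// and can be inspected for its base-fee burn.
for {
	wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
	require.NoError(t, err)
	if wact.Nonce > en {
		break
	}
	build.Clock.Sleep(blocktime)
}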
@ -327,11 +264,9 @@ func TestWindowPostBaseFeeBurn(t *testing.T) {
blocktime := 2 * time.Millisecond
n, sn := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)}, kit.OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
bm := kit.ConnectAndStartMining(t, blocktime, miner, client)
t.Cleanup(bm.Stop)
opts := kit.ConstructorOpts(kit.LatestActorsAt(-1))
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
ens.InterconnectAll().BeginMining(blocktime)
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
@ -339,7 +274,7 @@ func TestWindowPostBaseFeeBurn(t *testing.T) {
mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
kit.PledgeSectors(t, ctx, miner, 10, 0, nil)
miner.PledgeSectors(ctx, 10, 0, nil)
wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
require.NoError(t, err)
en := wact.Nonce