Merge pull request #6329 from filecoin-project/raulk/itests-refactor-kit
revamped integration test kit (aka. Operation Sparks Joy)
commit cefd140e45
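For orientation before the diff itself: below is a minimal sketch of the test shape this refactor converges on, assembled only from helpers that appear in the hunks that follow (EnsembleMinimal, InterconnectAll, BeginMining, NewDealHarness, MakeOnlineDeal, PerformRetrieval, AssertFilesEqual). The test name and the block time are illustrative, not part of the PR.

    package itests

    import (
        "context"
        "testing"
        "time"

        "github.com/filecoin-project/lotus/itests/kit"
    )

    // Sketch only (hypothetical test name): one full node plus one miner with
    // mocked proofs, interconnected and mining, then an online deal that is
    // stored and retrieved back for comparison.
    func TestKitSketch(t *testing.T) {
        kit.QuietMiningLogs()

        client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
        ens.InterconnectAll().BeginMining(10 * time.Millisecond)

        dh := kit.NewDealHarness(t, client, miner)
        deal, res, inPath := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{Rseed: 6})
        outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
        kit.AssertFilesEqual(t, inPath, outPath)
    }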
@@ -771,6 +771,31 @@ workflows:
       suite: itest-deadlines
       target: "./itests/deadlines_test.go"
 
+  - test:
+      name: test-itest-deals_concurrent
+      suite: itest-deals_concurrent
+      target: "./itests/deals_concurrent_test.go"
+
+  - test:
+      name: test-itest-deals_offline
+      suite: itest-deals_offline
+      target: "./itests/deals_offline_test.go"
+
+  - test:
+      name: test-itest-deals_power
+      suite: itest-deals_power
+      target: "./itests/deals_power_test.go"
+
+  - test:
+      name: test-itest-deals_pricing
+      suite: itest-deals_pricing
+      target: "./itests/deals_pricing_test.go"
+
+  - test:
+      name: test-itest-deals_publish
+      suite: itest-deals_publish
+      target: "./itests/deals_publish_test.go"
+
   - test:
       name: test-itest-deals
       suite: itest-deals
@@ -10,14 +10,13 @@ import (
     "testing"
     "time"
 
-    logging "github.com/ipfs/go-log/v2"
+    "github.com/filecoin-project/go-state-types/network"
     "github.com/stretchr/testify/require"
     "github.com/urfave/cli/v2"
 
     "github.com/filecoin-project/go-state-types/abi"
 
     "github.com/filecoin-project/lotus/api"
-    "github.com/filecoin-project/lotus/build"
     "github.com/filecoin-project/lotus/chain/actors/policy"
     "github.com/filecoin-project/lotus/chain/types"
     "github.com/filecoin-project/lotus/itests/kit"

@@ -32,36 +31,21 @@ func TestWorkerKeyChange(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
 
-    _ = logging.SetLogLevel("*", "INFO")
-
-    policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
-    policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
-    policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
-
     kit.QuietMiningLogs()
 
     blocktime := 1 * time.Millisecond
-
-    clients, miners := kit.MockMinerBuilder(t,
-        []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1), kit.FullNodeWithLatestActorsAt(-1)},
-        kit.OneMiner)
-
-    client1 := clients[0]
-    client2 := clients[1]
-
-    // Connect the nodes.
-    addrinfo, err := client1.NetAddrsListen(ctx)
-    require.NoError(t, err)
-    err = client2.NetConnect(ctx, addrinfo)
-    require.NoError(t, err)
+    client1, client2, miner, ens := kit.EnsembleTwoOne(t, kit.MockProofs(),
+        kit.ConstructorOpts(kit.InstantaneousNetworkVersion(network.Version13)),
+    )
+    ens.InterconnectAll().BeginMining(blocktime)
 
     output := bytes.NewBuffer(nil)
     run := func(cmd *cli.Command, args ...string) error {
         app := cli.NewApp()
         app.Metadata = map[string]interface{}{
             "repoType":         repo.StorageMiner,
-            "testnode-full":    clients[0],
-            "testnode-storage": miners[0],
+            "testnode-full":    client1,
+            "testnode-storage": miner,
         }
         app.Writer = output
         api.RunningNodeType = api.NodeMiner

@@ -78,9 +62,6 @@ func TestWorkerKeyChange(t *testing.T) {
         return cmd.Action(cctx)
     }
 
-    // start mining
-    kit.ConnectAndStartMining(t, blocktime, miners[0], client1, client2)
-
     newKey, err := client1.WalletNew(ctx, types.KTBLS)
     require.NoError(t, err)
 

@@ -105,14 +86,8 @@ func TestWorkerKeyChange(t *testing.T) {
     require.Error(t, run(actorConfirmChangeWorker, "--really-do-it", newKey.String()))
     output.Reset()
 
-    for {
-        head, err := client1.ChainHead(ctx)
-        require.NoError(t, err)
-        if head.Height() >= abi.ChainEpoch(targetEpoch) {
-            break
-        }
-        build.Clock.Sleep(10 * blocktime)
-    }
+    client1.WaitTillChain(ctx, kit.HeightAtLeast(abi.ChainEpoch(targetEpoch)))
+
     require.NoError(t, run(actorConfirmChangeWorker, "--really-do-it", newKey.String()))
     output.Reset()
 

@@ -121,23 +96,8 @@ func TestWorkerKeyChange(t *testing.T) {
 
     // Wait for finality (worker key switch).
     targetHeight := head.Height() + policy.ChainFinality
-    for {
-        head, err := client1.ChainHead(ctx)
-        require.NoError(t, err)
-        if head.Height() >= targetHeight {
-            break
-        }
-        build.Clock.Sleep(10 * blocktime)
-    }
+    client1.WaitTillChain(ctx, kit.HeightAtLeast(targetHeight))
 
     // Make sure the other node can catch up.
-    for i := 0; i < 20; i++ {
-        head, err := client2.ChainHead(ctx)
-        require.NoError(t, err)
-        if head.Height() >= targetHeight {
-            return
-        }
-        build.Clock.Sleep(10 * blocktime)
-    }
-    t.Fatal("failed to reach target epoch on the second miner")
+    client2.WaitTillChain(ctx, kit.HeightAtLeast(targetHeight))
 }
@@ -7,13 +7,9 @@ import (
     "time"
 
     "github.com/filecoin-project/lotus/itests/kit"
-    "github.com/filecoin-project/lotus/node/impl"
-    logging "github.com/ipfs/go-log/v2"
     "github.com/stretchr/testify/require"
     "github.com/urfave/cli/v2"
 
-    "github.com/filecoin-project/go-state-types/abi"
-
     "github.com/filecoin-project/lotus/api"
     "github.com/filecoin-project/lotus/chain/actors/policy"
     "github.com/filecoin-project/lotus/node/repo"

@@ -24,12 +20,6 @@ func TestMinerAllInfo(t *testing.T) {
         t.Skip("skipping test in short mode")
     }
 
-    _ = logging.SetLogLevel("*", "INFO")
-
-    policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
-    policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
-    policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
-
     _test = true
 
     kit.QuietMiningLogs()

@@ -40,16 +30,15 @@ func TestMinerAllInfo(t *testing.T) {
         policy.SetPreCommitChallengeDelay(oldDelay)
     })
 
-    n, sn := kit.Builder(t, kit.OneFull, kit.OneMiner)
-    client, miner := n[0].FullNode, sn[0]
-    kit.ConnectAndStartMining(t, time.Second, miner, client.(*impl.FullNodeAPI))
+    client, miner, ens := kit.EnsembleMinimal(t)
+    ens.InterconnectAll().BeginMining(time.Second)
 
     run := func(t *testing.T) {
         app := cli.NewApp()
         app.Metadata = map[string]interface{}{
             "repoType":         repo.StorageMiner,
-            "testnode-full":    n[0],
-            "testnode-storage": sn[0],
+            "testnode-full":    client,
+            "testnode-storage": miner,
         }
         api.RunningNodeType = api.NodeMiner
 

@@ -61,14 +50,9 @@ func TestMinerAllInfo(t *testing.T) {
     t.Run("pre-info-all", run)
 
     dh := kit.NewDealHarness(t, client, miner)
-    _, _, _ = dh.MakeFullDeal(kit.MakeFullDealParams{
-        Ctx:         context.Background(),
-        Rseed:       6,
-        CarExport:   false,
-        FastRet:     false,
-        StartEpoch:  0,
-        DoRetrieval: true,
-    })
+    deal, res, inPath := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{Rseed: 6})
+    outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
+    kit.AssertFilesEqual(t, inPath, outPath)
 
     t.Run("post-info-all", run)
 }
@ -7,34 +7,32 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/go-state-types/big"
|
"github.com/filecoin-project/go-state-types/big"
|
||||||
|
"github.com/filecoin-project/go-state-types/exitcode"
|
||||||
lapi "github.com/filecoin-project/lotus/api"
|
lapi "github.com/filecoin-project/lotus/api"
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
"github.com/filecoin-project/lotus/itests/kit"
|
"github.com/filecoin-project/lotus/itests/kit"
|
||||||
"github.com/filecoin-project/lotus/node/impl"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAPI(t *testing.T) {
|
func TestAPI(t *testing.T) {
|
||||||
t.Run("direct", func(t *testing.T) {
|
t.Run("direct", func(t *testing.T) {
|
||||||
runAPITest(t, kit.Builder)
|
runAPITest(t)
|
||||||
})
|
})
|
||||||
t.Run("rpc", func(t *testing.T) {
|
t.Run("rpc", func(t *testing.T) {
|
||||||
runAPITest(t, kit.RPCBuilder)
|
runAPITest(t, kit.ThroughRPC())
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
type apiSuite struct {
|
type apiSuite struct {
|
||||||
makeNodes kit.APIBuilder
|
opts []interface{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// runAPITest is the entry point to API test suite
|
// runAPITest is the entry point to API test suite
|
||||||
func runAPITest(t *testing.T, b kit.APIBuilder) {
|
func runAPITest(t *testing.T, opts ...interface{}) {
|
||||||
ts := apiSuite{
|
ts := apiSuite{opts: opts}
|
||||||
makeNodes: b,
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run("version", ts.testVersion)
|
t.Run("version", ts.testVersion)
|
||||||
t.Run("id", ts.testID)
|
t.Run("id", ts.testID)
|
||||||
@ -51,145 +49,114 @@ func (ts *apiSuite) testVersion(t *testing.T) {
|
|||||||
lapi.RunningNodeType = lapi.NodeUnknown
|
lapi.RunningNodeType = lapi.NodeUnknown
|
||||||
})
|
})
|
||||||
|
|
||||||
ctx := context.Background()
|
full, _, _ := kit.EnsembleMinimal(t, ts.opts...)
|
||||||
apis, _ := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
|
|
||||||
napi := apis[0]
|
v, err := full.Version(context.Background())
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
v, err := napi.Version(ctx)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
versions := strings.Split(v.Version, "+")
|
versions := strings.Split(v.Version, "+")
|
||||||
if len(versions) <= 0 {
|
require.NotZero(t, len(versions), "empty version")
|
||||||
t.Fatal("empty version")
|
|
||||||
}
|
|
||||||
require.Equal(t, versions[0], build.BuildVersion)
|
require.Equal(t, versions[0], build.BuildVersion)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ts *apiSuite) testSearchMsg(t *testing.T) {
|
func (ts *apiSuite) testID(t *testing.T) {
|
||||||
apis, miners := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
|
ctx := context.Background()
|
||||||
|
|
||||||
api := apis[0]
|
full, _, _ := kit.EnsembleMinimal(t, ts.opts...)
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
id, err := full.ID(ctx)
|
||||||
senderAddr, err := api.WalletDefaultAddress(ctx)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
require.Regexp(t, "^12", id.Pretty())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *apiSuite) testConnectTwo(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
one, two, _, ens := kit.EnsembleTwoOne(t, ts.opts...)
|
||||||
|
|
||||||
|
p, err := one.NetPeers(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Empty(t, p, "node one has peers")
|
||||||
|
|
||||||
|
p, err = two.NetPeers(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Empty(t, p, "node two has peers")
|
||||||
|
|
||||||
|
ens.InterconnectAll()
|
||||||
|
|
||||||
|
peers, err := one.NetPeers(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Lenf(t, peers, 2, "node one doesn't have 2 peers")
|
||||||
|
|
||||||
|
peers, err = two.NetPeers(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Lenf(t, peers, 2, "node two doesn't have 2 peers")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *apiSuite) testSearchMsg(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
full, _, ens := kit.EnsembleMinimal(t, ts.opts...)
|
||||||
|
|
||||||
|
senderAddr, err := full.WalletDefaultAddress(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
msg := &types.Message{
|
msg := &types.Message{
|
||||||
From: senderAddr,
|
From: senderAddr,
|
||||||
To: senderAddr,
|
To: senderAddr,
|
||||||
Value: big.Zero(),
|
Value: big.Zero(),
|
||||||
}
|
}
|
||||||
bm := kit.NewBlockMiner(t, miners[0])
|
|
||||||
bm.MineBlocks(ctx, 100*time.Millisecond)
|
|
||||||
defer bm.Stop()
|
|
||||||
|
|
||||||
sm, err := api.MpoolPushMessage(ctx, msg, nil)
|
ens.BeginMining(100 * time.Millisecond)
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if res.Receipt.ExitCode != 0 {
|
|
||||||
t.Fatal("did not successfully send message")
|
|
||||||
}
|
|
||||||
|
|
||||||
searchRes, err := api.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true)
|
sm, err := full.MpoolPushMessage(ctx, msg, nil)
|
||||||
if err != nil {
|
require.NoError(t, err)
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if searchRes.TipSet != res.TipSet {
|
res, err := full.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
|
||||||
t.Fatalf("search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet)
|
require.NoError(t, err)
|
||||||
}
|
|
||||||
|
|
||||||
}
|
require.Equal(t, exitcode.Ok, res.Receipt.ExitCode, "message not successful")
|
||||||
|
|
||||||
func (ts *apiSuite) testID(t *testing.T) {
|
searchRes, err := full.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true)
|
||||||
ctx := context.Background()
|
require.NoError(t, err)
|
||||||
apis, _ := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
|
|
||||||
api := apis[0]
|
|
||||||
|
|
||||||
id, err := api.ID(ctx)
|
require.Equalf(t, res.TipSet, searchRes.TipSet, "search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet)
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
assert.Regexp(t, "^12", id.Pretty())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *apiSuite) testConnectTwo(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
apis, _ := ts.makeNodes(t, kit.TwoFull, kit.OneMiner)
|
|
||||||
|
|
||||||
p, err := apis[0].NetPeers(ctx)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if len(p) != 0 {
|
|
||||||
t.Error("Node 0 has a peer")
|
|
||||||
}
|
|
||||||
|
|
||||||
p, err = apis[1].NetPeers(ctx)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if len(p) != 0 {
|
|
||||||
t.Error("Node 1 has a peer")
|
|
||||||
}
|
|
||||||
|
|
||||||
addrs, err := apis[1].NetAddrsListen(ctx)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := apis[0].NetConnect(ctx, addrs); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
p, err = apis[0].NetPeers(ctx)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if len(p) != 1 {
|
|
||||||
t.Error("Node 0 doesn't have 1 peer")
|
|
||||||
}
|
|
||||||
|
|
||||||
p, err = apis[1].NetPeers(ctx)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if len(p) != 1 {
|
|
||||||
t.Error("Node 0 doesn't have 1 peer")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ts *apiSuite) testMining(t *testing.T) {
|
func (ts *apiSuite) testMining(t *testing.T) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
fulls, miners := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
|
|
||||||
api := fulls[0]
|
|
||||||
|
|
||||||
newHeads, err := api.ChainNotify(ctx)
|
full, miner, _ := kit.EnsembleMinimal(t, ts.opts...)
|
||||||
|
|
||||||
|
newHeads, err := full.ChainNotify(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
initHead := (<-newHeads)[0]
|
initHead := (<-newHeads)[0]
|
||||||
baseHeight := initHead.Val.Height()
|
baseHeight := initHead.Val.Height()
|
||||||
|
|
||||||
h1, err := api.ChainHead(ctx)
|
h1, err := full.ChainHead(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, int64(h1.Height()), int64(baseHeight))
|
require.Equal(t, int64(h1.Height()), int64(baseHeight))
|
||||||
|
|
||||||
bm := kit.NewBlockMiner(t, miners[0])
|
bm := kit.NewBlockMiner(t, miner)
|
||||||
bm.MineUntilBlock(ctx, fulls[0], nil)
|
bm.MineUntilBlock(ctx, full, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
<-newHeads
|
<-newHeads
|
||||||
|
|
||||||
h2, err := api.ChainHead(ctx)
|
h2, err := full.ChainHead(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Greater(t, int64(h2.Height()), int64(h1.Height()))
|
require.Greater(t, int64(h2.Height()), int64(h1.Height()))
|
||||||
|
|
||||||
|
bm.MineUntilBlock(ctx, full, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
<-newHeads
|
||||||
|
|
||||||
|
h3, err := full.ChainHead(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Greater(t, int64(h3.Height()), int64(h2.Height()))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ts *apiSuite) testMiningReal(t *testing.T) {
|
func (ts *apiSuite) testMiningReal(t *testing.T) {
|
||||||
@ -198,66 +165,30 @@ func (ts *apiSuite) testMiningReal(t *testing.T) {
|
|||||||
build.InsecurePoStValidation = true
|
build.InsecurePoStValidation = true
|
||||||
}()
|
}()
|
||||||
|
|
||||||
ctx := context.Background()
|
ts.testMining(t)
|
||||||
fulls, miners := ts.makeNodes(t, kit.OneFull, kit.OneMiner)
|
|
||||||
api := fulls[0]
|
|
||||||
|
|
||||||
newHeads, err := api.ChainNotify(ctx)
|
|
||||||
require.NoError(t, err)
|
|
||||||
at := (<-newHeads)[0].Val.Height()
|
|
||||||
|
|
||||||
h1, err := api.ChainHead(ctx)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, int64(at), int64(h1.Height()))
|
|
||||||
|
|
||||||
bm := kit.NewBlockMiner(t, miners[0])
|
|
||||||
|
|
||||||
bm.MineUntilBlock(ctx, fulls[0], nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
<-newHeads
|
|
||||||
|
|
||||||
h2, err := api.ChainHead(ctx)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Greater(t, int64(h2.Height()), int64(h1.Height()))
|
|
||||||
|
|
||||||
bm.MineUntilBlock(ctx, fulls[0], nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
<-newHeads
|
|
||||||
|
|
||||||
h3, err := api.ChainHead(ctx)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Greater(t, int64(h3.Height()), int64(h2.Height()))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ts *apiSuite) testNonGenesisMiner(t *testing.T) {
|
func (ts *apiSuite) testNonGenesisMiner(t *testing.T) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
n, sn := ts.makeNodes(t,
|
|
||||||
[]kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)},
|
|
||||||
[]kit.StorageMiner{{Full: 0, Preseal: kit.PresealGenesis}},
|
|
||||||
)
|
|
||||||
|
|
||||||
full, ok := n[0].FullNode.(*impl.FullNodeAPI)
|
full, genesisMiner, ens := kit.EnsembleMinimal(t, append(ts.opts, kit.MockProofs())...)
|
||||||
if !ok {
|
ens.InterconnectAll().BeginMining(4 * time.Millisecond)
|
||||||
t.Skip("not testing with a full node")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
genesisMiner := sn[0]
|
|
||||||
|
|
||||||
bm := kit.NewBlockMiner(t, genesisMiner)
|
time.Sleep(1 * time.Second)
|
||||||
bm.MineBlocks(ctx, 4*time.Millisecond)
|
|
||||||
t.Cleanup(bm.Stop)
|
|
||||||
|
|
||||||
gaa, err := genesisMiner.ActorAddress(ctx)
|
gaa, err := genesisMiner.ActorAddress(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
gmi, err := full.StateMinerInfo(ctx, gaa, types.EmptyTSK)
|
_, err = full.StateMinerInfo(ctx, gaa, types.EmptyTSK)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
testm := n[0].Stb(ctx, t, kit.TestSpt, gmi.Owner)
|
var newMiner kit.TestMiner
|
||||||
|
ens.Miner(&newMiner, full,
|
||||||
|
kit.OwnerAddr(full.DefaultKey),
|
||||||
|
kit.ProofType(abi.RegisteredSealProof_StackedDrg2KiBV1), // we're using v0 actors with old proofs.
|
||||||
|
).Start().InterconnectAll()
|
||||||
|
|
||||||
ta, err := testm.ActorAddress(ctx)
|
ta, err := newMiner.ActorAddress(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
tid, err := address.IDFromAddress(ta)
|
tid, err := address.IDFromAddress(ta)
|
||||||
|
@ -13,7 +13,6 @@ import (
|
|||||||
"github.com/filecoin-project/lotus/itests/kit"
|
"github.com/filecoin-project/lotus/itests/kit"
|
||||||
"github.com/filecoin-project/lotus/markets/storageadapter"
|
"github.com/filecoin-project/lotus/markets/storageadapter"
|
||||||
"github.com/filecoin-project/lotus/node"
|
"github.com/filecoin-project/lotus/node"
|
||||||
"github.com/filecoin-project/lotus/node/impl"
|
|
||||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
@ -32,50 +31,40 @@ func TestBatchDealInput(t *testing.T) {
|
|||||||
|
|
||||||
run := func(piece, deals, expectSectors int) func(t *testing.T) {
|
run := func(piece, deals, expectSectors int) func(t *testing.T) {
|
||||||
return func(t *testing.T) {
|
return func(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
publishPeriod := 10 * time.Second
|
publishPeriod := 10 * time.Second
|
||||||
maxDealsPerMsg := uint64(deals)
|
maxDealsPerMsg := uint64(deals)
|
||||||
|
|
||||||
// Set max deals per publish deals message to maxDealsPerMsg
|
// Set max deals per publish deals message to maxDealsPerMsg
|
||||||
minerDef := []kit.StorageMiner{{
|
opts := kit.ConstructorOpts(node.Options(
|
||||||
Full: 0,
|
node.Override(
|
||||||
Opts: node.Options(
|
new(*storageadapter.DealPublisher),
|
||||||
node.Override(
|
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
|
||||||
new(*storageadapter.DealPublisher),
|
Period: publishPeriod,
|
||||||
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
|
MaxDealsPerMsg: maxDealsPerMsg,
|
||||||
Period: publishPeriod,
|
})),
|
||||||
MaxDealsPerMsg: maxDealsPerMsg,
|
node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
|
||||||
})),
|
return func() (sealiface.Config, error) {
|
||||||
node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
|
return sealiface.Config{
|
||||||
return func() (sealiface.Config, error) {
|
MaxWaitDealsSectors: 2,
|
||||||
return sealiface.Config{
|
MaxSealingSectors: 1,
|
||||||
MaxWaitDealsSectors: 2,
|
MaxSealingSectorsForDeals: 3,
|
||||||
MaxSealingSectors: 1,
|
AlwaysKeepUnsealedCopy: true,
|
||||||
MaxSealingSectorsForDeals: 3,
|
WaitDealsDelay: time.Hour,
|
||||||
AlwaysKeepUnsealedCopy: true,
|
|
||||||
WaitDealsDelay: time.Hour,
|
|
||||||
}, nil
|
|
||||||
}, nil
|
}, nil
|
||||||
}),
|
}, nil
|
||||||
),
|
}),
|
||||||
Preseal: kit.PresealGenesis,
|
))
|
||||||
}}
|
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
|
||||||
|
ens.InterconnectAll().BeginMining(blockTime)
|
||||||
// Create a connect client and miner node
|
|
||||||
n, sn := kit.MockMinerBuilder(t, kit.OneFull, minerDef)
|
|
||||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
|
||||||
miner := sn[0]
|
|
||||||
|
|
||||||
blockMiner := kit.ConnectAndStartMining(t, blockTime, miner, client)
|
|
||||||
t.Cleanup(blockMiner.Stop)
|
|
||||||
|
|
||||||
dh := kit.NewDealHarness(t, client, miner)
|
dh := kit.NewDealHarness(t, client, miner)
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
err := miner.MarketSetAsk(ctx, big.Zero(), big.Zero(), 200, 128, 32<<30)
|
err := miner.MarketSetAsk(ctx, big.Zero(), big.Zero(), 200, 128, 32<<30)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
checkNoPadding := func() {
|
checkNoPadding := func() {
|
||||||
sl, err := sn[0].SectorsList(ctx)
|
sl, err := miner.SectorsList(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
sort.Slice(sl, func(i, j int) bool {
|
sort.Slice(sl, func(i, j int) bool {
|
||||||
@ -83,7 +72,7 @@ func TestBatchDealInput(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
for _, snum := range sl {
|
for _, snum := range sl {
|
||||||
si, err := sn[0].SectorsStatus(ctx, snum, false)
|
si, err := miner.SectorsStatus(ctx, snum, false)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// fmt.Printf("S %d: %+v %s\n", snum, si.Deals, si.State)
|
// fmt.Printf("S %d: %+v %s\n", snum, si.Deals, si.State)
|
||||||
@ -122,7 +111,7 @@ func TestBatchDealInput(t *testing.T) {
|
|||||||
|
|
||||||
checkNoPadding()
|
checkNoPadding()
|
||||||
|
|
||||||
sl, err := sn[0].SectorsList(ctx)
|
sl, err := miner.SectorsList(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, len(sl), expectSectors)
|
require.Equal(t, len(sl), expectSectors)
|
||||||
}
|
}
|
||||||
|
@ -3,17 +3,14 @@ package itests
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync/atomic"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/itests/kit"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
|
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
"github.com/filecoin-project/lotus/node/impl"
|
"github.com/filecoin-project/lotus/itests/kit"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestCCUpgrade(t *testing.T) {
|
func TestCCUpgrade(t *testing.T) {
|
||||||
@ -27,60 +24,33 @@ func TestCCUpgrade(t *testing.T) {
|
|||||||
} {
|
} {
|
||||||
height := height // make linters happy by copying
|
height := height // make linters happy by copying
|
||||||
t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
|
t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
|
||||||
runTestCCUpgrade(t, kit.MockMinerBuilder, 5*time.Millisecond, height)
|
runTestCCUpgrade(t, height)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func runTestCCUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) {
|
func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(upgradeHeight)}, kit.OneMiner)
|
blockTime := 5 * time.Millisecond
|
||||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
|
||||||
miner := sn[0]
|
|
||||||
|
|
||||||
addrinfo, err := client.NetAddrsListen(ctx)
|
opts := kit.ConstructorOpts(kit.LatestActorsAt(upgradeHeight))
|
||||||
if err != nil {
|
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
|
||||||
t.Fatal(err)
|
ens.InterconnectAll().BeginMining(blockTime)
|
||||||
}
|
|
||||||
|
|
||||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
time.Sleep(time.Second)
|
|
||||||
|
|
||||||
mine := int64(1)
|
|
||||||
done := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
defer close(done)
|
|
||||||
for atomic.LoadInt64(&mine) == 1 {
|
|
||||||
time.Sleep(blocktime)
|
|
||||||
if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
maddr, err := miner.ActorAddress(ctx)
|
maddr, err := miner.ActorAddress(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
CC := abi.SectorNumber(kit.GenesisPreseals + 1)
|
CC := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1)
|
||||||
Upgraded := CC + 1
|
Upgraded := CC + 1
|
||||||
|
|
||||||
kit.PledgeSectors(t, ctx, miner, 1, 0, nil)
|
miner.PledgeSectors(ctx, 1, 0, nil)
|
||||||
|
|
||||||
sl, err := miner.SectorsList(ctx)
|
sl, err := miner.SectorsList(ctx)
|
||||||
if err != nil {
|
require.NoError(t, err)
|
||||||
t.Fatal(err)
|
require.Len(t, sl, 1, "expected 1 sector")
|
||||||
}
|
require.Equal(t, CC, sl[0], "unexpected sector number")
|
||||||
if len(sl) != 1 {
|
|
||||||
t.Fatal("expected 1 sector")
|
|
||||||
}
|
|
||||||
|
|
||||||
if sl[0] != CC {
|
|
||||||
t.Fatal("bad")
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
{
|
||||||
si, err := client.StateSectorGetInfo(ctx, maddr, CC, types.EmptyTSK)
|
si, err := client.StateSectorGetInfo(ctx, maddr, CC, types.EmptyTSK)
|
||||||
@ -88,20 +58,16 @@ func runTestCCUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, u
|
|||||||
require.Less(t, 50000, int(si.Expiration))
|
require.Less(t, 50000, int(si.Expiration))
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := miner.SectorMarkForUpgrade(ctx, sl[0]); err != nil {
|
err = miner.SectorMarkForUpgrade(ctx, sl[0])
|
||||||
t.Fatal(err)
|
require.NoError(t, err)
|
||||||
}
|
|
||||||
|
|
||||||
dh := kit.NewDealHarness(t, client, miner)
|
dh := kit.NewDealHarness(t, client, miner)
|
||||||
|
deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{
|
||||||
_, _, _ = dh.MakeFullDeal(kit.MakeFullDealParams{
|
Rseed: 6,
|
||||||
Ctx: context.Background(),
|
SuspendUntilCryptoeconStable: true,
|
||||||
Rseed: 6,
|
|
||||||
CarExport: false,
|
|
||||||
FastRet: false,
|
|
||||||
StartEpoch: 0,
|
|
||||||
DoRetrieval: true,
|
|
||||||
})
|
})
|
||||||
|
outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
|
||||||
|
kit.AssertFilesEqual(t, inPath, outPath)
|
||||||
|
|
||||||
// Validate upgrade
|
// Validate upgrade
|
||||||
|
|
||||||
@ -130,10 +96,6 @@ func runTestCCUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, u
|
|||||||
}
|
}
|
||||||
t.Log("waiting for sector to expire")
|
t.Log("waiting for sector to expire")
|
||||||
// wait one deadline per loop.
|
// wait one deadline per loop.
|
||||||
time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blocktime)
|
time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blockTime)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println("shutting down mining")
|
|
||||||
atomic.AddInt64(&mine, -1)
|
|
||||||
<-done
|
|
||||||
}
|
}
|
||||||
|
@ -1,7 +1,6 @@
|
|||||||
package itests
|
package itests
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
@ -15,8 +14,8 @@ func TestClient(t *testing.T) {
|
|||||||
_ = os.Setenv("BELLMAN_NO_GPU", "1")
|
_ = os.Setenv("BELLMAN_NO_GPU", "1")
|
||||||
kit.QuietMiningLogs()
|
kit.QuietMiningLogs()
|
||||||
|
|
||||||
blocktime := 5 * time.Millisecond
|
blockTime := 5 * time.Millisecond
|
||||||
ctx := context.Background()
|
client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC())
|
||||||
clientNode, _ := kit.StartOneNodeOneMiner(ctx, t, blocktime)
|
ens.InterconnectAll().BeginMining(blockTime)
|
||||||
kit.RunClientTest(t, cli.Commands, clientNode)
|
kit.RunClientTest(t, cli.Commands, client)
|
||||||
}
|
}
|
||||||
|
@ -3,7 +3,6 @@ package itests
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@ -26,7 +25,6 @@ import (
|
|||||||
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
|
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
cbor "github.com/ipfs/go-ipld-cbor"
|
cbor "github.com/ipfs/go-ipld-cbor"
|
||||||
logging "github.com/ipfs/go-log/v2"
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -56,11 +54,7 @@ import (
|
|||||||
func TestDeadlineToggling(t *testing.T) {
|
func TestDeadlineToggling(t *testing.T) {
|
||||||
kit.Expensive(t)
|
kit.Expensive(t)
|
||||||
|
|
||||||
_ = logging.SetLogLevel("miner", "ERROR")
|
kit.QuietMiningLogs()
|
||||||
_ = logging.SetLogLevel("chainstore", "ERROR")
|
|
||||||
_ = logging.SetLogLevel("chain", "ERROR")
|
|
||||||
_ = logging.SetLogLevel("sub", "ERROR")
|
|
||||||
_ = logging.SetLogLevel("storageminer", "FATAL")
|
|
||||||
|
|
||||||
const sectorsC, sectorsD, sectorsB = 10, 9, 8
|
const sectorsC, sectorsD, sectorsB = 10, 9, 8
|
||||||
|
|
||||||
@ -73,21 +67,26 @@ func TestDeadlineToggling(t *testing.T) {
|
|||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
n, sn := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithNetworkUpgradeAt(network.Version12, upgradeH)}, kit.OneMiner)
|
var (
|
||||||
|
client kit.TestFullNode
|
||||||
|
minerA kit.TestMiner
|
||||||
|
minerB kit.TestMiner
|
||||||
|
minerC kit.TestMiner
|
||||||
|
minerD kit.TestMiner
|
||||||
|
minerE kit.TestMiner
|
||||||
|
)
|
||||||
|
opts := []kit.NodeOpt{kit.ConstructorOpts(kit.NetworkUpgradeAt(network.Version12, upgradeH))}
|
||||||
|
ens := kit.NewEnsemble(t, kit.MockProofs()).
|
||||||
|
FullNode(&client, opts...).
|
||||||
|
Miner(&minerA, &client, opts...).
|
||||||
|
Start().
|
||||||
|
InterconnectAll()
|
||||||
|
ens.BeginMining(blocktime)
|
||||||
|
|
||||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
opts = append(opts, kit.OwnerAddr(client.DefaultKey))
|
||||||
minerA := sn[0]
|
ens.Miner(&minerB, &client, opts...).
|
||||||
|
Miner(&minerC, &client, opts...).
|
||||||
{
|
Start()
|
||||||
addrinfo, err := client.NetAddrsListen(ctx)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := minerA.NetConnect(ctx, addrinfo); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
defaultFrom, err := client.WalletDefaultAddress(ctx)
|
defaultFrom, err := client.WalletDefaultAddress(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@ -97,28 +96,6 @@ func TestDeadlineToggling(t *testing.T) {
|
|||||||
|
|
||||||
build.Clock.Sleep(time.Second)
|
build.Clock.Sleep(time.Second)
|
||||||
|
|
||||||
done := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
defer close(done)
|
|
||||||
for ctx.Err() == nil {
|
|
||||||
build.Clock.Sleep(blocktime)
|
|
||||||
if err := minerA.MineOne(ctx, kit.MineNext); err != nil {
|
|
||||||
if ctx.Err() != nil {
|
|
||||||
// context was canceled, ignore the error.
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
defer func() {
|
|
||||||
cancel()
|
|
||||||
<-done
|
|
||||||
}()
|
|
||||||
|
|
||||||
minerB := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
|
|
||||||
minerC := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
|
|
||||||
|
|
||||||
maddrB, err := minerB.ActorAddress(ctx)
|
maddrB, err := minerB.ActorAddress(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
maddrC, err := minerC.ActorAddress(ctx)
|
maddrC, err := minerC.ActorAddress(ctx)
|
||||||
@ -129,20 +106,20 @@ func TestDeadlineToggling(t *testing.T) {
|
|||||||
|
|
||||||
// pledge sectors on C, go through a PP, check for power
|
// pledge sectors on C, go through a PP, check for power
|
||||||
{
|
{
|
||||||
kit.PledgeSectors(t, ctx, minerC, sectorsC, 0, nil)
|
minerC.PledgeSectors(ctx, sectorsC, 0, nil)
|
||||||
|
|
||||||
di, err := client.StateMinerProvingDeadline(ctx, maddrC, types.EmptyTSK)
|
di, err := client.StateMinerProvingDeadline(ctx, maddrC, types.EmptyTSK)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
fmt.Printf("Running one proving period (miner C)\n")
|
t.Log("Running one proving period (miner C)")
|
||||||
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2)
|
t.Logf("End for head.Height > %d", di.PeriodStart+di.WPoStProvingPeriod*2)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
head, err := client.ChainHead(ctx)
|
head, err := client.ChainHead(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
if head.Height() > di.PeriodStart+provingPeriod*2 {
|
if head.Height() > di.PeriodStart+provingPeriod*2 {
|
||||||
fmt.Printf("Now head.Height = %d\n", head.Height())
|
t.Logf("Now head.Height = %d", head.Height())
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
build.Clock.Sleep(blocktime)
|
build.Clock.Sleep(blocktime)
|
||||||
@ -163,7 +140,7 @@ func TestDeadlineToggling(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
if head.Height() > upgradeH+provingPeriod {
|
if head.Height() > upgradeH+provingPeriod {
|
||||||
fmt.Printf("Now head.Height = %d\n", head.Height())
|
t.Logf("Now head.Height = %d", head.Height())
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
build.Clock.Sleep(blocktime)
|
build.Clock.Sleep(blocktime)
|
||||||
@ -214,8 +191,9 @@ func TestDeadlineToggling(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.GreaterOrEqual(t, nv, network.Version12)
|
require.GreaterOrEqual(t, nv, network.Version12)
|
||||||
|
|
||||||
minerD := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
|
ens.Miner(&minerD, &client, opts...).
|
||||||
minerE := n[0].Stb(ctx, t, kit.TestSpt, defaultFrom)
|
Miner(&minerE, &client, opts...).
|
||||||
|
Start()
|
||||||
|
|
||||||
maddrD, err := minerD.ActorAddress(ctx)
|
maddrD, err := minerD.ActorAddress(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@ -223,7 +201,7 @@ func TestDeadlineToggling(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// first round of miner checks
|
// first round of miner checks
|
||||||
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.GenesisPreseals), true, true, types.EmptyTSK)
|
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.DefaultPresealsPerBootstrapMiner), true, true, types.EmptyTSK)
|
||||||
checkMiner(maddrC, types.NewInt(uint64(ssz)*sectorsC), true, true, types.EmptyTSK)
|
checkMiner(maddrC, types.NewInt(uint64(ssz)*sectorsC), true, true, types.EmptyTSK)
|
||||||
|
|
||||||
checkMiner(maddrB, types.NewInt(0), false, false, types.EmptyTSK)
|
checkMiner(maddrB, types.NewInt(0), false, false, types.EmptyTSK)
|
||||||
@ -231,10 +209,10 @@ func TestDeadlineToggling(t *testing.T) {
|
|||||||
checkMiner(maddrE, types.NewInt(0), false, false, types.EmptyTSK)
|
checkMiner(maddrE, types.NewInt(0), false, false, types.EmptyTSK)
|
||||||
|
|
||||||
// pledge sectors on minerB/minerD, stop post on minerC
|
// pledge sectors on minerB/minerD, stop post on minerC
|
||||||
kit.PledgeSectors(t, ctx, minerB, sectorsB, 0, nil)
|
minerB.PledgeSectors(ctx, sectorsB, 0, nil)
|
||||||
checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK)
|
checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK)
|
||||||
|
|
||||||
kit.PledgeSectors(t, ctx, minerD, sectorsD, 0, nil)
|
minerD.PledgeSectors(ctx, sectorsD, 0, nil)
|
||||||
checkMiner(maddrD, types.NewInt(0), true, true, types.EmptyTSK)
|
checkMiner(maddrD, types.NewInt(0), true, true, types.EmptyTSK)
|
||||||
|
|
||||||
minerC.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).Fail()
|
minerC.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).Fail()
|
||||||
@ -279,7 +257,7 @@ func TestDeadlineToggling(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
if head.Height() > upgradeH+provingPeriod+(provingPeriod/2) {
|
if head.Height() > upgradeH+provingPeriod+(provingPeriod/2) {
|
||||||
fmt.Printf("Now head.Height = %d\n", head.Height())
|
t.Logf("Now head.Height = %d", head.Height())
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
build.Clock.Sleep(blocktime)
|
build.Clock.Sleep(blocktime)
|
||||||
@ -293,14 +271,14 @@ func TestDeadlineToggling(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
if head.Height() > upgradeH+(provingPeriod*3) {
|
if head.Height() > upgradeH+(provingPeriod*3) {
|
||||||
fmt.Printf("Now head.Height = %d\n", head.Height())
|
t.Logf("Now head.Height = %d", head.Height())
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
build.Clock.Sleep(blocktime)
|
build.Clock.Sleep(blocktime)
|
||||||
}
|
}
|
||||||
|
|
||||||
// second round of miner checks
|
// second round of miner checks
|
||||||
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.GenesisPreseals), true, true, types.EmptyTSK)
|
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.DefaultPresealsPerBootstrapMiner), true, true, types.EmptyTSK)
|
||||||
checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK)
|
checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK)
|
||||||
checkMiner(maddrB, types.NewInt(uint64(ssz)*sectorsB), true, true, types.EmptyTSK)
|
checkMiner(maddrB, types.NewInt(uint64(ssz)*sectorsB), true, true, types.EmptyTSK)
|
||||||
checkMiner(maddrD, types.NewInt(uint64(ssz)*sectorsD), true, true, types.EmptyTSK)
|
checkMiner(maddrD, types.NewInt(uint64(ssz)*sectorsD), true, true, types.EmptyTSK)
|
||||||
@ -349,7 +327,7 @@ func TestDeadlineToggling(t *testing.T) {
|
|||||||
}, nil)
|
}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
fmt.Println("sent termination message:", smsg.Cid())
|
t.Log("sent termination message:", smsg.Cid())
|
||||||
|
|
||||||
r, err := client.StateWaitMsg(ctx, smsg.Cid(), 2, api.LookbackNoLimit, true)
|
r, err := client.StateWaitMsg(ctx, smsg.Cid(), 2, api.LookbackNoLimit, true)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@ -365,13 +343,13 @@ func TestDeadlineToggling(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
if head.Height() > upgradeH+(provingPeriod*5) {
|
if head.Height() > upgradeH+(provingPeriod*5) {
|
||||||
fmt.Printf("Now head.Height = %d\n", head.Height())
|
t.Logf("Now head.Height = %d", head.Height())
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
build.Clock.Sleep(blocktime)
|
build.Clock.Sleep(blocktime)
|
||||||
}
|
}
|
||||||
|
|
||||||
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.GenesisPreseals), true, true, types.EmptyTSK)
|
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.DefaultPresealsPerBootstrapMiner), true, true, types.EmptyTSK)
|
||||||
checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK)
|
checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK)
|
||||||
checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK)
|
checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK)
|
||||||
checkMiner(maddrD, types.NewInt(0), false, false, types.EmptyTSK)
|
checkMiner(maddrD, types.NewInt(0), false, false, types.EmptyTSK)
|
||||||
|
49
itests/deals_concurrent_test.go
Normal file
49
itests/deals_concurrent_test.go
Normal file
@ -0,0 +1,49 @@
|
|||||||
|
package itests
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/lotus/itests/kit"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestDealCyclesConcurrent(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("skipping test in short mode")
|
||||||
|
}
|
||||||
|
|
||||||
|
kit.QuietMiningLogs()
|
||||||
|
|
||||||
|
blockTime := 10 * time.Millisecond
|
||||||
|
|
||||||
|
// For these tests where the block time is artificially short, just use
|
||||||
|
// a deal start epoch that is guaranteed to be far enough in the future
|
||||||
|
// so that the deal starts sealing in time
|
||||||
|
startEpoch := abi.ChainEpoch(2 << 12)
|
||||||
|
|
||||||
|
runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) {
|
||||||
|
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
|
||||||
|
ens.InterconnectAll().BeginMining(blockTime)
|
||||||
|
dh := kit.NewDealHarness(t, client, miner)
|
||||||
|
|
||||||
|
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{
|
||||||
|
N: n,
|
||||||
|
FastRetrieval: fastRetrieval,
|
||||||
|
CarExport: carExport,
|
||||||
|
StartEpoch: startEpoch,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: add 2, 4, 8, more when this graphsync issue is fixed: https://github.com/ipfs/go-graphsync/issues/175#
|
||||||
|
cycles := []int{1}
|
||||||
|
for _, n := range cycles {
|
||||||
|
n := n
|
||||||
|
ns := fmt.Sprintf("%d", n)
|
||||||
|
t.Run(ns+"-fastretrieval-CAR", func(t *testing.T) { runTest(t, n, true, true) })
|
||||||
|
t.Run(ns+"-fastretrieval-NoCAR", func(t *testing.T) { runTest(t, n, true, false) })
|
||||||
|
t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, true, false) })
|
||||||
|
t.Run(ns+"-stdretrieval-NoCAR", func(t *testing.T) { runTest(t, n, false, false) })
|
||||||
|
}
|
||||||
|
}
|
101
itests/deals_offline_test.go
Normal file
101
itests/deals_offline_test.go
Normal file
@ -0,0 +1,101 @@
|
|||||||
|
package itests
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/lotus/api"
|
||||||
|
"github.com/filecoin-project/lotus/build"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
"github.com/filecoin-project/lotus/itests/kit"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestOfflineDealFlow(t *testing.T) {
|
||||||
|
blocktime := 10 * time.Millisecond
|
||||||
|
|
||||||
|
// For these tests where the block time is artificially short, just use
|
||||||
|
// a deal start epoch that is guaranteed to be far enough in the future
|
||||||
|
// so that the deal starts sealing in time
|
||||||
|
startEpoch := abi.ChainEpoch(2 << 12)
|
||||||
|
|
||||||
|
runTest := func(t *testing.T, fastRet bool) {
|
||||||
|
ctx := context.Background()
|
||||||
|
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
|
||||||
|
ens.InterconnectAll().BeginMining(blocktime)
|
||||||
|
|
||||||
|
dh := kit.NewDealHarness(t, client, miner)
|
||||||
|
|
||||||
|
// Create a random file and import on the client.
|
||||||
|
res, inFile := client.CreateImportFile(ctx, 1, 0)
|
||||||
|
|
||||||
|
// Get the piece size and commP
|
||||||
|
rootCid := res.Root
|
||||||
|
pieceInfo, err := client.ClientDealPieceCID(ctx, rootCid)
|
||||||
|
require.NoError(t, err)
|
||||||
|
t.Log("FILE CID:", rootCid)
|
||||||
|
|
||||||
|
// Create a storage deal with the miner
|
||||||
|
maddr, err := miner.ActorAddress(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
addr, err := client.WalletDefaultAddress(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Manual storage deal (offline deal)
|
||||||
|
dataRef := &storagemarket.DataRef{
|
||||||
|
TransferType: storagemarket.TTManual,
|
||||||
|
Root: rootCid,
|
||||||
|
PieceCid: &pieceInfo.PieceCID,
|
||||||
|
PieceSize: pieceInfo.PieceSize.Unpadded(),
|
||||||
|
}
|
||||||
|
|
||||||
|
proposalCid, err := client.ClientStartDeal(ctx, &api.StartDealParams{
|
||||||
|
Data: dataRef,
|
||||||
|
Wallet: addr,
|
||||||
|
Miner: maddr,
|
||||||
|
EpochPrice: types.NewInt(1000000),
|
||||||
|
DealStartEpoch: startEpoch,
|
||||||
|
MinBlocksDuration: uint64(build.MinDealDuration),
|
||||||
|
FastRetrieval: fastRet,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Wait for the deal to reach StorageDealCheckForAcceptance on the client
|
||||||
|
cd, err := client.ClientGetDealInfo(ctx, *proposalCid)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Eventually(t, func() bool {
|
||||||
|
cd, _ := client.ClientGetDealInfo(ctx, *proposalCid)
|
||||||
|
return cd.State == storagemarket.StorageDealCheckForAcceptance
|
||||||
|
}, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State])
|
||||||
|
|
||||||
|
// Create a CAR file from the raw file
|
||||||
|
carFileDir := t.TempDir()
|
||||||
|
carFilePath := filepath.Join(carFileDir, "out.car")
|
||||||
|
err = client.ClientGenCar(ctx, api.FileRef{Path: inFile}, carFilePath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Import the CAR file on the miner - this is the equivalent to
|
||||||
|
// transferring the file across the wire in a normal (non-offline) deal
|
||||||
|
err = miner.DealsImportData(ctx, *proposalCid, carFilePath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Wait for the deal to be published
|
||||||
|
dh.WaitDealPublished(ctx, proposalCid)
|
||||||
|
|
||||||
|
t.Logf("deal published, retrieving")
|
||||||
|
|
||||||
|
// Retrieve the deal
|
||||||
|
outFile := dh.PerformRetrieval(ctx, proposalCid, rootCid, false)
|
||||||
|
|
||||||
|
kit.AssertFilesEqual(t, inFile, outFile)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("stdretrieval", func(t *testing.T) { runTest(t, false) })
|
||||||
|
t.Run("fastretrieval", func(t *testing.T) { runTest(t, true) })
|
||||||
|
}
|
61
itests/deals_power_test.go
Normal file
61
itests/deals_power_test.go
Normal file
@ -0,0 +1,61 @@
|
|||||||
|
package itests
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/lotus/itests/kit"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestFirstDealEnablesMining(t *testing.T) {
|
||||||
|
// test making a deal with a fresh miner, and see if it starts to mine.
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("skipping test in short mode")
|
||||||
|
}
|
||||||
|
|
||||||
|
kit.QuietMiningLogs()
|
||||||
|
|
||||||
|
var (
|
||||||
|
client kit.TestFullNode
|
||||||
|
genMiner kit.TestMiner // bootstrap
|
||||||
|
provider kit.TestMiner // no sectors, will need to create one
|
||||||
|
)
|
||||||
|
|
||||||
|
ens := kit.NewEnsemble(t, kit.MockProofs())
|
||||||
|
ens.FullNode(&client)
|
||||||
|
ens.Miner(&genMiner, &client)
|
||||||
|
ens.Miner(&provider, &client, kit.PresealSectors(0))
|
||||||
|
ens.Start().InterconnectAll().BeginMining(50 * time.Millisecond)
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
dh := kit.NewDealHarness(t, &client, &provider)
|
||||||
|
|
||||||
|
ref, _ := client.CreateImportFile(ctx, 5, 0)
|
||||||
|
|
||||||
|
t.Log("FILE CID:", ref.Root)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(ctx)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// start a goroutine to monitor head changes from the client
|
||||||
|
// once the provider has mined a block, thanks to the power acquired from the deal,
|
||||||
|
// we pass the test.
|
||||||
|
providerMined := make(chan struct{})
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
_ = client.WaitTillChain(ctx, kit.BlockMinedBy(provider.ActorAddr))
|
||||||
|
close(providerMined)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// now perform the deal.
|
||||||
|
deal := dh.StartDeal(ctx, ref.Root, false, 0)
|
||||||
|
|
||||||
|
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
|
||||||
|
dh.WaitDealSealed(ctx, deal, false, false, nil)
|
||||||
|
|
||||||
|
<-providerMined
|
||||||
|
}
|
131
itests/deals_pricing_test.go
Normal file
131
itests/deals_pricing_test.go
Normal file
@ -0,0 +1,131 @@
|
|||||||
|
package itests

import (
    "context"
    "testing"
    "time"

    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
    "github.com/filecoin-project/lotus/itests/kit"
    "github.com/stretchr/testify/require"
)

func TestQuotePriceForUnsealedRetrieval(t *testing.T) {
    var (
        ctx       = context.Background()
        blocktime = time.Second
    )

    kit.QuietMiningLogs()

    client, miner, ens := kit.EnsembleMinimal(t)
    ens.InterconnectAll().BeginMining(blocktime)

    var (
        ppb         = int64(1)
        unsealPrice = int64(77)
    )

    // Set unsealed price to non-zero
    ask, err := miner.MarketGetRetrievalAsk(ctx)
    require.NoError(t, err)
    ask.PricePerByte = abi.NewTokenAmount(ppb)
    ask.UnsealPrice = abi.NewTokenAmount(unsealPrice)
    err = miner.MarketSetRetrievalAsk(ctx, ask)
    require.NoError(t, err)

    dh := kit.NewDealHarness(t, client, miner)

    deal1, res1, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6})

    // one more storage deal for the same data
    _, res2, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6})
    require.Equal(t, res1.Root, res2.Root)

    // Retrieval
    dealInfo, err := client.ClientGetDealInfo(ctx, *deal1)
    require.NoError(t, err)

    // fetch quote -> zero for unsealed price since unsealed file already exists.
    offers, err := client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID)
    require.NoError(t, err)
    require.Len(t, offers, 2)
    require.Equal(t, offers[0], offers[1])
    require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64())
    require.Equal(t, dealInfo.Size*uint64(ppb), offers[0].MinPrice.Uint64())

    // remove ONLY one unsealed file
    ss, err := miner.StorageList(context.Background())
    require.NoError(t, err)
    _, err = miner.SectorsList(ctx)
    require.NoError(t, err)

iLoop:
    for storeID, sd := range ss {
        for _, sector := range sd {
            err := miner.StorageDropSector(ctx, storeID, sector.SectorID, storiface.FTUnsealed)
            require.NoError(t, err)
            break iLoop // remove ONLY one
        }
    }

    // get retrieval quote -> zero for unsealed price as an unsealed file still exists.
    offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID)
    require.NoError(t, err)
    require.Len(t, offers, 2)
    require.Equal(t, offers[0], offers[1])
    require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64())
    require.Equal(t, dealInfo.Size*uint64(ppb), offers[0].MinPrice.Uint64())

    // remove the other unsealed file as well
    ss, err = miner.StorageList(context.Background())
    require.NoError(t, err)
    _, err = miner.SectorsList(ctx)
    require.NoError(t, err)
    for storeID, sd := range ss {
        for _, sector := range sd {
            require.NoError(t, miner.StorageDropSector(ctx, storeID, sector.SectorID, storiface.FTUnsealed))
        }
    }

    // fetch quote -> non-zero for unseal price as we have no more unsealed files.
    offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID)
    require.NoError(t, err)
    require.Len(t, offers, 2)
    require.Equal(t, offers[0], offers[1])
    require.Equal(t, uint64(unsealPrice), offers[0].UnsealPrice.Uint64())
    total := (dealInfo.Size * uint64(ppb)) + uint64(unsealPrice)
    require.Equal(t, total, offers[0].MinPrice.Uint64())
}

func TestZeroPricePerByteRetrieval(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping test in short mode")
    }

    kit.QuietMiningLogs()

    var (
        blockTime  = 10 * time.Millisecond
        startEpoch = abi.ChainEpoch(2 << 12)
    )

    client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
    ens.InterconnectAll().BeginMining(blockTime)

    ctx := context.Background()

    ask, err := miner.MarketGetRetrievalAsk(ctx)
    require.NoError(t, err)

    ask.PricePerByte = abi.NewTokenAmount(0)
    err = miner.MarketSetRetrievalAsk(ctx, ask)
    require.NoError(t, err)

    dh := kit.NewDealHarness(t, client, miner)
    dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{
        N:          1,
        StartEpoch: startEpoch,
    })
}
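The assertions above encode the retrieval pricing rule this test exercises: the quoted MinPrice is size times price-per-byte, plus the unseal price only when no unsealed copy of the piece remains. A small illustrative helper that mirrors that arithmetic; the function is a sketch for this document, not part of the change:

package itests

// expectedMinPrice mirrors TestQuotePriceForUnsealedRetrieval's expectations:
// with an unsealed copy present the unseal component is zero; once every
// unsealed file is dropped, the full unseal price is added to the quote.
// e.g. expectedMinPrice(2048, 1, 77, false) == 2125, and 2048 when a copy exists.
func expectedMinPrice(size, pricePerByte, unsealPrice uint64, hasUnsealedCopy bool) uint64 {
    if hasUnsealedCopy {
        unsealPrice = 0
    }
    return size*pricePerByte + unsealPrice
}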
108 itests/deals_publish_test.go Normal file
@ -0,0 +1,108 @@
package itests

import (
    "bytes"
    "context"
    "testing"
    "time"

    "github.com/filecoin-project/go-fil-markets/storagemarket"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/actors/builtin/market"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/itests/kit"
    "github.com/filecoin-project/lotus/markets/storageadapter"
    "github.com/filecoin-project/lotus/node"
    market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
    "github.com/stretchr/testify/require"
)

func TestPublishDealsBatching(t *testing.T) {
    var (
        ctx            = context.Background()
        publishPeriod  = 10 * time.Second
        maxDealsPerMsg = uint64(2) // Set max deals per publish deals message to 2
        startEpoch     = abi.ChainEpoch(2 << 12)
    )

    kit.QuietMiningLogs()

    opts := node.Override(new(*storageadapter.DealPublisher),
        storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
            Period:         publishPeriod,
            MaxDealsPerMsg: maxDealsPerMsg,
        }),
    )

    client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts(opts))
    ens.InterconnectAll().BeginMining(10 * time.Millisecond)

    dh := kit.NewDealHarness(t, client, miner)

    // Starts a deal and waits until it's published
    runDealTillPublish := func(rseed int) {
        res, _ := client.CreateImportFile(ctx, rseed, 0)

        upds, err := client.ClientGetDealUpdates(ctx)
        require.NoError(t, err)

        dh.StartDeal(ctx, res.Root, false, startEpoch)

        // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
        time.Sleep(time.Second)

        done := make(chan struct{})
        go func() {
            for upd := range upds {
                if upd.DataRef.Root == res.Root && upd.State == storagemarket.StorageDealAwaitingPreCommit {
                    done <- struct{}{}
                }
            }
        }()
        <-done
    }

    // Run three deals in parallel
    done := make(chan struct{}, maxDealsPerMsg+1)
    for rseed := 1; rseed <= 3; rseed++ {
        rseed := rseed
        go func() {
            runDealTillPublish(rseed)
            done <- struct{}{}
        }()
    }

    // Wait for two of the deals to be published
    for i := 0; i < int(maxDealsPerMsg); i++ {
        <-done
    }

    // Expect a single PublishStorageDeals message that includes the first two deals
    msgCids, err := client.StateListMessages(ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1)
    require.NoError(t, err)
    count := 0
    for _, msgCid := range msgCids {
        msg, err := client.ChainGetMessage(ctx, msgCid)
        require.NoError(t, err)

        if msg.Method == market.Methods.PublishStorageDeals {
            count++
            var pubDealsParams market2.PublishStorageDealsParams
            err = pubDealsParams.UnmarshalCBOR(bytes.NewReader(msg.Params))
            require.NoError(t, err)
            require.Len(t, pubDealsParams.Deals, int(maxDealsPerMsg))
        }
    }
    require.Equal(t, 1, count)

    // The third deal should be published once the publish period expires.
    // Allow a little padding as it takes a moment for the state change to
    // be noticed by the client.
    padding := 10 * time.Second
    select {
    case <-time.After(publishPeriod + padding):
        require.Fail(t, "Expected 3rd deal to be published once publish period elapsed")
    case <-done: // Success
    }
}
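The test relies on two DealPublisher batching knobs: MaxDealsPerMsg caps how many deals one PublishStorageDeals message carries, and Period bounds how long a pending deal waits before being published anyway. A hedged sketch of wiring a different configuration into an ensemble, using only constructs that appear in this diff; the period and cap values and the test name are arbitrary:

package itests

import (
    "testing"
    "time"

    "github.com/filecoin-project/lotus/itests/kit"
    "github.com/filecoin-project/lotus/markets/storageadapter"
    "github.com/filecoin-project/lotus/node"
)

// TestPublishConfigSketch tunes deal-publish batching: up to 4 deals per
// PublishStorageDeals message, flushed at least every 5 seconds.
func TestPublishConfigSketch(t *testing.T) {
    kit.QuietMiningLogs()

    opts := node.Override(new(*storageadapter.DealPublisher),
        storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
            Period:         5 * time.Second,
            MaxDealsPerMsg: 4,
        }),
    )

    client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts(opts))
    ens.InterconnectAll().BeginMining(10 * time.Millisecond)

    // deals made through the harness are now batched up to 4 per message.
    dh := kit.NewDealHarness(t, client, miner)
    dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1})
}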
@ -1,633 +1,35 @@
package itests

import (
-    "bytes"
-    "context"
-    "fmt"
-    "io/ioutil"
-    "math/rand"
-    "os"
-    "path/filepath"
-    "sync/atomic"
    "testing"
    "time"

-    "github.com/filecoin-project/go-fil-markets/storagemarket"
-    "github.com/filecoin-project/go-state-types/abi"
-    "github.com/filecoin-project/lotus/api"
-    "github.com/filecoin-project/lotus/build"
-    "github.com/filecoin-project/lotus/chain/actors/builtin/market"
-    "github.com/filecoin-project/lotus/chain/actors/policy"
-    "github.com/filecoin-project/lotus/chain/types"
-    "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
    "github.com/filecoin-project/lotus/itests/kit"
-    "github.com/filecoin-project/lotus/markets/storageadapter"
-    "github.com/filecoin-project/lotus/miner"
-    "github.com/filecoin-project/lotus/node"
-    "github.com/filecoin-project/lotus/node/impl"
-    market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
-    "github.com/stretchr/testify/require"
)

-func TestDealCycle(t *testing.T) {
+func TestDealsWithSealingAndRPC(t *testing.T) {
-    kit.QuietMiningLogs()
-
-    blockTime := 10 * time.Millisecond
-
-    // For these tests where the block time is artificially short, just use
-    // a deal start epoch that is guaranteed to be far enough in the future
-    // so that the deal starts sealing in time
-    dealStartEpoch := abi.ChainEpoch(2 << 12)
-
-    t.Run("TestFullDealCycle_Single", func(t *testing.T) {
-        runFullDealCycles(t, 1, kit.MockMinerBuilder, blockTime, false, false, dealStartEpoch)
-    })
-    t.Run("TestFullDealCycle_Two", func(t *testing.T) {
-        runFullDealCycles(t, 2, kit.MockMinerBuilder, blockTime, false, false, dealStartEpoch)
-    })
-    t.Run("WithExportedCAR", func(t *testing.T) {
-        runFullDealCycles(t, 1, kit.MockMinerBuilder, blockTime, true, false, dealStartEpoch)
-    })
-    t.Run("TestFastRetrievalDealCycle", func(t *testing.T) {
-        runFastRetrievalDealFlowT(t, kit.MockMinerBuilder, blockTime, dealStartEpoch)
-    })
-    t.Run("TestZeroPricePerByteRetrievalDealFlow", func(t *testing.T) {
-        runZeroPricePerByteRetrievalDealFlow(t, kit.MockMinerBuilder, blockTime, dealStartEpoch)
-    })
-}
-
-func TestAPIDealFlowReal(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping test in short mode")
    }

    kit.QuietMiningLogs()

-    // TODO: just set this globally?
-    oldDelay := policy.GetPreCommitChallengeDelay()
-    policy.SetPreCommitChallengeDelay(5)
-    t.Cleanup(func() {
-        policy.SetPreCommitChallengeDelay(oldDelay)
-    })
+    var blockTime = 1 * time.Second

-    t.Run("basic", func(t *testing.T) {
-        runFullDealCycles(t, 1, kit.Builder, time.Second, false, false, 0)
-    })
-
-    t.Run("fast-retrieval", func(t *testing.T) {
-        runFullDealCycles(t, 1, kit.Builder, time.Second, false, true, 0)
-    })
-
-    t.Run("retrieval-second", func(t *testing.T) {
-        runSecondDealRetrievalTest(t, kit.Builder, time.Second)
-    })
-
-    t.Run("quote-price-for-non-unsealed-retrieval", func(t *testing.T) {
-        runQuotePriceForUnsealedRetrieval(t, kit.Builder, time.Second, 0)
-    })
-}
-
-func runQuotePriceForUnsealedRetrieval(t *testing.T, b kit.APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
-    ctx := context.Background()
-    fulls, miners := b(t, kit.OneFull, kit.OneMiner)
-    client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
-    kit.ConnectAndStartMining(t, blocktime, miner, client)
-
-    ppb := int64(1)
-    unsealPrice := int64(77)
-
-    // Set unsealed price to non-zero
-    ask, err := miner.MarketGetRetrievalAsk(ctx)
-    require.NoError(t, err)
-    ask.PricePerByte = abi.NewTokenAmount(ppb)
-    ask.UnsealPrice = abi.NewTokenAmount(unsealPrice)
-    err = miner.MarketSetRetrievalAsk(ctx, ask)
-    require.NoError(t, err)
-
+    client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC()) // no mock proofs.
+    ens.InterconnectAll().BeginMining(blockTime)
    dh := kit.NewDealHarness(t, client, miner)

-    _, info, fcid := dh.MakeFullDeal(kit.MakeFullDealParams{
-        Ctx:         ctx,
-        Rseed:       6,
-        CarExport:   false,
-        FastRet:     false,
-        StartEpoch:  startEpoch,
-        DoRetrieval: false,
+    t.Run("stdretrieval", func(t *testing.T) {
+        dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1})
    })

-    // one more storage deal for the same data
-    _, _, fcid2 := dh.MakeFullDeal(kit.MakeFullDealParams{
-        Ctx:         ctx,
-        Rseed:       6,
-        CarExport:   false,
-        FastRet:     false,
-        StartEpoch:  startEpoch,
-        DoRetrieval: false,
-    })
-    require.Equal(t, fcid, fcid2)
+    t.Run("fastretrieval", func(t *testing.T) {
+        dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true})

-    // fetch quote -> zero for unsealed price since unsealed file already exists.
-    offers, err := client.ClientFindData(ctx, fcid, &info.PieceCID)
-    require.NoError(t, err)
-    require.Len(t, offers, 2)
-    require.Equal(t, offers[0], offers[1])
-    require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64())
-    require.Equal(t, info.Size*uint64(ppb), offers[0].MinPrice.Uint64())
-
-    // remove ONLY one unsealed file
-    ss, err := miner.StorageList(context.Background())
-    require.NoError(t, err)
-    _, err = miner.SectorsList(ctx)
-    require.NoError(t, err)
-
-iLoop:
-    for storeID, sd := range ss {
-        for _, sector := range sd {
-            require.NoError(t, miner.StorageDropSector(ctx, storeID, sector.SectorID, storiface.FTUnsealed))
-            // remove ONLY one
-            break iLoop
-        }
-    }
-
-    // get retrieval quote -> zero for unsealed price as unsealed file exists.
-    offers, err = client.ClientFindData(ctx, fcid, &info.PieceCID)
-    require.NoError(t, err)
-    require.Len(t, offers, 2)
-    require.Equal(t, offers[0], offers[1])
-    require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64())
-    require.Equal(t, info.Size*uint64(ppb), offers[0].MinPrice.Uint64())
-
-    // remove the other unsealed file as well
-    ss, err = miner.StorageList(context.Background())
-    require.NoError(t, err)
-    _, err = miner.SectorsList(ctx)
-    require.NoError(t, err)
-    for storeID, sd := range ss {
-        for _, sector := range sd {
-            require.NoError(t, miner.StorageDropSector(ctx, storeID, sector.SectorID, storiface.FTUnsealed))
-        }
-    }
-
-    // fetch quote -> non-zero for unseal price as we no more unsealed files.
-    offers, err = client.ClientFindData(ctx, fcid, &info.PieceCID)
-    require.NoError(t, err)
-    require.Len(t, offers, 2)
-    require.Equal(t, offers[0], offers[1])
-    require.Equal(t, uint64(unsealPrice), offers[0].UnsealPrice.Uint64())
-    total := (info.Size * uint64(ppb)) + uint64(unsealPrice)
-    require.Equal(t, total, offers[0].MinPrice.Uint64())
-}
-func TestPublishDealsBatching(t *testing.T) {
-    ctx := context.Background()
-
-    kit.QuietMiningLogs()
-
-    b := kit.MockMinerBuilder
-    blocktime := 10 * time.Millisecond
-    startEpoch := abi.ChainEpoch(2 << 12)
-
-    publishPeriod := 10 * time.Second
-    maxDealsPerMsg := uint64(2)
-
-    // Set max deals per publish deals message to 2
-    minerDef := []kit.StorageMiner{{
-        Full: 0,
-        Opts: node.Override(
-            new(*storageadapter.DealPublisher),
-            storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
-                Period:         publishPeriod,
-                MaxDealsPerMsg: maxDealsPerMsg,
-            })),
-        Preseal: kit.PresealGenesis,
-    }}
-
-    // Create a connect client and miner node
-    n, sn := b(t, kit.OneFull, minerDef)
-    client := n[0].FullNode.(*impl.FullNodeAPI)
-    miner := sn[0]
-
-    kit.ConnectAndStartMining(t, blocktime, miner, client)
-
-    dh := kit.NewDealHarness(t, client, miner)
-
-    // Starts a deal and waits until it's published
-    runDealTillPublish := func(rseed int) {
-        res, _, _, err := kit.CreateImportFile(ctx, client, rseed, 0)
-        require.NoError(t, err)
-
-        upds, err := client.ClientGetDealUpdates(ctx)
-        require.NoError(t, err)
-
-        dh.StartDeal(ctx, res.Root, false, startEpoch)
-
-        // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
-        time.Sleep(time.Second)
-
-        done := make(chan struct{})
-        go func() {
-            for upd := range upds {
-                if upd.DataRef.Root == res.Root && upd.State == storagemarket.StorageDealAwaitingPreCommit {
-                    done <- struct{}{}
-                }
-            }
-        }()
-        <-done
-    }
-
-    // Run three deals in parallel
-    done := make(chan struct{}, maxDealsPerMsg+1)
-    for rseed := 1; rseed <= 3; rseed++ {
-        rseed := rseed
-        go func() {
-            runDealTillPublish(rseed)
-            done <- struct{}{}
-        }()
-    }
-
-    // Wait for two of the deals to be published
-    for i := 0; i < int(maxDealsPerMsg); i++ {
-        <-done
-    }
-
-    // Expect a single PublishStorageDeals message that includes the first two deals
-    msgCids, err := client.StateListMessages(ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1)
-    require.NoError(t, err)
-    count := 0
-    for _, msgCid := range msgCids {
-        msg, err := client.ChainGetMessage(ctx, msgCid)
-        require.NoError(t, err)
-
-        if msg.Method == market.Methods.PublishStorageDeals {
-            count++
-            var pubDealsParams market2.PublishStorageDealsParams
-            err = pubDealsParams.UnmarshalCBOR(bytes.NewReader(msg.Params))
-            require.NoError(t, err)
-            require.Len(t, pubDealsParams.Deals, int(maxDealsPerMsg))
-        }
-    }
-    require.Equal(t, 1, count)
-
-    // The third deal should be published once the publish period expires.
-    // Allow a little padding as it takes a moment for the state change to
-    // be noticed by the client.
-    padding := 10 * time.Second
-    select {
-    case <-time.After(publishPeriod + padding):
-        require.Fail(t, "Expected 3rd deal to be published once publish period elapsed")
-    case <-done: // Success
-    }
-}
-func TestDealMining(t *testing.T) {
-    // test making a deal with a fresh miner, and see if it starts to mine.
-    if testing.Short() {
-        t.Skip("skipping test in short mode")
-    }
-
-    kit.QuietMiningLogs()
-
-    b := kit.MockMinerBuilder
-    blocktime := 50 * time.Millisecond
-
-    ctx := context.Background()
-    fulls, miners := b(t,
-        kit.OneFull,
-        []kit.StorageMiner{
-            {Full: 0, Preseal: kit.PresealGenesis},
-            {Full: 0, Preseal: 0}, // TODO: Add support for miners on non-first full node
-        })
-    client := fulls[0].FullNode.(*impl.FullNodeAPI)
-    genesisMiner := miners[0]
-    provider := miners[1]
-
-    addrinfo, err := client.NetAddrsListen(ctx)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    if err := provider.NetConnect(ctx, addrinfo); err != nil {
-        t.Fatal(err)
-    }
-
-    if err := genesisMiner.NetConnect(ctx, addrinfo); err != nil {
-        t.Fatal(err)
-    }
-
-    time.Sleep(time.Second)
-
-    data := make([]byte, 600)
-    rand.New(rand.NewSource(5)).Read(data)
-
-    r := bytes.NewReader(data)
-    fcid, err := client.ClientImportLocal(ctx, r)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    fmt.Println("FILE CID: ", fcid)
-
-    var mine int32 = 1
-    done := make(chan struct{})
-    minedTwo := make(chan struct{})
-
-    m2addr, err := miners[1].ActorAddress(context.TODO())
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    go func() {
-        defer close(done)
-
-        complChan := minedTwo
-        for atomic.LoadInt32(&mine) != 0 {
-            wait := make(chan int)
-            mdone := func(mined bool, _ abi.ChainEpoch, err error) {
-                n := 0
-                if mined {
-                    n = 1
-                }
-                wait <- n
-            }
-
-            if err := miners[0].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
-                t.Error(err)
-            }
-
-            if err := miners[1].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
-                t.Error(err)
-            }
-
-            expect := <-wait
-            expect += <-wait
-
-            time.Sleep(blocktime)
-            if expect == 0 {
-                // null block
-                continue
-            }
-
-            var nodeOneMined bool
-            for _, node := range miners {
-                mb, err := node.MiningBase(ctx)
-                if err != nil {
-                    t.Error(err)
-                    return
-                }
-
-                for _, b := range mb.Blocks() {
-                    if b.Miner == m2addr {
-                        nodeOneMined = true
-                        break
-                    }
-                }
-            }
-
-            if nodeOneMined && complChan != nil {
-                close(complChan)
-                complChan = nil
-            }
-        }
-    }()
-
-    dh := kit.NewDealHarness(t, client, provider)
-
-    deal := dh.StartDeal(ctx, fcid, false, 0)
-
-    // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
-    time.Sleep(time.Second)
-
-    dh.WaitDealSealed(ctx, deal, false, false, nil)
-
-    <-minedTwo
-
-    atomic.StoreInt32(&mine, 0)
-    fmt.Println("shutting down mining")
-    <-done
-}
-func TestOfflineDealFlow(t *testing.T) {
-    blocktime := 10 * time.Millisecond
-
-    // For these tests where the block time is artificially short, just use
-    // a deal start epoch that is guaranteed to be far enough in the future
-    // so that the deal starts sealing in time
-    startEpoch := abi.ChainEpoch(2 << 12)
-
-    runTest := func(t *testing.T, fastRet bool) {
-        ctx := context.Background()
-        fulls, miners := kit.MockMinerBuilder(t, kit.OneFull, kit.OneMiner)
-        client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
-
-        kit.ConnectAndStartMining(t, blocktime, miner, client)
-
-        dh := kit.NewDealHarness(t, client, miner)
-
-        // Create a random file and import on the client.
-        res, path, data, err := kit.CreateImportFile(ctx, client, 1, 0)
-        require.NoError(t, err)
-
-        // Get the piece size and commP
-        fcid := res.Root
-        pieceInfo, err := client.ClientDealPieceCID(ctx, fcid)
-        require.NoError(t, err)
-        fmt.Println("FILE CID: ", fcid)
-
-        // Create a storage deal with the miner
-        maddr, err := miner.ActorAddress(ctx)
-        require.NoError(t, err)
-
-        addr, err := client.WalletDefaultAddress(ctx)
-        require.NoError(t, err)
-
-        // Manual storage deal (offline deal)
-        dataRef := &storagemarket.DataRef{
-            TransferType: storagemarket.TTManual,
-            Root:         fcid,
-            PieceCid:     &pieceInfo.PieceCID,
-            PieceSize:    pieceInfo.PieceSize.Unpadded(),
-        }
-
-        proposalCid, err := client.ClientStartDeal(ctx, &api.StartDealParams{
-            Data:              dataRef,
-            Wallet:            addr,
-            Miner:             maddr,
-            EpochPrice:        types.NewInt(1000000),
-            DealStartEpoch:    startEpoch,
-            MinBlocksDuration: uint64(build.MinDealDuration),
-            FastRetrieval:     fastRet,
-        })
-        require.NoError(t, err)
-
-        // Wait for the deal to reach StorageDealCheckForAcceptance on the client
-        cd, err := client.ClientGetDealInfo(ctx, *proposalCid)
-        require.NoError(t, err)
-        require.Eventually(t, func() bool {
-            cd, _ := client.ClientGetDealInfo(ctx, *proposalCid)
-            return cd.State == storagemarket.StorageDealCheckForAcceptance
-        }, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State])
-
-        // Create a CAR file from the raw file
-        carFileDir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-car")
-        require.NoError(t, err)
-        carFilePath := filepath.Join(carFileDir, "out.car")
-        err = client.ClientGenCar(ctx, api.FileRef{Path: path}, carFilePath)
-        require.NoError(t, err)
-
-        // Import the CAR file on the miner - this is the equivalent to
-        // transferring the file across the wire in a normal (non-offline) deal
-        err = miner.DealsImportData(ctx, *proposalCid, carFilePath)
-        require.NoError(t, err)
-
-        // Wait for the deal to be published
-        dh.WaitDealPublished(ctx, proposalCid)
-
-        t.Logf("deal published, retrieving")
-
-        // Retrieve the deal
-        dh.TestRetrieval(ctx, fcid, &pieceInfo.PieceCID, false, data)
-    }
-
-    t.Run("NormalRetrieval", func(t *testing.T) {
-        runTest(t, false)
-    })
-    t.Run("FastRetrieval", func(t *testing.T) {
-        runTest(t, true)
    })

+    t.Run("fastretrieval-twodeals-sequential", func(t *testing.T) {
+        dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true})
+        dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true})
-}
-
-func runFullDealCycles(t *testing.T, n int, b kit.APIBuilder, blocktime time.Duration, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
-    fulls, miners := b(t, kit.OneFull, kit.OneMiner)
-    client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
-
-    kit.ConnectAndStartMining(t, blocktime, miner, client)
-
-    dh := kit.NewDealHarness(t, client, miner)
-
-    baseseed := 6
-    for i := 0; i < n; i++ {
-        _, _, _ = dh.MakeFullDeal(kit.MakeFullDealParams{
-            Ctx:         context.Background(),
-            Rseed:       baseseed + i,
-            CarExport:   carExport,
-            FastRet:     fastRet,
-            StartEpoch:  startEpoch,
-            DoRetrieval: true,
-        })
-    }
-}
-func runFastRetrievalDealFlowT(t *testing.T, b kit.APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
-    ctx := context.Background()
-
-    fulls, miners := b(t, kit.OneFull, kit.OneMiner)
-    client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
-
-    kit.ConnectAndStartMining(t, blocktime, miner, client)
-
-    dh := kit.NewDealHarness(t, client, miner)
-
-    data := make([]byte, 1600)
-    rand.New(rand.NewSource(int64(8))).Read(data)
-
-    r := bytes.NewReader(data)
-    fcid, err := client.ClientImportLocal(ctx, r)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    fmt.Println("FILE CID: ", fcid)
-
-    deal := dh.StartDeal(ctx, fcid, true, startEpoch)
-    dh.WaitDealPublished(ctx, deal)
-
-    fmt.Println("deal published, retrieving")
-
-    // Retrieval
-    info, err := client.ClientGetDealInfo(ctx, *deal)
-    require.NoError(t, err)
-
-    dh.TestRetrieval(ctx, fcid, &info.PieceCID, false, data)
-}
-func runSecondDealRetrievalTest(t *testing.T, b kit.APIBuilder, blocktime time.Duration) {
-    ctx := context.Background()
-
-    fulls, miners := b(t, kit.OneFull, kit.OneMiner)
-    client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
-
-    kit.ConnectAndStartMining(t, blocktime, miner, client)
-
-    dh := kit.NewDealHarness(t, client, miner)
-
-    {
-        data1 := make([]byte, 800)
-        rand.New(rand.NewSource(int64(3))).Read(data1)
-        r := bytes.NewReader(data1)
-
-        fcid1, err := client.ClientImportLocal(ctx, r)
-        if err != nil {
-            t.Fatal(err)
-        }
-
-        data2 := make([]byte, 800)
-        rand.New(rand.NewSource(int64(9))).Read(data2)
-        r2 := bytes.NewReader(data2)
-
-        fcid2, err := client.ClientImportLocal(ctx, r2)
-        if err != nil {
-            t.Fatal(err)
-        }
-
-        deal1 := dh.StartDeal(ctx, fcid1, true, 0)
-
-        // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
-        time.Sleep(time.Second)
-        dh.WaitDealSealed(ctx, deal1, true, false, nil)
-
-        deal2 := dh.StartDeal(ctx, fcid2, true, 0)
-
-        time.Sleep(time.Second)
-        dh.WaitDealSealed(ctx, deal2, false, false, nil)
-
-        // Retrieval
-        info, err := client.ClientGetDealInfo(ctx, *deal2)
-        require.NoError(t, err)
-
-        rf, _ := miner.SectorsRefs(ctx)
-        fmt.Printf("refs: %+v\n", rf)
-
-        dh.TestRetrieval(ctx, fcid2, &info.PieceCID, false, data2)
-    }
-}
-func runZeroPricePerByteRetrievalDealFlow(t *testing.T, b kit.APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
-    ctx := context.Background()
-
-    fulls, miners := b(t, kit.OneFull, kit.OneMiner)
-    client, miner := fulls[0].FullNode.(*impl.FullNodeAPI), miners[0]
-
-    kit.ConnectAndStartMining(t, blocktime, miner, client)
-
-    dh := kit.NewDealHarness(t, client, miner)
-
-    // Set price-per-byte to zero
-    ask, err := miner.MarketGetRetrievalAsk(ctx)
-    require.NoError(t, err)
-
-    ask.PricePerByte = abi.NewTokenAmount(0)
-    err = miner.MarketSetRetrievalAsk(ctx, ask)
-    require.NoError(t, err)
-
-    _, _, _ = dh.MakeFullDeal(kit.MakeFullDealParams{
-        Ctx:         ctx,
-        Rseed:       6,
-        CarExport:   false,
-        FastRet:     false,
-        StartEpoch:  startEpoch,
-        DoRetrieval: true,
-    })
    })
}
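The replacement test drives every deal cycle through DealHarness.RunConcurrentDeals. A hedged sketch of a variant that runs several deals in parallel with a fixed start epoch, using only options visible in this diff; the N and start-epoch values and the test name are arbitrary:

package itests

import (
    "testing"
    "time"

    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/lotus/itests/kit"
)

// TestConcurrentDealsSketch runs five storage+retrieval deal cycles in
// parallel against a single miner, with fast retrieval enabled.
func TestConcurrentDealsSketch(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping test in short mode")
    }

    kit.QuietMiningLogs()

    client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
    ens.InterconnectAll().BeginMining(10 * time.Millisecond)

    dh := kit.NewDealHarness(t, client, miner)
    dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{
        N:             5,
        FastRetrieval: true,
        StartEpoch:    abi.ChainEpoch(2 << 12),
    })
}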
@ -5,31 +5,28 @@ import (
    "context"
    "fmt"
    "math"
-    "os"
    "testing"
    "time"

-    "github.com/filecoin-project/lotus/chain/stmgr"
-    "github.com/filecoin-project/lotus/itests/multisig"
-    "github.com/stretchr/testify/require"
-    "golang.org/x/xerrors"
-
    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-jsonrpc"
    "github.com/filecoin-project/go-state-types/abi"
-    "github.com/ipfs/go-cid"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/api/client"
-    "github.com/filecoin-project/lotus/chain/actors/policy"
+    "github.com/filecoin-project/lotus/chain/stmgr"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/cli"
    "github.com/filecoin-project/lotus/gateway"
    "github.com/filecoin-project/lotus/itests/kit"
+    "github.com/filecoin-project/lotus/itests/multisig"
    "github.com/filecoin-project/lotus/node"

    init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init"
    multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
+
+    "github.com/ipfs/go-cid"
+    "github.com/stretchr/testify/require"
+    "golang.org/x/xerrors"
)

const (
@ -37,22 +34,14 @@ const (
    maxStateWaitLookbackLimit = stmgr.LookbackNoLimit
)

-func init() {
-    policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
-    policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
-    policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
-}
-
// TestGatewayWalletMsig tests that API calls to wallet and msig can be made on a lite
// node that is connected through a gateway to a full API node
func TestGatewayWalletMsig(t *testing.T) {
-    _ = os.Setenv("BELLMAN_NO_GPU", "1")
    kit.QuietMiningLogs()

    blocktime := 5 * time.Millisecond
    ctx := context.Background()
    nodes := startNodes(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit)
-    defer nodes.closer()

    lite := nodes.lite
    full := nodes.full
@ -112,7 +101,6 @@ func TestGatewayWalletMsig(t *testing.T) {
        if err != nil {
            return cid.Undef, err
        }

        return lite.MpoolPush(ctx, sm)
    }

@ -180,26 +168,24 @@ func TestGatewayWalletMsig(t *testing.T) {
// TestGatewayMsigCLI tests that msig CLI calls can be made
// on a lite node that is connected through a gateway to a full API node
func TestGatewayMsigCLI(t *testing.T) {
-    _ = os.Setenv("BELLMAN_NO_GPU", "1")
    kit.QuietMiningLogs()

    blocktime := 5 * time.Millisecond
    ctx := context.Background()
    nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit)
-    defer nodes.closer()

    lite := nodes.lite
    multisig.RunMultisigTests(t, lite)
}

func TestGatewayDealFlow(t *testing.T) {
-    _ = os.Setenv("BELLMAN_NO_GPU", "1")
    kit.QuietMiningLogs()

    blocktime := 5 * time.Millisecond
    ctx := context.Background()
    nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit)
-    defer nodes.closer()
+    time.Sleep(5 * time.Second)

    // For these tests where the block time is artificially short, just use
    // a deal start epoch that is guaranteed to be far enough in the future
@ -207,33 +193,27 @@ func TestGatewayDealFlow(t *testing.T) {
    dealStartEpoch := abi.ChainEpoch(2 << 12)

    dh := kit.NewDealHarness(t, nodes.lite, nodes.miner)
-    dh.MakeFullDeal(kit.MakeFullDealParams{
-        Ctx:         ctx,
-        Rseed:       6,
-        CarExport:   false,
-        FastRet:     false,
-        StartEpoch:  dealStartEpoch,
-        DoRetrieval: true,
-    })
+    dealCid, res, _ := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{
+        Rseed:      6,
+        StartEpoch: dealStartEpoch,
+    })
+    dh.PerformRetrieval(ctx, dealCid, res.Root, false)
}

func TestGatewayCLIDealFlow(t *testing.T) {
-    _ = os.Setenv("BELLMAN_NO_GPU", "1")
    kit.QuietMiningLogs()

    blocktime := 5 * time.Millisecond
    ctx := context.Background()
    nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit)
-    defer nodes.closer()

    kit.RunClientTest(t, cli.Commands, nodes.lite)
}

type testNodes struct {
-    lite   kit.TestFullNode
-    full   kit.TestFullNode
-    miner  kit.TestMiner
-    closer jsonrpc.ClientCloser
+    lite  *kit.TestFullNode
+    full  *kit.TestFullNode
+    miner *kit.TestMiner
}

func startNodesWithFunds(
@ -249,8 +229,8 @@ func startNodesWithFunds(
    fullWalletAddr, err := nodes.full.WalletDefaultAddress(ctx)
    require.NoError(t, err)

-    // Create a wallet on the lite node
-    liteWalletAddr, err := nodes.lite.WalletNew(ctx, types.KTSecp256k1)
+    // Get the lite node default wallet address.
+    liteWalletAddr, err := nodes.lite.WalletDefaultAddress(ctx)
    require.NoError(t, err)

    // Send some funds from the full node to the lite node
@ -269,66 +249,47 @@ func startNodes(
) *testNodes {
    var closer jsonrpc.ClientCloser

-    // Create one miner and two full nodes.
+    var (
+        full  *kit.TestFullNode
+        miner *kit.TestMiner
+        lite  kit.TestFullNode
+    )

+    // - Create one full node and one lite node
    // - Put a gateway server in front of full node 1
    // - Start full node 2 in lite mode
    // - Connect lite node -> gateway server -> full node
-    opts := append(
-        // Full node
-        kit.OneFull,
-        // Lite node
-        kit.FullNodeOpts{
-            Lite: true,
-            Opts: func(nodes []kit.TestFullNode) node.Option {
-                fullNode := nodes[0]
-
-                // Create a gateway server in front of the full node
-                gwapi := gateway.NewNode(fullNode, lookbackCap, stateWaitLookbackLimit)
-                handler, err := gateway.Handler(gwapi)
-                require.NoError(t, err)
-
-                srv, _ := kit.CreateRPCServer(t, handler)
-
-                // Create a gateway client API that connects to the gateway server
-                var gapi api.Gateway
-                gapi, closer, err = client.NewGatewayRPCV1(ctx, "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
-                require.NoError(t, err)
-
-                // Provide the gateway API to dependency injection
-                return node.Override(new(api.Gateway), gapi)
-            },
-        },
-    )
-    n, sn := kit.RPCMockMinerBuilder(t, opts, kit.OneMiner)
-
-    full := n[0]
-    lite := n[1]
-    miner := sn[0]
-
-    // Get the listener address for the full node
-    fullAddr, err := full.NetAddrsListen(ctx)
-    require.NoError(t, err)
-
-    // Connect the miner and the full node
-    err = miner.NetConnect(ctx, fullAddr)
-    require.NoError(t, err)
-
-    // Connect the miner and the lite node (so that the lite node can send
-    // data to the miner)
-    liteAddr, err := lite.NetAddrsListen(ctx)
-    require.NoError(t, err)
-    err = miner.NetConnect(ctx, liteAddr)
-    require.NoError(t, err)
-
-    // Start mining blocks
-    bm := kit.NewBlockMiner(t, miner)
-    bm.MineBlocks(ctx, blocktime)
-    t.Cleanup(bm.Stop)
+    // create the full node and the miner.
+    var ens *kit.Ensemble
+    full, miner, ens = kit.EnsembleMinimal(t, kit.MockProofs())
+    ens.InterconnectAll().BeginMining(blocktime)
+
+    // Create a gateway server in front of the full node
+    gwapi := gateway.NewNode(full, lookbackCap, stateWaitLookbackLimit)
+    handler, err := gateway.Handler(gwapi)
+    require.NoError(t, err)
+
+    srv, _ := kit.CreateRPCServer(t, handler)
+
+    // Create a gateway client API that connects to the gateway server
+    var gapi api.Gateway
+    gapi, closer, err = client.NewGatewayRPCV1(ctx, "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
+    require.NoError(t, err)
+    t.Cleanup(closer)
+
+    ens.FullNode(&lite,
+        kit.LiteNode(),
+        kit.ThroughRPC(),
+        kit.ConstructorOpts(
+            node.Override(new(api.Gateway), gapi),
+        ),
+    ).Start().InterconnectAll()

-    return &testNodes{lite: lite, full: full, miner: miner, closer: closer}
+    return &testNodes{lite: &lite, full: full, miner: miner}
}

-func sendFunds(ctx context.Context, fromNode kit.TestFullNode, fromAddr address.Address, toAddr address.Address, amt types.BigInt) error {
+func sendFunds(ctx context.Context, fromNode *kit.TestFullNode, fromAddr address.Address, toAddr address.Address, amt types.BigInt) error {
    msg := &types.Message{
        From: fromAddr,
        To:   toAddr,
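Condensed, the gateway wiring above follows one pattern: expose a full node through a gateway RPC server, then start a lite node whose api.Gateway dependency points at that server. A standalone hedged sketch of the same flow, reusing only calls and constants that appear in this file's diff; the test name is invented:

package itests

import (
    "context"
    "testing"
    "time"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/api/client"
    "github.com/filecoin-project/lotus/gateway"
    "github.com/filecoin-project/lotus/itests/kit"
    "github.com/filecoin-project/lotus/node"
    "github.com/stretchr/testify/require"
)

func TestLiteNodeThroughGatewaySketch(t *testing.T) {
    ctx := context.Background()

    // full node + miner, mining in the background.
    full, _, ens := kit.EnsembleMinimal(t, kit.MockProofs())
    ens.InterconnectAll().BeginMining(5 * time.Millisecond)

    // gateway server wrapping the full node API.
    gwapi := gateway.NewNode(full, maxLookbackCap, maxStateWaitLookbackLimit)
    handler, err := gateway.Handler(gwapi)
    require.NoError(t, err)
    srv, _ := kit.CreateRPCServer(t, handler)

    // gateway client API pointed at that server.
    gapi, closer, err := client.NewGatewayRPCV1(ctx, "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
    require.NoError(t, err)
    t.Cleanup(closer)

    // lite node that reaches the chain only through the gateway.
    var lite kit.TestFullNode
    ens.FullNode(&lite,
        kit.LiteNode(),
        kit.ThroughRPC(),
        kit.ConstructorOpts(node.Override(new(api.Gateway), gapi)),
    ).Start().InterconnectAll()

    // the lite node can now serve wallet calls backed by the full node.
    _, err = lite.WalletDefaultAddress(ctx)
    require.NoError(t, err)
}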
@ -15,14 +15,14 @@ import (
// BlockMiner is a utility that makes a test miner Mine blocks on a timer.
type BlockMiner struct {
    t     *testing.T
-    miner TestMiner
+    miner *TestMiner

    nextNulls int64
    wg        sync.WaitGroup
    cancel    context.CancelFunc
}

-func NewBlockMiner(t *testing.T, miner TestMiner) *BlockMiner {
+func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner {
    return &BlockMiner{
        t:     t,
        miner: miner,
@ -69,7 +69,7 @@ func (bm *BlockMiner) InjectNulls(rounds abi.ChainEpoch) {
    atomic.AddInt64(&bm.nextNulls, int64(rounds))
}

-func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn TestFullNode, cb func(abi.ChainEpoch)) {
+func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn *TestFullNode, cb func(abi.ChainEpoch)) {
    for i := 0; i < 1000; i++ {
        var (
            success bool
@ -93,7 +93,7 @@ func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn TestFullNode, cb fu
        if success {
            // Wait until it shows up on the given full nodes ChainHead
-            nloops := 50
+            nloops := 200
            for i := 0; i < nloops; i++ {
                ts, err := fn.ChainHead(ctx)
                require.NoError(bm.t, err)
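BlockMiner is the lower-level alternative to Ensemble.BeginMining used elsewhere in this PR, useful when a test needs control over null rounds or per-block pacing. A hedged usage sketch built only from helpers visible in this diff (NewBlockMiner, MineBlocks, Stop, InjectNulls, HeightAtLeast); how these combine is my assumption, and the test name is invented:

package itests

import (
    "context"
    "testing"
    "time"

    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/lotus/itests/kit"
)

// TestBlockMinerSketch drives mining manually instead of calling
// ens.InterconnectAll().BeginMining(...).
func TestBlockMinerSketch(t *testing.T) {
    ctx := context.Background()

    client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
    ens.InterconnectAll()

    bm := kit.NewBlockMiner(t, miner)
    bm.MineBlocks(ctx, 10*time.Millisecond)
    t.Cleanup(bm.Stop)

    // schedule two null rounds before the next block is mined.
    bm.InjectNulls(abi.ChainEpoch(2))

    // wait for the chain to move past the injected nulls.
    client.WaitTillChain(ctx, kit.HeightAtLeast(5))
}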
@ -21,7 +21,7 @@ import (
)

// RunClientTest exercises some of the Client CLI commands
-func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode TestFullNode) {
+func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode *TestFullNode) {
    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
    defer cancel()

@ -1,50 +1,68 @@
package kit

import (
-    "bytes"
    "context"
    "fmt"
    "io/ioutil"
    "os"
-    "path/filepath"
    "testing"
    "time"

-    "github.com/ipfs/go-cid"
-    files "github.com/ipfs/go-ipfs-files"
-    "github.com/ipld/go-car"
-    "github.com/stretchr/testify/require"
-
    "github.com/filecoin-project/go-fil-markets/storagemarket"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/build"
    "github.com/filecoin-project/lotus/chain/types"
    sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
-    "github.com/filecoin-project/lotus/node/impl"
+    "github.com/ipfs/go-cid"
+    files "github.com/ipfs/go-ipfs-files"
    ipld "github.com/ipfs/go-ipld-format"
    dag "github.com/ipfs/go-merkledag"
    dstest "github.com/ipfs/go-merkledag/test"
    unixfile "github.com/ipfs/go-unixfs/file"
+    "github.com/ipld/go-car"
+    "github.com/stretchr/testify/require"
+    "golang.org/x/sync/errgroup"
)

type DealHarness struct {
    t      *testing.T
-    client api.FullNode
-    miner  TestMiner
+    client *TestFullNode
+    miner  *TestMiner
}

type MakeFullDealParams struct {
-    Ctx         context.Context
-    Rseed       int
-    CarExport   bool
-    FastRet     bool
-    StartEpoch  abi.ChainEpoch
-    DoRetrieval bool
+    Rseed      int
+    FastRet    bool
+    StartEpoch abi.ChainEpoch
+
+    // SuspendUntilCryptoeconStable suspends deal-making, until cryptoecon
+    // parameters are stabilised. This affects projected collateral, and tests
+    // will fail in network version 13 and higher if deals are started too soon
+    // after network birth.
+    //
+    // The reason is that the formula for collateral calculation takes
+    // circulating supply into account:
+    //
+    //   [portion of power this deal will be] * [~1% of tokens].
+    //
+    // In the first epochs after genesis, the total circulating supply is
+    // changing dramatically in percentual terms. Therefore, if the deal is
+    // proposed too soon, by the time it gets published on chain, the quoted
+    // provider collateral will no longer be valid.
+    //
+    // The observation is that deals fail with:
+    //
+    //   GasEstimateMessageGas error: estimating gas used: message execution
+    //   failed: exit 16, reason: Provider collateral out of bounds. (RetCode=16)
+    //
+    // Enabling this will suspend deal-making until the network has reached a
+    // height of 300.
+    SuspendUntilCryptoeconStable bool
}
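The collateral bound described in that comment is why some tests opt in to the suspension before proposing a deal. A hedged example of opting in from a test, using only the harness calls and fields defined in this PR; the seed, block time, and test name are illustrative:

package itests

import (
    "context"
    "testing"
    "time"

    "github.com/filecoin-project/lotus/itests/kit"
)

func TestDealAfterCryptoeconStableSketch(t *testing.T) {
    ctx := context.Background()

    client, miner, ens := kit.EnsembleMinimal(t) // real proofs
    ens.InterconnectAll().BeginMining(50 * time.Millisecond)

    dh := kit.NewDealHarness(t, client, miner)

    // Wait until height 300 before proposing, so the quoted provider
    // collateral is still within bounds when the deal lands on chain.
    deal, res, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{
        Rseed:                        7,
        SuspendUntilCryptoeconStable: true,
    })
    dh.PerformRetrieval(ctx, deal, res.Root, false)
}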

// NewDealHarness creates a test harness that contains testing utilities for deals.
-func NewDealHarness(t *testing.T, client api.FullNode, miner TestMiner) *DealHarness {
+func NewDealHarness(t *testing.T, client *TestFullNode, miner *TestMiner) *DealHarness {
    return &DealHarness{
        t:      t,
        client: client,
@ -52,43 +70,39 @@ func NewDealHarness(t *testing.T, client api.FullNode, miner TestMiner) *DealHar
    }
}

-func (dh *DealHarness) MakeFullDeal(params MakeFullDealParams) ([]byte,
-    *api.DealInfo, cid.Cid) {
-    res, _, data, err := CreateImportFile(params.Ctx, dh.client, params.Rseed, 0)
-    if err != nil {
-        dh.t.Fatal(err)
-    }
+// MakeOnlineDeal makes an online deal, generating a random file with the
+// supplied seed, and setting the specified fast retrieval flag and start epoch
+// on the storage deal. It returns when the deal is sealed.
+//
+// TODO: convert input parameters to struct, and add size as an input param.
+func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealParams) (deal *cid.Cid, res *api.ImportRes, path string) {
+    res, path = dh.client.CreateImportFile(ctx, params.Rseed, 0)
+
+    dh.t.Logf("FILE CID: %s", res.Root)
+
+    if params.SuspendUntilCryptoeconStable {
+        dh.t.Logf("deal-making suspending until cryptoecon parameters have stabilised")
+        ts := dh.client.WaitTillChain(ctx, HeightAtLeast(300))
+        dh.t.Logf("deal-making continuing; current height is %d", ts.Height())
    }

-    fcid := res.Root
-    fmt.Println("FILE CID: ", fcid)
-
-    deal := dh.StartDeal(params.Ctx, fcid, params.FastRet, params.StartEpoch)
+    deal = dh.StartDeal(ctx, res.Root, params.FastRet, params.StartEpoch)

    // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
    time.Sleep(time.Second)
-    dh.WaitDealSealed(params.Ctx, deal, false, false, nil)
+    dh.WaitDealSealed(ctx, deal, false, false, nil)

-    // Retrieval
-    info, err := dh.client.ClientGetDealInfo(params.Ctx, *deal)
-    require.NoError(dh.t, err)
-
-    if params.DoRetrieval {
-        dh.TestRetrieval(params.Ctx, fcid, &info.PieceCID, params.CarExport, data)
-    }
-
-    return data, info, fcid
+    return deal, res, path
}

+// StartDeal starts a storage deal between the client and the miner.
func (dh *DealHarness) StartDeal(ctx context.Context, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid {
    maddr, err := dh.miner.ActorAddress(ctx)
-    if err != nil {
-        dh.t.Fatal(err)
-    }
+    require.NoError(dh.t, err)

    addr, err := dh.client.WalletDefaultAddress(ctx)
-    if err != nil {
-        dh.t.Fatal(err)
-    }
+    require.NoError(dh.t, err)

    deal, err := dh.client.ClientStartDeal(ctx, &api.StartDealParams{
        Data: &storagemarket.DataRef{
            TransferType: storagemarket.TTGraphsync,
|
|||||||
MinBlocksDuration: uint64(build.MinDealDuration),
|
MinBlocksDuration: uint64(build.MinDealDuration),
|
||||||
FastRetrieval: fastRet,
|
FastRetrieval: fastRet,
|
||||||
})
|
})
|
||||||
if err != nil {
|
require.NoError(dh.t, err)
|
||||||
dh.t.Fatalf("%+v", err)
|
|
||||||
}
|
|
||||||
return deal
|
return deal
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WaitDealSealed waits until the deal is sealed.
|
||||||
func (dh *DealHarness) WaitDealSealed(ctx context.Context, deal *cid.Cid, noseal, noSealStart bool, cb func()) {
|
func (dh *DealHarness) WaitDealSealed(ctx context.Context, deal *cid.Cid, noseal, noSealStart bool, cb func()) {
|
||||||
loop:
|
loop:
|
||||||
for {
|
for {
|
||||||
@ -128,7 +142,7 @@ loop:
        case storagemarket.StorageDealError:
            dh.t.Fatal("deal errored", di.Message)
        case storagemarket.StorageDealActive:
-            fmt.Println("COMPLETE", di)
+            dh.t.Log("COMPLETE", di)
            break loop
        }

@ -143,7 +157,7 @@ loop:
            }
        }

-        fmt.Printf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState])
+        dh.t.Logf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState])
        time.Sleep(time.Second / 2)
        if cb != nil {
            cb()
@ -151,13 +165,14 @@ loop:
        }
    }

+// WaitDealPublished waits until the deal is published.
func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) {
    subCtx, cancel := context.WithCancel(ctx)
    defer cancel()

    updates, err := dh.miner.MarketGetDealUpdates(subCtx)
-    if err != nil {
-        dh.t.Fatal(err)
-    }
+    require.NoError(dh.t, err)

    for {
        select {
        case <-ctx.Done():
@ -172,10 +187,10 @@ func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) {
            case storagemarket.StorageDealError:
                dh.t.Fatal("deal errored", di.Message)
            case storagemarket.StorageDealFinalizing, storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
-                fmt.Println("COMPLETE", di)
+                dh.t.Log("COMPLETE", di)
                return
            }
-            fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
+            dh.t.Log("Deal state: ", storagemarket.DealStates[di.State])
        }
    }
}
@ -194,133 +209,103 @@ func (dh *DealHarness) StartSealingWaiting(ctx context.Context) {
|
|||||||
require.NoError(dh.t, dh.miner.SectorStartSealing(ctx, snum))
|
require.NoError(dh.t, dh.miner.SectorStartSealing(ctx, snum))
|
||||||
}
|
}
|
||||||
|
|
||||||
flushSealingBatches(dh.t, ctx, dh.miner)
|
dh.miner.FlushSealingBatches(ctx)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dh *DealHarness) TestRetrieval(ctx context.Context, fcid cid.Cid, piece *cid.Cid, carExport bool, expect []byte) {
|
func (dh *DealHarness) PerformRetrieval(ctx context.Context, deal *cid.Cid, root cid.Cid, carExport bool) (path string) {
|
||||||
offers, err := dh.client.ClientFindData(ctx, fcid, piece)
|
// perform retrieval.
|
||||||
if err != nil {
|
info, err := dh.client.ClientGetDealInfo(ctx, *deal)
|
||||||
dh.t.Fatal(err)
|
require.NoError(dh.t, err)
|
||||||
}
|
|
||||||
|
|
||||||
if len(offers) < 1 {
|
offers, err := dh.client.ClientFindData(ctx, root, &info.PieceCID)
|
||||||
dh.t.Fatal("no offers")
|
require.NoError(dh.t, err)
|
||||||
}
|
require.NotEmpty(dh.t, offers, "no offers")
|
||||||
|
|
||||||
rpath, err := ioutil.TempDir("", "lotus-retrieve-test-")
|
carFile, err := ioutil.TempFile(dh.t.TempDir(), "ret-car")
|
||||||
if err != nil {
|
require.NoError(dh.t, err)
|
||||||
dh.t.Fatal(err)
|
|
||||||
}
|
defer carFile.Close() //nolint:errcheck
|
||||||
defer os.RemoveAll(rpath) //nolint:errcheck
|
|
||||||
|
|
||||||
caddr, err := dh.client.WalletDefaultAddress(ctx)
|
caddr, err := dh.client.WalletDefaultAddress(ctx)
|
||||||
if err != nil {
|
require.NoError(dh.t, err)
|
||||||
dh.t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ref := &api.FileRef{
|
ref := &api.FileRef{
|
||||||
Path: filepath.Join(rpath, "ret"),
|
Path: carFile.Name(),
|
||||||
IsCAR: carExport,
|
IsCAR: carExport,
|
||||||
}
|
}
|
||||||
|
|
||||||
updates, err := dh.client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref)
|
updates, err := dh.client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref)
|
||||||
if err != nil {
|
require.NoError(dh.t, err)
|
||||||
dh.t.Fatal(err)
|
|
||||||
}
|
|
||||||
for update := range updates {
|
for update := range updates {
|
||||||
if update.Err != "" {
|
require.Emptyf(dh.t, update.Err, "retrieval failed: %s", update.Err)
|
||||||
dh.t.Fatalf("retrieval failed: %s", update.Err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret"))
|
|
||||||
if err != nil {
|
|
||||||
dh.t.Fatal(err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ret := carFile.Name()
|
||||||
if carExport {
|
if carExport {
|
||||||
rdata = dh.ExtractCarData(ctx, rdata, rpath)
|
actualFile := dh.ExtractFileFromCAR(ctx, carFile)
|
||||||
|
ret = actualFile.Name()
|
||||||
|
_ = actualFile.Close() //nolint:errcheck
|
||||||
}
|
}
|
||||||
|
|
||||||
if !bytes.Equal(rdata, expect) {
|
return ret
|
||||||
dh.t.Fatal("wrong expect retrieved")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dh *DealHarness) ExtractCarData(ctx context.Context, rdata []byte, rpath string) []byte {
|
func (dh *DealHarness) ExtractFileFromCAR(ctx context.Context, file *os.File) (out *os.File) {
|
||||||
bserv := dstest.Bserv()
|
bserv := dstest.Bserv()
|
||||||
ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata))
|
ch, err := car.LoadCar(bserv.Blockstore(), file)
|
||||||
if err != nil {
|
require.NoError(dh.t, err)
|
||||||
dh.t.Fatal(err)
|
|
||||||
}
|
|
||||||
b, err := bserv.GetBlock(ctx, ch.Roots[0])
|
b, err := bserv.GetBlock(ctx, ch.Roots[0])
|
||||||
if err != nil {
|
require.NoError(dh.t, err)
|
||||||
dh.t.Fatal(err)
|
|
||||||
}
|
|
||||||
nd, err := ipld.Decode(b)
|
nd, err := ipld.Decode(b)
|
||||||
if err != nil {
|
require.NoError(dh.t, err)
|
||||||
dh.t.Fatal(err)
|
|
||||||
}
|
|
||||||
dserv := dag.NewDAGService(bserv)
|
dserv := dag.NewDAGService(bserv)
|
||||||
fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
|
fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
|
||||||
if err != nil {
|
require.NoError(dh.t, err)
|
||||||
dh.t.Fatal(err)
|
|
||||||
}
|
tmpfile, err := ioutil.TempFile(dh.t.TempDir(), "file-in-car")
|
||||||
outPath := filepath.Join(rpath, "retLoadedCAR")
|
require.NoError(dh.t, err)
|
||||||
if err := files.WriteTo(fil, outPath); err != nil {
|
|
||||||
dh.t.Fatal(err)
|
defer tmpfile.Close() //nolint:errcheck
|
||||||
}
|
|
||||||
rdata, err = ioutil.ReadFile(outPath)
|
err = files.WriteTo(fil, tmpfile.Name())
|
||||||
if err != nil {
|
require.NoError(dh.t, err)
|
||||||
dh.t.Fatal(err)
|
|
||||||
}
|
return tmpfile
|
||||||
return rdata
|
|
||||||
}
|
}
|
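The two wait helpers above poll different endpoints: WaitDealSealed tracks client- and miner-side deal state until the deal goes active, while WaitDealPublished watches the miner's market update stream. A minimal sketch of how a test might combine them; dh and deal are assumed to come from the calling test's harness setup (e.g. the *cid.Cid returned by dh.StartDeal), and waitForDeal is a hypothetical helper name:

package itests

import (
	"context"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/itests/kit"
)

// waitForDeal is a hypothetical helper: dh and deal come from the calling
// test's harness setup.
func waitForDeal(ctx context.Context, dh *kit.DealHarness, deal *cid.Cid) {
	// Block until the miner's market subsystem reports the deal as published
	// (or in a later state).
	dh.WaitDealPublished(ctx, deal)

	// Poll client and miner deal state every half second until the deal is
	// StorageDealActive; noseal and noSealStart are false so sealing proceeds,
	// and no per-iteration callback is needed.
	dh.WaitDealSealed(ctx, deal, false, false, nil)
}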
 
-type DealsScaffold struct {
-	Ctx        context.Context
-	Client     *impl.FullNodeAPI
-	Miner      TestMiner
-	BlockMiner *BlockMiner
-}
-
-func ConnectAndStartMining(t *testing.T, blocktime time.Duration, miner TestMiner, clients ...api.FullNode) *BlockMiner {
-	ctx := context.Background()
-
-	for _, c := range clients {
-		addrinfo, err := c.NetAddrsListen(ctx)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if err := miner.NetConnect(ctx, addrinfo); err != nil {
-			t.Fatal(err)
-		}
-	}
-
-	time.Sleep(time.Second)
-
-	blockMiner := NewBlockMiner(t, miner)
-	blockMiner.MineBlocks(ctx, blocktime)
-	t.Cleanup(blockMiner.Stop)
-	return blockMiner
-}
-
-type TestDealState int
-
-const (
-	TestDealStateFailed     = TestDealState(-1)
-	TestDealStateInProgress = TestDealState(0)
-	TestDealStateComplete   = TestDealState(1)
-)
-
-// CategorizeDealState categorizes deal states into one of three states:
-// Complete, InProgress, Failed.
-func CategorizeDealState(dealStatus string) TestDealState {
-	switch dealStatus {
-	case "StorageDealFailing", "StorageDealError":
-		return TestDealStateFailed
-	case "StorageDealStaged", "StorageDealAwaitingPreCommit", "StorageDealSealing", "StorageDealActive", "StorageDealExpired", "StorageDealSlashed":
-		return TestDealStateComplete
-	}
-	return TestDealStateInProgress
+type RunConcurrentDealsOpts struct {
+	N             int
+	FastRetrieval bool
+	CarExport     bool
+	StartEpoch    abi.ChainEpoch
+}
+
+func (dh *DealHarness) RunConcurrentDeals(opts RunConcurrentDealsOpts) {
+	errgrp, _ := errgroup.WithContext(context.Background())
+	for i := 0; i < opts.N; i++ {
+		i := i
+		errgrp.Go(func() (err error) {
+			defer func() {
+				// This is necessary because golang can't deal with test
+				// failures being reported from children goroutines ¯\_(ツ)_/¯
+				if r := recover(); r != nil {
+					err = fmt.Errorf("deal failed: %s", r)
+				}
+			}()
+			deal, res, inPath := dh.MakeOnlineDeal(context.Background(), MakeFullDealParams{
+				Rseed:      5 + i,
+				FastRet:    opts.FastRetrieval,
+				StartEpoch: opts.StartEpoch,
+			})
+			outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, opts.CarExport)
+			AssertFilesEqual(dh.t, inPath, outPath)
+			return nil
+		})
+	}
+	require.NoError(dh.t, errgrp.Wait())
 }
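RunConcurrentDeals fans out N independent deal-plus-retrieval round trips and funnels goroutine failures back through the errgroup. A sketch of a test driving it end to end; it assumes the kit exposes a NewDealHarness(t, client, miner) constructor (not visible in this hunk), and uses the EnsembleMinimal preset added later in this PR:

package itests

import (
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit"
)

func TestConcurrentDealsSketch(t *testing.T) {
	kit.QuietMiningLogs()

	// One full node + one miner, mock proofs, everything connected and mining.
	client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
	ens.InterconnectAll().BeginMining(10 * time.Millisecond)

	// NewDealHarness is assumed here; the harness only needs t, a client and a miner.
	dh := kit.NewDealHarness(t, client, miner)

	dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{
		N:             3,
		FastRetrieval: true,
		CarExport:     false,
	})
}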
21	itests/kit/deals_state.go	Normal file
@ -0,0 +1,21 @@
package kit

type TestDealState int

const (
	TestDealStateFailed     = TestDealState(-1)
	TestDealStateInProgress = TestDealState(0)
	TestDealStateComplete   = TestDealState(1)
)

// CategorizeDealState categorizes deal states into one of three states:
// Complete, InProgress, Failed.
func CategorizeDealState(dealStatus string) TestDealState {
	switch dealStatus {
	case "StorageDealFailing", "StorageDealError":
		return TestDealStateFailed
	case "StorageDealStaged", "StorageDealAwaitingPreCommit", "StorageDealSealing", "StorageDealActive", "StorageDealExpired", "StorageDealSlashed":
		return TestDealStateComplete
	}
	return TestDealStateInProgress
}
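CategorizeDealState collapses the fine-grained storagemarket state names into pass/fail/in-progress buckets, which is convenient in polling loops. A hedged sketch; checkDealOutcome is a hypothetical helper, and client, ctx and dealCid are assumed to come from the surrounding test:

package itests

import (
	"context"
	"testing"

	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/ipfs/go-cid"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/itests/kit"
)

// checkDealOutcome buckets a deal's current state, failing the test outright
// on a terminal error.
func checkDealOutcome(ctx context.Context, t *testing.T, client api.FullNode, dealCid cid.Cid) kit.TestDealState {
	di, err := client.ClientGetDealInfo(ctx, dealCid)
	require.NoError(t, err)

	state := kit.CategorizeDealState(storagemarket.DealStates[di.State])
	if state == kit.TestDealStateFailed {
		t.Fatalf("deal failed: %s", di.Message)
	}
	return state // TestDealStateComplete or TestDealStateInProgress
}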
646	itests/kit/ensemble.go	Normal file
@ -0,0 +1,646 @@
|
|||||||
|
package kit
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"crypto/rand"
|
||||||
|
"io/ioutil"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-address"
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/go-state-types/big"
|
||||||
|
"github.com/filecoin-project/go-state-types/exitcode"
|
||||||
|
"github.com/filecoin-project/go-state-types/network"
|
||||||
|
"github.com/filecoin-project/go-storedcounter"
|
||||||
|
"github.com/filecoin-project/lotus/api"
|
||||||
|
"github.com/filecoin-project/lotus/api/v1api"
|
||||||
|
"github.com/filecoin-project/lotus/build"
|
||||||
|
"github.com/filecoin-project/lotus/chain"
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors"
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
|
||||||
|
"github.com/filecoin-project/lotus/chain/gen"
|
||||||
|
genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
|
||||||
|
"github.com/filecoin-project/lotus/chain/messagepool"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
"github.com/filecoin-project/lotus/chain/wallet"
|
||||||
|
"github.com/filecoin-project/lotus/cmd/lotus-seed/seed"
|
||||||
|
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
|
||||||
|
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||||
|
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
|
||||||
|
"github.com/filecoin-project/lotus/genesis"
|
||||||
|
lotusminer "github.com/filecoin-project/lotus/miner"
|
||||||
|
"github.com/filecoin-project/lotus/node"
|
||||||
|
"github.com/filecoin-project/lotus/node/config"
|
||||||
|
"github.com/filecoin-project/lotus/node/modules"
|
||||||
|
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||||
|
testing2 "github.com/filecoin-project/lotus/node/modules/testing"
|
||||||
|
"github.com/filecoin-project/lotus/node/repo"
|
||||||
|
"github.com/filecoin-project/lotus/storage/mockstorage"
|
||||||
|
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
|
||||||
|
power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power"
|
||||||
|
"github.com/ipfs/go-datastore"
|
||||||
|
libp2pcrypto "github.com/libp2p/go-libp2p-core/crypto"
|
||||||
|
"github.com/libp2p/go-libp2p-core/peer"
|
||||||
|
mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
chain.BootstrapPeerThreshold = 1
|
||||||
|
messagepool.HeadChangeCoalesceMinDelay = time.Microsecond
|
||||||
|
messagepool.HeadChangeCoalesceMaxDelay = 2 * time.Microsecond
|
||||||
|
messagepool.HeadChangeCoalesceMergeInterval = 100 * time.Nanosecond
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensemble is a collection of nodes instantiated within a test.
|
||||||
|
//
|
||||||
|
// Create a new ensemble with:
|
||||||
|
//
|
||||||
|
// ens := kit.NewEnsemble()
|
||||||
|
//
|
||||||
|
// Create full nodes and miners:
|
||||||
|
//
|
||||||
|
// var full TestFullNode
|
||||||
|
// var miner TestMiner
|
||||||
|
// ens.FullNode(&full, opts...) // populates a full node
|
||||||
|
// ens.Miner(&miner, &full, opts...) // populates a miner, using the full node as its chain daemon
|
||||||
|
//
|
||||||
|
// It is possible to pass functional options to set initial balances,
|
||||||
|
// presealed sectors, owner keys, etc.
|
||||||
|
//
|
||||||
|
// After the initial nodes are added, call `ens.Start()` to forge genesis
|
||||||
|
// and start the network. Mining will NOT be started automatically. It needs
|
||||||
|
// to be started explicitly by calling `BeginMining`.
|
||||||
|
//
|
||||||
|
// Nodes also need to be connected with one another, either via `ens.Connect()`
|
||||||
|
// or `ens.InterconnectAll()`. A common inchantation for simple tests is to do:
|
||||||
|
//
|
||||||
|
// ens.InterconnectAll().BeginMining(blocktime)
|
||||||
|
//
|
||||||
|
// You can continue to add more nodes, but you must always follow with
|
||||||
|
// `ens.Start()` to activate the new nodes.
|
||||||
|
//
|
||||||
|
// The API is chainable, so it's possible to do a lot in a very succinct way:
|
||||||
|
//
|
||||||
|
// kit.NewEnsemble().FullNode(&full).Miner(&miner, &full).Start().InterconnectAll().BeginMining()
|
||||||
|
//
|
||||||
|
// You can also find convenient fullnode:miner presets, such as 1:1, 1:2,
|
||||||
|
// and 2:1, e.g.:
|
||||||
|
//
|
||||||
|
// kit.EnsembleMinimal()
|
||||||
|
// kit.EnsembleOneTwo()
|
||||||
|
// kit.EnsembleTwoOne()
|
||||||
|
//
|
||||||
|
type Ensemble struct {
|
||||||
|
t *testing.T
|
||||||
|
bootstrapped bool
|
||||||
|
genesisBlock bytes.Buffer
|
||||||
|
mn mocknet.Mocknet
|
||||||
|
options *ensembleOpts
|
||||||
|
|
||||||
|
inactive struct {
|
||||||
|
fullnodes []*TestFullNode
|
||||||
|
miners []*TestMiner
|
||||||
|
}
|
||||||
|
active struct {
|
||||||
|
fullnodes []*TestFullNode
|
||||||
|
miners []*TestMiner
|
||||||
|
bms map[*TestMiner]*BlockMiner
|
||||||
|
}
|
||||||
|
genesis struct {
|
||||||
|
miners []genesis.Miner
|
||||||
|
accounts []genesis.Actor
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEnsemble instantiates a new blank Ensemble.
|
||||||
|
func NewEnsemble(t *testing.T, opts ...EnsembleOpt) *Ensemble {
|
||||||
|
options := DefaultEnsembleOpts
|
||||||
|
for _, o := range opts {
|
||||||
|
err := o(&options)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
n := &Ensemble{t: t, options: &options}
|
||||||
|
n.active.bms = make(map[*TestMiner]*BlockMiner)
|
||||||
|
|
||||||
|
// add accounts from ensemble options to genesis.
|
||||||
|
for _, acc := range options.accounts {
|
||||||
|
n.genesis.accounts = append(n.genesis.accounts, genesis.Actor{
|
||||||
|
Type: genesis.TAccount,
|
||||||
|
Balance: acc.initialBalance,
|
||||||
|
Meta: (&genesis.AccountMeta{Owner: acc.key.Address}).ActorMeta(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// FullNode enrolls a new full node.
|
||||||
|
func (n *Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble {
|
||||||
|
options := DefaultNodeOpts
|
||||||
|
for _, o := range opts {
|
||||||
|
err := o(&options)
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
key, err := wallet.GenerateKey(types.KTBLS)
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
if !n.bootstrapped && !options.balance.IsZero() {
|
||||||
|
// if we still haven't forged genesis, create a key+address, and assign
|
||||||
|
// it some FIL; this will be set as the default wallet when the node is
|
||||||
|
// started.
|
||||||
|
genacc := genesis.Actor{
|
||||||
|
Type: genesis.TAccount,
|
||||||
|
Balance: options.balance,
|
||||||
|
Meta: (&genesis.AccountMeta{Owner: key.Address}).ActorMeta(),
|
||||||
|
}
|
||||||
|
|
||||||
|
n.genesis.accounts = append(n.genesis.accounts, genacc)
|
||||||
|
}
|
||||||
|
|
||||||
|
*full = TestFullNode{t: n.t, options: options, DefaultKey: key}
|
||||||
|
n.inactive.fullnodes = append(n.inactive.fullnodes, full)
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// Miner enrolls a new miner, using the provided full node for chain
|
||||||
|
// interactions.
|
||||||
|
func (n *Ensemble) Miner(miner *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble {
|
||||||
|
require.NotNil(n.t, full, "full node required when instantiating miner")
|
||||||
|
|
||||||
|
options := DefaultNodeOpts
|
||||||
|
for _, o := range opts {
|
||||||
|
err := o(&options)
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader)
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
peerId, err := peer.IDFromPrivateKey(privkey)
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
tdir, err := ioutil.TempDir("", "preseal-memgen")
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
minerCnt := len(n.inactive.miners) + len(n.active.miners)
|
||||||
|
|
||||||
|
actorAddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(minerCnt))
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
ownerKey := options.ownerKey
|
||||||
|
if !n.bootstrapped {
|
||||||
|
var (
|
||||||
|
sectors = options.sectors
|
||||||
|
k *types.KeyInfo
|
||||||
|
genm *genesis.Miner
|
||||||
|
)
|
||||||
|
|
||||||
|
// create the preseal commitment.
|
||||||
|
if n.options.mockProofs {
|
||||||
|
genm, k, err = mockstorage.PreSeal(abi.RegisteredSealProof_StackedDrg2KiBV1, actorAddr, sectors)
|
||||||
|
} else {
|
||||||
|
genm, k, err = seed.PreSeal(actorAddr, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, sectors, tdir, []byte("make genesis mem random"), nil, true)
|
||||||
|
}
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
genm.PeerId = peerId
|
||||||
|
|
||||||
|
// create an owner key, and assign it some FIL.
|
||||||
|
ownerKey, err = wallet.NewKey(*k)
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
genacc := genesis.Actor{
|
||||||
|
Type: genesis.TAccount,
|
||||||
|
Balance: options.balance,
|
||||||
|
Meta: (&genesis.AccountMeta{Owner: ownerKey.Address}).ActorMeta(),
|
||||||
|
}
|
||||||
|
|
||||||
|
n.genesis.miners = append(n.genesis.miners, *genm)
|
||||||
|
n.genesis.accounts = append(n.genesis.accounts, genacc)
|
||||||
|
} else {
|
||||||
|
require.NotNil(n.t, ownerKey, "worker key can't be null if initializing a miner after genesis")
|
||||||
|
}
|
||||||
|
|
||||||
|
*miner = TestMiner{
|
||||||
|
t: n.t,
|
||||||
|
ActorAddr: actorAddr,
|
||||||
|
OwnerKey: ownerKey,
|
||||||
|
FullNode: full,
|
||||||
|
PresealDir: tdir,
|
||||||
|
options: options,
|
||||||
|
}
|
||||||
|
|
||||||
|
miner.Libp2p.PeerID = peerId
|
||||||
|
miner.Libp2p.PrivKey = privkey
|
||||||
|
|
||||||
|
n.inactive.miners = append(n.inactive.miners, miner)
|
||||||
|
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start starts all enrolled nodes.
|
||||||
|
func (n *Ensemble) Start() *Ensemble {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
var gtempl *genesis.Template
|
||||||
|
if !n.bootstrapped {
|
||||||
|
// We haven't been bootstrapped yet, we need to generate genesis and
|
||||||
|
// create the networking backbone.
|
||||||
|
gtempl = n.generateGenesis()
|
||||||
|
n.mn = mocknet.New(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---------------------
|
||||||
|
// FULL NODES
|
||||||
|
// ---------------------
|
||||||
|
|
||||||
|
// Create all inactive full nodes.
|
||||||
|
for i, full := range n.inactive.fullnodes {
|
||||||
|
opts := []node.Option{
|
||||||
|
node.FullAPI(&full.FullNode, node.Lite(full.options.lite)),
|
||||||
|
node.Online(),
|
||||||
|
node.Repo(repo.NewMemory(nil)),
|
||||||
|
node.MockHost(n.mn),
|
||||||
|
node.Test(),
|
||||||
|
|
||||||
|
// so that we subscribe to pubsub topics immediately
|
||||||
|
node.Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(true)),
|
||||||
|
}
|
||||||
|
|
||||||
|
// append any node builder options.
|
||||||
|
opts = append(opts, full.options.extraNodeOpts...)
|
||||||
|
|
||||||
|
// Either generate the genesis or inject it.
|
||||||
|
if i == 0 && !n.bootstrapped {
|
||||||
|
opts = append(opts, node.Override(new(modules.Genesis), testing2.MakeGenesisMem(&n.genesisBlock, *gtempl)))
|
||||||
|
} else {
|
||||||
|
opts = append(opts, node.Override(new(modules.Genesis), modules.LoadGenesis(n.genesisBlock.Bytes())))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Are we mocking proofs?
|
||||||
|
if n.options.mockProofs {
|
||||||
|
opts = append(opts,
|
||||||
|
node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
|
||||||
|
node.Override(new(ffiwrapper.Prover), mock.MockProver),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call option builders, passing active nodes as the parameter
|
||||||
|
for _, bopt := range full.options.optBuilders {
|
||||||
|
opts = append(opts, bopt(n.active.fullnodes))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Construct the full node.
|
||||||
|
stop, err := node.New(ctx, opts...)
|
||||||
|
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
addr, err := full.WalletImport(context.Background(), &full.DefaultKey.KeyInfo)
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
err = full.WalletSetDefault(context.Background(), addr)
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
// Are we hitting this node through its RPC?
|
||||||
|
if full.options.rpc {
|
||||||
|
withRPC := fullRpc(n.t, full)
|
||||||
|
n.inactive.fullnodes[i] = withRPC
|
||||||
|
}
|
||||||
|
|
||||||
|
n.t.Cleanup(func() { _ = stop(context.Background()) })
|
||||||
|
|
||||||
|
n.active.fullnodes = append(n.active.fullnodes, full)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we are here, we have processed all inactive fullnodes and moved them
|
||||||
|
// to active, so clear the slice.
|
||||||
|
n.inactive.fullnodes = n.inactive.fullnodes[:0]
|
||||||
|
|
||||||
|
// Link all the nodes.
|
||||||
|
err := n.mn.LinkAll()
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
// ---------------------
|
||||||
|
// MINERS
|
||||||
|
// ---------------------
|
||||||
|
|
||||||
|
// Create all inactive miners.
|
||||||
|
for i, m := range n.inactive.miners {
|
||||||
|
if n.bootstrapped {
|
||||||
|
// this is a miner created after genesis, so it won't have a preseal.
|
||||||
|
// we need to create it on chain.
|
||||||
|
params, aerr := actors.SerializeParams(&power2.CreateMinerParams{
|
||||||
|
Owner: m.OwnerKey.Address,
|
||||||
|
Worker: m.OwnerKey.Address,
|
||||||
|
SealProofType: m.options.proofType,
|
||||||
|
Peer: abi.PeerID(m.Libp2p.PeerID),
|
||||||
|
})
|
||||||
|
require.NoError(n.t, aerr)
|
||||||
|
|
||||||
|
createStorageMinerMsg := &types.Message{
|
||||||
|
From: m.OwnerKey.Address,
|
||||||
|
To: power.Address,
|
||||||
|
Value: big.Zero(),
|
||||||
|
|
||||||
|
Method: power.Methods.CreateMiner,
|
||||||
|
Params: params,
|
||||||
|
|
||||||
|
GasLimit: 0,
|
||||||
|
GasPremium: big.NewInt(5252),
|
||||||
|
}
|
||||||
|
signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, nil)
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
mw, err := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode)
|
||||||
|
|
||||||
|
var retval power2.CreateMinerReturn
|
||||||
|
err = retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return))
|
||||||
|
require.NoError(n.t, err, "failed to create miner")
|
||||||
|
|
||||||
|
m.ActorAddr = retval.IDAddress
|
||||||
|
}
|
||||||
|
|
||||||
|
has, err := m.FullNode.WalletHas(ctx, m.OwnerKey.Address)
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
// Only import the owner's full key into our companion full node, if we
|
||||||
|
// don't have it still.
|
||||||
|
if !has {
|
||||||
|
_, err = m.FullNode.WalletImport(ctx, &m.OwnerKey.KeyInfo)
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// // Set it as the default address.
|
||||||
|
// err = m.FullNode.WalletSetDefault(ctx, m.OwnerAddr.Address)
|
||||||
|
// require.NoError(n.t, err)
|
||||||
|
|
||||||
|
r := repo.NewMemory(nil)
|
||||||
|
|
||||||
|
lr, err := r.Lock(repo.StorageMiner)
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
ks, err := lr.KeyStore()
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
pk, err := m.Libp2p.PrivKey.Bytes()
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
err = ks.Put("libp2p-host", types.KeyInfo{
|
||||||
|
Type: "libp2p-host",
|
||||||
|
PrivateKey: pk,
|
||||||
|
})
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
ds, err := lr.Datastore(context.TODO(), "/metadata")
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
err = ds.Put(datastore.NewKey("miner-address"), m.ActorAddr.Bytes())
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
nic := storedcounter.New(ds, datastore.NewKey(modules.StorageCounterDSPrefix))
|
||||||
|
for i := 0; i < m.options.sectors; i++ {
|
||||||
|
_, err := nic.Next()
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
}
|
||||||
|
_, err = nic.Next()
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
err = lr.Close()
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)})
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
msg := &types.Message{
|
||||||
|
From: m.OwnerKey.Address,
|
||||||
|
To: m.ActorAddr,
|
||||||
|
Method: miner.Methods.ChangePeerID,
|
||||||
|
Params: enc,
|
||||||
|
Value: types.NewInt(0),
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = m.FullNode.MpoolPushMessage(ctx, msg, nil)
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
var mineBlock = make(chan lotusminer.MineReq)
|
||||||
|
opts := []node.Option{
|
||||||
|
node.StorageMiner(&m.StorageMiner),
|
||||||
|
node.Online(),
|
||||||
|
node.Repo(r),
|
||||||
|
node.Test(),
|
||||||
|
|
||||||
|
node.MockHost(n.mn),
|
||||||
|
|
||||||
|
node.Override(new(v1api.FullNode), m.FullNode.FullNode),
|
||||||
|
node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, m.ActorAddr)),
|
||||||
|
|
||||||
|
// disable resource filtering so that local worker gets assigned tasks
|
||||||
|
// regardless of system pressure.
|
||||||
|
node.Override(new(sectorstorage.SealerConfig), func() sectorstorage.SealerConfig {
|
||||||
|
scfg := config.DefaultStorageMiner()
|
||||||
|
scfg.Storage.ResourceFiltering = sectorstorage.ResourceFilteringDisabled
|
||||||
|
return scfg.Storage
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
|
||||||
|
// append any node builder options.
|
||||||
|
opts = append(opts, m.options.extraNodeOpts...)
|
||||||
|
|
||||||
|
idAddr, err := address.IDFromAddress(m.ActorAddr)
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
// preload preseals if the network still hasn't bootstrapped.
|
||||||
|
var presealSectors []abi.SectorID
|
||||||
|
if !n.bootstrapped {
|
||||||
|
sectors := n.genesis.miners[i].Sectors
|
||||||
|
for _, sector := range sectors {
|
||||||
|
presealSectors = append(presealSectors, abi.SectorID{
|
||||||
|
Miner: abi.ActorID(idAddr),
|
||||||
|
Number: sector.SectorID,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if n.options.mockProofs {
|
||||||
|
opts = append(opts,
|
||||||
|
node.Override(new(*mock.SectorMgr), func() (*mock.SectorMgr, error) {
|
||||||
|
return mock.NewMockSectorMgr(presealSectors), nil
|
||||||
|
}),
|
||||||
|
node.Override(new(sectorstorage.SectorManager), node.From(new(*mock.SectorMgr))),
|
||||||
|
node.Override(new(sectorstorage.Unsealer), node.From(new(*mock.SectorMgr))),
|
||||||
|
node.Override(new(sectorstorage.PieceProvider), node.From(new(*mock.SectorMgr))),
|
||||||
|
|
||||||
|
node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
|
||||||
|
node.Override(new(ffiwrapper.Prover), mock.MockProver),
|
||||||
|
node.Unset(new(*sectorstorage.Manager)),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// start node
|
||||||
|
stop, err := node.New(ctx, opts...)
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
// using real proofs, therefore need real sectors.
|
||||||
|
if !n.bootstrapped && !n.options.mockProofs {
|
||||||
|
err := m.StorageAddLocal(ctx, m.PresealDir)
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
n.t.Cleanup(func() { _ = stop(context.Background()) })
|
||||||
|
|
||||||
|
// Are we hitting this node through its RPC?
|
||||||
|
if m.options.rpc {
|
||||||
|
withRPC := minerRpc(n.t, m)
|
||||||
|
n.inactive.miners[i] = withRPC
|
||||||
|
}
|
||||||
|
|
||||||
|
mineOne := func(ctx context.Context, req lotusminer.MineReq) error {
|
||||||
|
select {
|
||||||
|
case mineBlock <- req:
|
||||||
|
return nil
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
m.MineOne = mineOne
|
||||||
|
m.Stop = stop
|
||||||
|
|
||||||
|
n.active.miners = append(n.active.miners, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we are here, we have processed all inactive miners and moved them
|
||||||
|
// to active, so clear the slice.
|
||||||
|
n.inactive.miners = n.inactive.miners[:0]
|
||||||
|
|
||||||
|
// Link all the nodes.
|
||||||
|
err = n.mn.LinkAll()
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
if !n.bootstrapped && len(n.active.miners) > 0 {
|
||||||
|
// We have *just* bootstrapped, so mine 2 blocks to setup some CE stuff in some actors
|
||||||
|
var wait sync.Mutex
|
||||||
|
wait.Lock()
|
||||||
|
|
||||||
|
observer := n.active.fullnodes[0]
|
||||||
|
|
||||||
|
bm := NewBlockMiner(n.t, n.active.miners[0])
|
||||||
|
n.t.Cleanup(bm.Stop)
|
||||||
|
|
||||||
|
bm.MineUntilBlock(ctx, observer, func(epoch abi.ChainEpoch) {
|
||||||
|
wait.Unlock()
|
||||||
|
})
|
||||||
|
wait.Lock()
|
||||||
|
bm.MineUntilBlock(ctx, observer, func(epoch abi.ChainEpoch) {
|
||||||
|
wait.Unlock()
|
||||||
|
})
|
||||||
|
wait.Lock()
|
||||||
|
}
|
||||||
|
|
||||||
|
n.bootstrapped = true
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// InterconnectAll connects all miners and full nodes to one another.
|
||||||
|
func (n *Ensemble) InterconnectAll() *Ensemble {
|
||||||
|
// connect full nodes to miners.
|
||||||
|
for _, from := range n.active.fullnodes {
|
||||||
|
for _, to := range n.active.miners {
|
||||||
|
// []*TestMiner to []api.CommonAPI type coercion not possible
|
||||||
|
// so cannot use variadic form.
|
||||||
|
n.Connect(from, to)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// connect full nodes between each other, skipping ourselves.
|
||||||
|
last := len(n.active.fullnodes) - 1
|
||||||
|
for i, from := range n.active.fullnodes {
|
||||||
|
if i == last {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, to := range n.active.fullnodes[i+1:] {
|
||||||
|
n.Connect(from, to)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// Connect connects one full node to the provided full nodes.
|
||||||
|
func (n *Ensemble) Connect(from api.Common, to ...api.Common) *Ensemble {
|
||||||
|
addr, err := from.NetAddrsListen(context.Background())
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
|
||||||
|
for _, other := range to {
|
||||||
|
err = other.NetConnect(context.Background(), addr)
|
||||||
|
require.NoError(n.t, err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// BeginMining kicks off mining for the specified miners. If nil or 0-length,
|
||||||
|
// it will kick off mining for all enrolled and active miners. It also adds a
|
||||||
|
// cleanup function to stop all mining operations on test teardown.
|
||||||
|
func (n *Ensemble) BeginMining(blocktime time.Duration, miners ...*TestMiner) []*BlockMiner {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
// wait one second to make sure that nodes are connected and have handshaken.
|
||||||
|
// TODO make this deterministic by listening to identify events on the
|
||||||
|
// libp2p eventbus instead (or something else).
|
||||||
|
time.Sleep(1 * time.Second)
|
||||||
|
|
||||||
|
var bms []*BlockMiner
|
||||||
|
if len(miners) == 0 {
|
||||||
|
// no miners have been provided explicitly, instantiate block miners
|
||||||
|
// for all active miners that aren't still mining.
|
||||||
|
for _, m := range n.active.miners {
|
||||||
|
if _, ok := n.active.bms[m]; ok {
|
||||||
|
continue // skip, already have a block miner
|
||||||
|
}
|
||||||
|
miners = append(miners, m)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, m := range miners {
|
||||||
|
bm := NewBlockMiner(n.t, m)
|
||||||
|
bm.MineBlocks(ctx, blocktime)
|
||||||
|
n.t.Cleanup(bm.Stop)
|
||||||
|
|
||||||
|
bms = append(bms, bm)
|
||||||
|
|
||||||
|
n.active.bms[m] = bm
|
||||||
|
}
|
||||||
|
|
||||||
|
return bms
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *Ensemble) generateGenesis() *genesis.Template {
|
||||||
|
var verifRoot = gen.DefaultVerifregRootkeyActor
|
||||||
|
if k := n.options.verifiedRoot.key; k != nil {
|
||||||
|
verifRoot = genesis.Actor{
|
||||||
|
Type: genesis.TAccount,
|
||||||
|
Balance: n.options.verifiedRoot.initialBalance,
|
||||||
|
Meta: (&genesis.AccountMeta{Owner: k.Address}).ActorMeta(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
templ := &genesis.Template{
|
||||||
|
NetworkVersion: network.Version0,
|
||||||
|
Accounts: n.genesis.accounts,
|
||||||
|
Miners: n.genesis.miners,
|
||||||
|
NetworkName: "test",
|
||||||
|
Timestamp: uint64(time.Now().Unix() - int64(n.options.pastOffset.Seconds())),
|
||||||
|
VerifregRootKey: verifRoot,
|
||||||
|
RemainderAccount: gen.DefaultRemainderAccountActor,
|
||||||
|
}
|
||||||
|
|
||||||
|
return templ
|
||||||
|
}
|
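The doc comment at the top of this file describes the chainable builder flow; a compact sketch of the non-preset path, spelling the steps out one by one (block time and option choices here are arbitrary):

package itests

import (
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit"
)

func TestEnsembleByHandSketch(t *testing.T) {
	var (
		full  kit.TestFullNode
		miner kit.TestMiner
	)

	ens := kit.NewEnsemble(t, kit.MockProofs())
	ens.FullNode(&full).      // enroll a full node
		Miner(&miner, &full). // enroll a miner, using the full node as its chain daemon
		Start()               // forge genesis and construct both nodes

	// Nodes are constructed but idle until connected and mining.
	ens.InterconnectAll().BeginMining(5 * time.Millisecond)
}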
55	itests/kit/ensemble_opts.go	Normal file
@ -0,0 +1,55 @@
package kit

import (
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/chain/wallet"
)

type EnsembleOpt func(opts *ensembleOpts) error

type genesisAccount struct {
	key            *wallet.Key
	initialBalance abi.TokenAmount
}

type ensembleOpts struct {
	pastOffset   time.Duration
	verifiedRoot genesisAccount
	accounts     []genesisAccount
	mockProofs   bool
}

var DefaultEnsembleOpts = ensembleOpts{
	pastOffset: 10000000 * time.Second, // time sufficiently in the past to trigger catch-up mining.
}

// MockProofs activates mock proofs for the entire ensemble.
func MockProofs() EnsembleOpt {
	return func(opts *ensembleOpts) error {
		opts.mockProofs = true
		return nil
	}
}

// RootVerifier specifies the key to be enlisted as the verified registry root,
// as well as the initial balance to be attributed during genesis.
func RootVerifier(key *wallet.Key, balance abi.TokenAmount) EnsembleOpt {
	return func(opts *ensembleOpts) error {
		opts.verifiedRoot.key = key
		opts.verifiedRoot.initialBalance = balance
		return nil
	}
}

// Account sets up an account at genesis with the specified key and balance.
func Account(key *wallet.Key, balance abi.TokenAmount) EnsembleOpt {
	return func(opts *ensembleOpts) error {
		opts.accounts = append(opts.accounts, genesisAccount{
			key:            key,
			initialBalance: balance,
		})
		return nil
	}
}
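Both option constructors just record a key and a balance to be materialized at genesis. A sketch of wiring a verified-registry root and an extra funded account into an ensemble; the key type and balances are arbitrary choices:

package itests

import (
	"testing"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/wallet"
	"github.com/filecoin-project/lotus/itests/kit"
)

func TestGenesisAccountsSketch(t *testing.T) {
	rootKey, err := wallet.GenerateKey(types.KTSecp256k1)
	require.NoError(t, err)

	userKey, err := wallet.GenerateKey(types.KTSecp256k1)
	require.NoError(t, err)

	balance := abi.NewTokenAmount(1e18) // 1 FIL, expressed in attoFIL

	_, _, ens := kit.EnsembleMinimal(t,
		kit.MockProofs(),
		kit.RootVerifier(rootKey, balance),
		kit.Account(userKey, balance),
	)
	_ = ens
}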
70	itests/kit/ensemble_presets.go	Normal file
@ -0,0 +1,70 @@
package kit

import "testing"

// EnsembleMinimal creates and starts an Ensemble with a single full node and a single miner.
// It does not interconnect nodes nor does it begin mining.
//
// This function supports passing both ensemble and node functional options.
// Functional options are applied to all nodes.
func EnsembleMinimal(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *Ensemble) {
	eopts, nopts := siftOptions(t, opts)

	var (
		full  TestFullNode
		miner TestMiner
	)
	ens := NewEnsemble(t, eopts...).FullNode(&full, nopts...).Miner(&miner, &full, nopts...).Start()
	return &full, &miner, ens
}

// EnsembleTwoOne creates and starts an Ensemble with two full nodes and one miner.
// It does not interconnect nodes nor does it begin mining.
//
// This function supports passing both ensemble and node functional options.
// Functional options are applied to all nodes.
func EnsembleTwoOne(t *testing.T, opts ...interface{}) (*TestFullNode, *TestFullNode, *TestMiner, *Ensemble) {
	eopts, nopts := siftOptions(t, opts)

	var (
		one, two TestFullNode
		miner    TestMiner
	)
	ens := NewEnsemble(t, eopts...).FullNode(&one, nopts...).FullNode(&two, nopts...).Miner(&miner, &one, nopts...).Start()
	return &one, &two, &miner, ens
}

// EnsembleOneTwo creates and starts an Ensemble with one full node and two miners.
// It does not interconnect nodes nor does it begin mining.
//
// This function supports passing both ensemble and node functional options.
// Functional options are applied to all nodes.
func EnsembleOneTwo(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *TestMiner, *Ensemble) {
	eopts, nopts := siftOptions(t, opts)

	var (
		full     TestFullNode
		one, two TestMiner
	)
	ens := NewEnsemble(t, eopts...).
		FullNode(&full, nopts...).
		Miner(&one, &full, nopts...).
		Miner(&two, &full, nopts...).
		Start()

	return &full, &one, &two, ens
}

func siftOptions(t *testing.T, opts []interface{}) (eopts []EnsembleOpt, nopts []NodeOpt) {
	for _, v := range opts {
		switch o := v.(type) {
		case EnsembleOpt:
			eopts = append(eopts, o)
		case NodeOpt:
			nopts = append(nopts, o)
		default:
			t.Fatalf("invalid option type: %T", o)
		}
	}
	return eopts, nopts
}
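The presets only differ in how many nodes they enroll and in the shape of their return values; siftOptions lets ensemble-level and node-level options travel through the same variadic list. For example, the one-full-node / two-miner preset (option and block-time choices are arbitrary):

package itests

import (
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit"
)

func TestTwoMinersSketch(t *testing.T) {
	full, minerA, minerB, ens := kit.EnsembleOneTwo(t, kit.MockProofs())
	ens.InterconnectAll().BeginMining(10 * time.Millisecond)

	// Both miners share the same chain daemon and can now be exercised
	// independently, e.g. by making deals against each of them.
	_, _, _ = full, minerA, minerB
}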
58	itests/kit/files.go	Normal file
@ -0,0 +1,58 @@
package kit

import (
	"bytes"
	"io"
	"math/rand"
	"os"
	"testing"

	"github.com/minio/blake2b-simd"

	"github.com/stretchr/testify/require"
)

// CreateRandomFile creates a random file with the provided seed and the
// provided size.
func CreateRandomFile(t *testing.T, rseed, size int) (path string) {
	if size == 0 {
		size = 1600
	}

	source := io.LimitReader(rand.New(rand.NewSource(int64(rseed))), int64(size))

	file, err := os.CreateTemp(t.TempDir(), "sourcefile.dat")
	require.NoError(t, err)

	n, err := io.Copy(file, source)
	require.NoError(t, err)
	require.EqualValues(t, n, size)

	return file.Name()
}

// AssertFilesEqual compares two files by blake2b hash equality and
// fails the test if unequal.
func AssertFilesEqual(t *testing.T, left, right string) {
	// initialize hashes.
	leftH, rightH := blake2b.New256(), blake2b.New256()

	// open files.
	leftF, err := os.Open(left)
	require.NoError(t, err)

	rightF, err := os.Open(right)
	require.NoError(t, err)

	// feed hash functions.
	_, err = io.Copy(leftH, leftF)
	require.NoError(t, err)

	_, err = io.Copy(rightH, rightF)
	require.NoError(t, err)

	// compute digests.
	leftD, rightD := leftH.Sum(nil), rightH.Sum(nil)

	require.True(t, bytes.Equal(leftD, rightD))
}
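CreateRandomFile and AssertFilesEqual are the bookends of a deal test: generate a deterministic payload, push it through a deal and retrieval, then compare digests. A self-contained sketch exercising just the two helpers, with a plain file copy standing in for the real deal round trip:

package itests

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/itests/kit"
)

func TestRandomFileHelpersSketch(t *testing.T) {
	in := kit.CreateRandomFile(t, 5, 2048) // seed 5, 2048 bytes

	// Stand-in for deal + retrieval: copy the payload byte for byte.
	data, err := os.ReadFile(in)
	require.NoError(t, err)

	out := filepath.Join(t.TempDir(), "retrieved.dat")
	require.NoError(t, os.WriteFile(out, data, 0644))

	kit.AssertFilesEqual(t, in, out)
}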
@ -5,6 +5,7 @@ import (
 	"testing"
 
 	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/stretchr/testify/require"
 
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/lotus/api"
@ -13,11 +14,9 @@ import (
 
 // SendFunds sends funds from the default wallet of the specified sender node
 // to the recipient address.
-func SendFunds(ctx context.Context, t *testing.T, sender TestFullNode, recipient address.Address, amount abi.TokenAmount) {
+func SendFunds(ctx context.Context, t *testing.T, sender *TestFullNode, recipient address.Address, amount abi.TokenAmount) {
 	senderAddr, err := sender.WalletDefaultAddress(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	msg := &types.Message{
 		From: senderAddr,
@ -26,14 +25,10 @@ func SendFunds(ctx context.Context, t *testing.T, sender TestFullNode, recipient
 	}
 
 	sm, err := sender.MpoolPushMessage(ctx, msg, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	res, err := sender.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if res.Receipt.ExitCode != 0 {
-		t.Fatal("did not successfully send money")
-	}
+	require.NoError(t, err)
+	require.EqualValues(t, 0, res.Receipt.ExitCode, "did not successfully send funds")
 }
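With SendFunds now taking a *TestFullNode and asserting via require, funding a second node follows the same pattern the removed scaffolding used to spell out inline. A sketch, assuming the two clients come from the EnsembleTwoOne preset and the network is already mining:

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/itests/kit"
)

func TestSendFundsSketch(t *testing.T) {
	ctx := context.Background()

	client1, client2, _, ens := kit.EnsembleTwoOne(t, kit.MockProofs())
	ens.InterconnectAll().BeginMining(10 * time.Millisecond)

	// Create a fresh address on the second node and fund it from the first,
	// so the second node can pay for its own messages.
	addr, err := client2.WalletNew(ctx, types.KTSecp256k1)
	require.NoError(t, err)

	kit.SendFunds(ctx, t, client1, addr, abi.NewTokenAmount(1e18))
}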
@ -3,7 +3,6 @@ package kit
 import (
 	"fmt"
 	"os"
-	"strings"
 
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/lotus/build"
@ -12,21 +11,19 @@ import (
 )
 
 func init() {
-	bin := os.Args[0]
-	if !strings.HasSuffix(bin, ".test") {
-		panic("package itests/kit must only be imported from tests")
-	}
-
 	_ = logging.SetLogLevel("*", "INFO")
 
 	policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
 	policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
 	policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
 
-	err := os.Setenv("BELLMAN_NO_GPU", "1")
-	if err != nil {
-		panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
-	}
 	build.InsecurePoStValidation = true
+
+	if err := os.Setenv("BELLMAN_NO_GPU", "1"); err != nil {
+		panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
+	}
+
+	if err := os.Setenv("LOTUS_DISABLE_WATCHDOG", "1"); err != nil {
+		panic(fmt.Sprintf("failed to set LOTUS_DISABLE_WATCHDOG env variable: %s", err))
+	}
 }
@ -8,7 +8,7 @@ import (
 func QuietMiningLogs() {
 	lotuslog.SetupLogLevels()
 
-	_ = logging.SetLogLevel("miner", "ERROR")
+	_ = logging.SetLogLevel("miner", "ERROR") // set this to INFO to watch mining happen.
 	_ = logging.SetLogLevel("chainstore", "ERROR")
 	_ = logging.SetLogLevel("chain", "ERROR")
 	_ = logging.SetLogLevel("sub", "ERROR")
@ -1,87 +0,0 @@
|
|||||||
package kit
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
|
||||||
)
|
|
||||||
|
|
||||||
func StartOneNodeOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) (TestFullNode, address.Address) {
|
|
||||||
n, sn := RPCMockMinerBuilder(t, OneFull, OneMiner)
|
|
||||||
|
|
||||||
full := n[0]
|
|
||||||
miner := sn[0]
|
|
||||||
|
|
||||||
// Get everyone connected
|
|
||||||
addrs, err := full.NetAddrsListen(ctx)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := miner.NetConnect(ctx, addrs); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start mining blocks
|
|
||||||
bm := NewBlockMiner(t, miner)
|
|
||||||
bm.MineBlocks(ctx, blocktime)
|
|
||||||
t.Cleanup(bm.Stop)
|
|
||||||
|
|
||||||
// Get the full node's wallet address
|
|
||||||
fullAddr, err := full.WalletDefaultAddress(ctx)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create mock CLI
|
|
||||||
return full, fullAddr
|
|
||||||
}
|
|
||||||
|
|
||||||
func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) ([]TestFullNode, []address.Address) {
|
|
||||||
n, sn := RPCMockMinerBuilder(t, TwoFull, OneMiner)
|
|
||||||
|
|
||||||
fullNode1 := n[0]
|
|
||||||
fullNode2 := n[1]
|
|
||||||
miner := sn[0]
|
|
||||||
|
|
||||||
// Get everyone connected
|
|
||||||
addrs, err := fullNode1.NetAddrsListen(ctx)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := fullNode2.NetConnect(ctx, addrs); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := miner.NetConnect(ctx, addrs); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start mining blocks
|
|
||||||
bm := NewBlockMiner(t, miner)
|
|
||||||
bm.MineBlocks(ctx, blocktime)
|
|
||||||
t.Cleanup(bm.Stop)
|
|
||||||
|
|
||||||
// Send some funds to register the second node
|
|
||||||
fullNodeAddr2, err := fullNode2.WalletNew(ctx, types.KTSecp256k1)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
SendFunds(ctx, t, fullNode1, fullNodeAddr2, abi.NewTokenAmount(1e18))
|
|
||||||
|
|
||||||
// Get the first node's address
|
|
||||||
fullNodeAddr1, err := fullNode1.WalletDefaultAddress(ctx)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create mock CLI
|
|
||||||
return n, []address.Address{fullNodeAddr1, fullNodeAddr2}
|
|
||||||
}
|
|
@ -1,670 +0,0 @@
|
|||||||
package kit
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"crypto/rand"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"sync"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/filecoin-project/go-state-types/network"
|
|
||||||
"github.com/filecoin-project/lotus/node/config"
|
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
|
||||||
"github.com/filecoin-project/go-state-types/big"
|
|
||||||
"github.com/filecoin-project/go-state-types/exitcode"
|
|
||||||
"github.com/filecoin-project/go-storedcounter"
|
|
||||||
"github.com/filecoin-project/lotus/api"
|
|
||||||
"github.com/filecoin-project/lotus/api/client"
|
|
||||||
"github.com/filecoin-project/lotus/api/v1api"
|
|
||||||
"github.com/filecoin-project/lotus/build"
|
|
||||||
"github.com/filecoin-project/lotus/chain"
|
|
||||||
"github.com/filecoin-project/lotus/chain/actors"
|
|
||||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
|
||||||
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
|
|
||||||
"github.com/filecoin-project/lotus/chain/gen"
|
|
||||||
genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
|
|
||||||
"github.com/filecoin-project/lotus/chain/messagepool"
|
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
|
||||||
"github.com/filecoin-project/lotus/chain/wallet"
|
|
||||||
"github.com/filecoin-project/lotus/cmd/lotus-seed/seed"
|
|
||||||
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
|
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
|
|
||||||
"github.com/filecoin-project/lotus/genesis"
|
|
||||||
lotusminer "github.com/filecoin-project/lotus/miner"
|
|
||||||
"github.com/filecoin-project/lotus/node"
|
|
||||||
"github.com/filecoin-project/lotus/node/modules"
|
|
||||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
|
||||||
testing2 "github.com/filecoin-project/lotus/node/modules/testing"
|
|
||||||
"github.com/filecoin-project/lotus/node/repo"
|
|
||||||
"github.com/filecoin-project/lotus/storage/mockstorage"
|
|
||||||
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
|
|
||||||
power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power"
|
|
||||||
"github.com/ipfs/go-datastore"
|
|
||||||
"github.com/libp2p/go-libp2p-core/crypto"
|
|
||||||
"github.com/libp2p/go-libp2p-core/peer"
|
|
||||||
mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
|
|
||||||
"github.com/multiformats/go-multiaddr"
|
|
||||||
manet "github.com/multiformats/go-multiaddr/net"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
chain.BootstrapPeerThreshold = 1
|
|
||||||
messagepool.HeadChangeCoalesceMinDelay = time.Microsecond
|
|
||||||
messagepool.HeadChangeCoalesceMaxDelay = 2 * time.Microsecond
|
|
||||||
messagepool.HeadChangeCoalesceMergeInterval = 100 * time.Nanosecond
|
|
||||||
}
|
|
||||||
|
|
||||||
func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Address, act address.Address, pk crypto.PrivKey, tnd TestFullNode, mn mocknet.Mocknet, opts node.Option) TestMiner {
|
|
||||||
r := repo.NewMemory(nil)
|
|
||||||
|
|
||||||
lr, err := r.Lock(repo.StorageMiner)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
ks, err := lr.KeyStore()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
kbytes, err := pk.Bytes()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = ks.Put("libp2p-host", types.KeyInfo{
|
|
||||||
Type: "libp2p-host",
|
|
||||||
PrivateKey: kbytes,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
ds, err := lr.Datastore(context.TODO(), "/metadata")
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = ds.Put(datastore.NewKey("miner-address"), act.Bytes())
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
nic := storedcounter.New(ds, datastore.NewKey(modules.StorageCounterDSPrefix))
|
|
||||||
for i := 0; i < GenesisPreseals; i++ {
|
|
||||||
_, err := nic.Next()
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
_, err = nic.Next()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = lr.Close()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
peerid, err := peer.IDFromPrivateKey(pk)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(peerid)})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
msg := &types.Message{
|
|
||||||
To: act,
|
|
||||||
From: waddr,
|
|
||||||
Method: miner.Methods.ChangePeerID,
|
|
||||||
Params: enc,
|
|
||||||
Value: types.NewInt(0),
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = tnd.MpoolPushMessage(ctx, msg, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// start node
|
|
||||||
var minerapi api.StorageMiner
|
|
||||||
|
|
||||||
mineBlock := make(chan lotusminer.MineReq)
|
|
||||||
stop, err := node.New(ctx,
|
|
||||||
node.StorageMiner(&minerapi),
|
|
||||||
node.Online(),
|
|
||||||
node.Repo(r),
|
|
||||||
node.Test(),
|
|
||||||
|
|
||||||
node.MockHost(mn),
|
|
||||||
|
|
||||||
node.Override(new(v1api.FullNode), tnd),
|
|
||||||
			node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, act)),

			node.Override(new(*sectorstorage.SealerConfig), func() *sectorstorage.SealerConfig {
				scfg := config.DefaultStorageMiner()
				scfg.Storage.ResourceFiltering = sectorstorage.ResourceFilteringDisabled
				return &scfg.Storage
			}),

			opts,
		)
		if err != nil {
			t.Fatalf("failed to construct node: %v", err)
		}

		t.Cleanup(func() { _ = stop(context.Background()) })

		/*// Bootstrap with full node
		remoteAddrs, err := tnd.NetAddrsListen(Ctx)
		require.NoError(t, err)

		err = minerapi.NetConnect(Ctx, remoteAddrs)
		require.NoError(t, err)*/
		mineOne := func(ctx context.Context, req lotusminer.MineReq) error {
			select {
			case mineBlock <- req:
				return nil
			case <-ctx.Done():
				return ctx.Err()
			}
		}

		return TestMiner{StorageMiner: minerapi, MineOne: mineOne, Stop: stop}
	}

	func storageBuilder(parentNode TestFullNode, mn mocknet.Mocknet, opts node.Option) MinerBuilder {
		return func(ctx context.Context, t *testing.T, spt abi.RegisteredSealProof, owner address.Address) TestMiner {
			pk, _, err := crypto.GenerateEd25519Key(rand.Reader)
			require.NoError(t, err)

			minerPid, err := peer.IDFromPrivateKey(pk)
			require.NoError(t, err)

			params, serr := actors.SerializeParams(&power2.CreateMinerParams{
				Owner:         owner,
				Worker:        owner,
				SealProofType: spt,
				Peer:          abi.PeerID(minerPid),
			})
			require.NoError(t, serr)

			createStorageMinerMsg := &types.Message{
				To:    power.Address,
				From:  owner,
				Value: big.Zero(),

				Method: power.Methods.CreateMiner,
				Params: params,

				GasLimit:   0,
				GasPremium: big.NewInt(5252),
			}

			signed, err := parentNode.MpoolPushMessage(ctx, createStorageMinerMsg, nil)
			require.NoError(t, err)

			mw, err := parentNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
			require.NoError(t, err)
			require.Equal(t, exitcode.Ok, mw.Receipt.ExitCode)

			var retval power2.CreateMinerReturn
			err = retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return))
			require.NoError(t, err)

			return CreateTestStorageNode(ctx, t, owner, retval.IDAddress, pk, parentNode, mn, opts)
		}
	}

	func Builder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
		return mockBuilderOpts(t, fullOpts, storage, false)
	}

	func RPCBuilder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
		return mockBuilderOpts(t, fullOpts, storage, true)
	}

	func MockMinerBuilder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
		return mockMinerBuilderOpts(t, fullOpts, storage, false)
	}

	func RPCMockMinerBuilder(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner) {
		return mockMinerBuilderOpts(t, fullOpts, storage, true)
	}

	func mockBuilderOpts(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner, rpc bool) ([]TestFullNode, []TestMiner) {
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		mn := mocknet.New(ctx)

		fulls := make([]TestFullNode, len(fullOpts))
		miners := make([]TestMiner, len(storage))

		// *****
		pk, _, err := crypto.GenerateEd25519Key(rand.Reader)
		require.NoError(t, err)

		minerPid, err := peer.IDFromPrivateKey(pk)
		require.NoError(t, err)

		var genbuf bytes.Buffer

		if len(storage) > 1 {
			panic("need more peer IDs")
		}
		// *****

		// PRESEAL SECTION, TRY TO REPLACE WITH BETTER IN THE FUTURE
		// TODO: would be great if there was a better way to fake the preseals

		var (
			genms   []genesis.Miner
			maddrs  []address.Address
			genaccs []genesis.Actor
			keys    []*wallet.Key
		)

		var presealDirs []string
		for i := 0; i < len(storage); i++ {
			maddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(i))
			if err != nil {
				t.Fatal(err)
			}
			tdir, err := ioutil.TempDir("", "preseal-memgen")
			if err != nil {
				t.Fatal(err)
			}
			genm, k, err := seed.PreSeal(maddr, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, GenesisPreseals, tdir, []byte("make genesis mem random"), nil, true)
			if err != nil {
				t.Fatal(err)
			}
			genm.PeerId = minerPid

			wk, err := wallet.NewKey(*k)
			if err != nil {
				return nil, nil
			}

			genaccs = append(genaccs, genesis.Actor{
				Type:    genesis.TAccount,
				Balance: big.Mul(big.NewInt(400000000), types.NewInt(build.FilecoinPrecision)),
				Meta:    (&genesis.AccountMeta{Owner: wk.Address}).ActorMeta(),
			})

			keys = append(keys, wk)
			presealDirs = append(presealDirs, tdir)
			maddrs = append(maddrs, maddr)
			genms = append(genms, *genm)
		}

		rkhKey, err := wallet.GenerateKey(types.KTSecp256k1)
		if err != nil {
			return nil, nil
		}

		vrk := genesis.Actor{
			Type:    genesis.TAccount,
			Balance: big.Mul(big.Div(big.NewInt(int64(build.FilBase)), big.NewInt(100)), big.NewInt(int64(build.FilecoinPrecision))),
			Meta:    (&genesis.AccountMeta{Owner: rkhKey.Address}).ActorMeta(),
		}
		keys = append(keys, rkhKey)

		templ := &genesis.Template{
			NetworkVersion:   network.Version0,
			Accounts:         genaccs,
			Miners:           genms,
			NetworkName:      "test",
			Timestamp:        uint64(time.Now().Unix() - 10000), // some time sufficiently far in the past
			VerifregRootKey:  vrk,
			RemainderAccount: gen.DefaultRemainderAccountActor,
		}

		// END PRESEAL SECTION

		for i := 0; i < len(fullOpts); i++ {
			var genesis node.Option
			if i == 0 {
				genesis = node.Override(new(modules.Genesis), testing2.MakeGenesisMem(&genbuf, *templ))
			} else {
				genesis = node.Override(new(modules.Genesis), modules.LoadGenesis(genbuf.Bytes()))
			}

			stop, err := node.New(ctx,
				node.FullAPI(&fulls[i].FullNode, node.Lite(fullOpts[i].Lite)),
				node.Online(),
				node.Repo(repo.NewMemory(nil)),
				node.MockHost(mn),
				node.Test(),

				genesis,

				fullOpts[i].Opts(fulls),
			)

			if err != nil {
				t.Fatal(err)
			}

			t.Cleanup(func() { _ = stop(context.Background()) })

			if rpc {
				fulls[i] = fullRpc(t, fulls[i])
			}

			fulls[i].Stb = storageBuilder(fulls[i], mn, node.Options())
		}

		if _, err := fulls[0].FullNode.WalletImport(ctx, &rkhKey.KeyInfo); err != nil {
			t.Fatal(err)
		}

		for i, def := range storage {
			// TODO: support non-bootstrap miners
			if i != 0 {
				t.Fatal("only one storage node supported")
			}
			if def.Full != 0 {
				t.Fatal("storage nodes only supported on the first full node")
			}

			f := fulls[def.Full]
			if _, err := f.FullNode.WalletImport(ctx, &keys[i].KeyInfo); err != nil {
				t.Fatal(err)
			}
			if err := f.FullNode.WalletSetDefault(ctx, keys[i].Address); err != nil {
				t.Fatal(err)
			}

			genMiner := maddrs[i]
			wa := genms[i].Worker

			opts := def.Opts
			if opts == nil {
				opts = node.Options()
			}
			miners[i] = CreateTestStorageNode(ctx, t, wa, genMiner, pk, f, mn, opts)
			if err := miners[i].StorageAddLocal(ctx, presealDirs[i]); err != nil {
				t.Fatalf("%+v", err)
			}
			/*
				sma := miners[i].StorageMiner.(*impl.StorageMinerAPI)

				psd := presealDirs[i]
			*/
			if rpc {
				miners[i] = storerRpc(t, miners[i])
			}
		}

		if err := mn.LinkAll(); err != nil {
			t.Fatal(err)
		}

		if len(miners) > 0 {
			// Mine 2 blocks to setup some CE stuff in some actors
			var wait sync.Mutex
			wait.Lock()

			bm := NewBlockMiner(t, miners[0])
			t.Cleanup(bm.Stop)

			bm.MineUntilBlock(ctx, fulls[0], func(epoch abi.ChainEpoch) {
				wait.Unlock()
			})

			wait.Lock()
			bm.MineUntilBlock(ctx, fulls[0], func(epoch abi.ChainEpoch) {
				wait.Unlock()
			})
			wait.Lock()
		}

		return fulls, miners
	}

	func mockMinerBuilderOpts(t *testing.T, fullOpts []FullNodeOpts, storage []StorageMiner, rpc bool) ([]TestFullNode, []TestMiner) {
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

		mn := mocknet.New(ctx)

		fulls := make([]TestFullNode, len(fullOpts))
		miners := make([]TestMiner, len(storage))

		var genbuf bytes.Buffer

		// PRESEAL SECTION, TRY TO REPLACE WITH BETTER IN THE FUTURE
		// TODO: would be great if there was a better way to fake the preseals

		var (
			genms   []genesis.Miner
			genaccs []genesis.Actor
			maddrs  []address.Address
			keys    []*wallet.Key
			pidKeys []crypto.PrivKey
		)
		for i := 0; i < len(storage); i++ {
			maddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(i))
			if err != nil {
				t.Fatal(err)
			}

			preseals := storage[i].Preseal
			if preseals == PresealGenesis {
				preseals = GenesisPreseals
			}

			genm, k, err := mockstorage.PreSeal(abi.RegisteredSealProof_StackedDrg2KiBV1, maddr, preseals)
			if err != nil {
				t.Fatal(err)
			}

			pk, _, err := crypto.GenerateEd25519Key(rand.Reader)
			require.NoError(t, err)

			minerPid, err := peer.IDFromPrivateKey(pk)
			require.NoError(t, err)

			genm.PeerId = minerPid

			wk, err := wallet.NewKey(*k)
			if err != nil {
				return nil, nil
			}

			genaccs = append(genaccs, genesis.Actor{
				Type:    genesis.TAccount,
				Balance: big.Mul(big.NewInt(400000000), types.NewInt(build.FilecoinPrecision)),
				Meta:    (&genesis.AccountMeta{Owner: wk.Address}).ActorMeta(),
			})

			keys = append(keys, wk)
			pidKeys = append(pidKeys, pk)
			maddrs = append(maddrs, maddr)
			genms = append(genms, *genm)
		}

		rkhKey, err := wallet.GenerateKey(types.KTSecp256k1)
		if err != nil {
			return nil, nil
		}

		vrk := genesis.Actor{
			Type:    genesis.TAccount,
			Balance: big.Mul(big.Div(big.NewInt(int64(build.FilBase)), big.NewInt(100)), big.NewInt(int64(build.FilecoinPrecision))),
			Meta:    (&genesis.AccountMeta{Owner: rkhKey.Address}).ActorMeta(),
		}
		keys = append(keys, rkhKey)

		templ := &genesis.Template{
			NetworkVersion:   network.Version0,
			Accounts:         genaccs,
			Miners:           genms,
			NetworkName:      "test",
			Timestamp:        uint64(time.Now().Unix()) - (build.BlockDelaySecs * 20000),
			VerifregRootKey:  vrk,
			RemainderAccount: gen.DefaultRemainderAccountActor,
		}

		// END PRESEAL SECTION

		for i := 0; i < len(fullOpts); i++ {
			var genesis node.Option
			if i == 0 {
				genesis = node.Override(new(modules.Genesis), testing2.MakeGenesisMem(&genbuf, *templ))
			} else {
				genesis = node.Override(new(modules.Genesis), modules.LoadGenesis(genbuf.Bytes()))
			}

			stop, err := node.New(ctx,
				node.FullAPI(&fulls[i].FullNode, node.Lite(fullOpts[i].Lite)),
				node.Online(),
				node.Repo(repo.NewMemory(nil)),
				node.MockHost(mn),
				node.Test(),

				node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
				node.Override(new(ffiwrapper.Prover), mock.MockProver),

				// so that we subscribe to pubsub topics immediately
				node.Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(true)),

				genesis,

				fullOpts[i].Opts(fulls),
			)
			if err != nil {
				t.Fatalf("%+v", err)
			}

			t.Cleanup(func() { _ = stop(context.Background()) })

			if rpc {
				fulls[i] = fullRpc(t, fulls[i])
			}

			fulls[i].Stb = storageBuilder(fulls[i], mn, node.Options(
				node.Override(new(*mock.SectorMgr), func() (*mock.SectorMgr, error) {
					return mock.NewMockSectorMgr(nil), nil
				}),

				node.Override(new(sectorstorage.SectorManager), node.From(new(*mock.SectorMgr))),
				node.Override(new(sectorstorage.Unsealer), node.From(new(*mock.SectorMgr))),
				node.Override(new(sectorstorage.PieceProvider), node.From(new(*mock.SectorMgr))),
				node.Override(new(*sectorstorage.SealerConfig), func() *sectorstorage.SealerConfig {
					scfg := config.DefaultStorageMiner()
					scfg.Storage.ResourceFiltering = sectorstorage.ResourceFilteringDisabled
					return &scfg.Storage
				}),

				node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
				node.Override(new(ffiwrapper.Prover), mock.MockProver),
				node.Unset(new(*sectorstorage.Manager)),
			))
		}

		if _, err := fulls[0].FullNode.WalletImport(ctx, &rkhKey.KeyInfo); err != nil {
			t.Fatal(err)
		}

		for i, def := range storage {
			// TODO: support non-bootstrap miners

			minerID := abi.ActorID(genesis2.MinerStart + uint64(i))

			if def.Full != 0 {
				t.Fatal("storage nodes only supported on the first full node")
			}

			f := fulls[def.Full]
			if _, err := f.FullNode.WalletImport(ctx, &keys[i].KeyInfo); err != nil {
				return nil, nil
			}
			if err := f.FullNode.WalletSetDefault(ctx, keys[i].Address); err != nil {
				return nil, nil
			}

			sectors := make([]abi.SectorID, len(genms[i].Sectors))
			for i, sector := range genms[i].Sectors {
				sectors[i] = abi.SectorID{
					Miner:  minerID,
					Number: sector.SectorID,
				}
			}

			opts := def.Opts
			if opts == nil {
				opts = node.Options()
			}
			miners[i] = CreateTestStorageNode(ctx, t, genms[i].Worker, maddrs[i], pidKeys[i], f, mn, node.Options(
				node.Override(new(*mock.SectorMgr), func() (*mock.SectorMgr, error) {
					return mock.NewMockSectorMgr(sectors), nil
				}),

				node.Override(new(sectorstorage.SectorManager), node.From(new(*mock.SectorMgr))),
				node.Override(new(sectorstorage.Unsealer), node.From(new(*mock.SectorMgr))),
				node.Override(new(sectorstorage.PieceProvider), node.From(new(*mock.SectorMgr))),

				node.Override(new(ffiwrapper.Verifier), mock.MockVerifier),
				node.Override(new(ffiwrapper.Prover), mock.MockProver),
				node.Unset(new(*sectorstorage.Manager)),
				opts,
			))

			if rpc {
				miners[i] = storerRpc(t, miners[i])
			}
		}

		if err := mn.LinkAll(); err != nil {
			t.Fatal(err)
		}

		bm := NewBlockMiner(t, miners[0])

		if len(miners) > 0 {
			// Mine 2 blocks to setup some CE stuff in some actors
			var wait sync.Mutex
			wait.Lock()

			bm.MineUntilBlock(ctx, fulls[0], func(abi.ChainEpoch) {
				wait.Unlock()
			})
			wait.Lock()
			bm.MineUntilBlock(ctx, fulls[0], func(abi.ChainEpoch) {
				wait.Unlock()
			})
			wait.Lock()
		}

		return fulls, miners
	}

	func CreateRPCServer(t *testing.T, handler http.Handler) (*httptest.Server, multiaddr.Multiaddr) {
		testServ := httptest.NewServer(handler)
		t.Cleanup(testServ.Close)
		t.Cleanup(testServ.CloseClientConnections)

		addr := testServ.Listener.Addr()
		maddr, err := manet.FromNetAddr(addr)
		require.NoError(t, err)
		return testServ, maddr
	}

	func fullRpc(t *testing.T, nd TestFullNode) TestFullNode {
		handler, err := node.FullNodeHandler(nd.FullNode, false)
		require.NoError(t, err)

		srv, maddr := CreateRPCServer(t, handler)

		var ret TestFullNode
		cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
		require.NoError(t, err)
		t.Cleanup(stop)
		ret.ListenAddr, ret.FullNode = maddr, cl

		return ret
	}

	func storerRpc(t *testing.T, nd TestMiner) TestMiner {
		handler, err := node.MinerHandler(nd.StorageMiner, false)
		require.NoError(t, err)

		srv, maddr := CreateRPCServer(t, handler)

		var ret TestMiner
		cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v0", nil)
		require.NoError(t, err)
		t.Cleanup(stop)

		ret.ListenAddr, ret.StorageMiner, ret.MineOne = maddr, cl, nd.MineOne
		return ret
	}
85  itests/kit/node_full.go  Normal file
@ -0,0 +1,85 @@
package kit

import (
	"context"
	"testing"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v1api"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/wallet"
	"github.com/multiformats/go-multiaddr"
	"github.com/stretchr/testify/require"
)

// TestFullNode represents a full node enrolled in an Ensemble.
type TestFullNode struct {
	v1api.FullNode

	t *testing.T

	// ListenAddr is the address on which an API server is listening, if an
	// API server is created for this Node.
	ListenAddr multiaddr.Multiaddr
	DefaultKey *wallet.Key

	options nodeOpts
}

// CreateImportFile creates a random file with the specified seed and size, and
// imports it into the full node.
func (f *TestFullNode) CreateImportFile(ctx context.Context, rseed int, size int) (res *api.ImportRes, path string) {
	path = CreateRandomFile(f.t, rseed, size)
	res, err := f.ClientImport(ctx, api.FileRef{Path: path})
	require.NoError(f.t, err)
	return res, path
}

// WaitTillChain waits until a specified chain condition is met. It returns
// the first tipset where the condition is met.
func (f *TestFullNode) WaitTillChain(ctx context.Context, pred ChainPredicate) *types.TipSet {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	heads, err := f.ChainNotify(ctx)
	require.NoError(f.t, err)

	for chg := range heads {
		for _, c := range chg {
			if c.Type != "apply" {
				continue
			}
			if ts := c.Val; pred(ts) {
				return ts
			}
		}
	}
	require.Fail(f.t, "chain condition not met")
	return nil
}

// ChainPredicate encapsulates a chain condition.
type ChainPredicate func(set *types.TipSet) bool

// HeightAtLeast returns a ChainPredicate that is satisfied when the chain
// height is equal or higher to the target.
func HeightAtLeast(target abi.ChainEpoch) ChainPredicate {
	return func(ts *types.TipSet) bool {
		return ts.Height() >= target
	}
}

// BlockMinedBy returns a ChainPredicate that is satisfied when we observe the
// first block mined by the specified miner.
func BlockMinedBy(miner address.Address) ChainPredicate {
	return func(ts *types.TipSet) bool {
		for _, b := range ts.Blocks() {
			if b.Miner == miner {
				return true
			}
		}
		return false
	}
}
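A minimal sketch of how WaitTillChain and the chain predicates above might be used from a test, assuming the ensemble builder exercised by the rewritten tests later in this diff (kit.NewEnsemble, FullNode, Miner, Start, InterconnectAll, BeginMining); the test name and the target epoch are illustrative only.

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit"
)

func TestWaitTillChainSketch(t *testing.T) {
	kit.QuietMiningLogs()

	var (
		full  kit.TestFullNode
		miner kit.TestMiner
	)
	ens := kit.NewEnsemble(t, kit.MockProofs()).
		FullNode(&full).
		Miner(&miner, &full).
		Start().
		InterconnectAll()
	ens.BeginMining(5 * time.Millisecond)

	// Block until the chain reaches epoch 10, then inspect the returned tipset.
	ts := full.WaitTillChain(context.Background(), kit.HeightAtLeast(10))
	t.Logf("reached height %d", ts.Height())
}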
121  itests/kit/node_miner.go  Normal file
@ -0,0 +1,121 @@
package kit

import (
	"context"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/wallet"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
	"github.com/filecoin-project/lotus/miner"
	libp2pcrypto "github.com/libp2p/go-libp2p-core/crypto"
	"github.com/libp2p/go-libp2p-core/peer"
	"github.com/multiformats/go-multiaddr"
	"github.com/stretchr/testify/require"
)

// TestMiner represents a miner enrolled in an Ensemble.
type TestMiner struct {
	api.StorageMiner

	t *testing.T

	// ListenAddr is the address on which an API server is listening, if an
	// API server is created for this Node
	ListenAddr multiaddr.Multiaddr

	ActorAddr address.Address
	OwnerKey  *wallet.Key
	MineOne   func(context.Context, miner.MineReq) error
	Stop      func(context.Context) error

	FullNode   *TestFullNode
	PresealDir string

	Libp2p struct {
		PeerID  peer.ID
		PrivKey libp2pcrypto.PrivKey
	}

	options nodeOpts
}

func (tm *TestMiner) PledgeSectors(ctx context.Context, n, existing int, blockNotif <-chan struct{}) {
	toCheck := tm.StartPledge(ctx, n, existing, blockNotif)

	for len(toCheck) > 0 {
		tm.FlushSealingBatches(ctx)

		states := map[api.SectorState]int{}
		for n := range toCheck {
			st, err := tm.StorageMiner.SectorsStatus(ctx, n, false)
			require.NoError(tm.t, err)
			states[st.State]++
			if st.State == api.SectorState(sealing.Proving) {
				delete(toCheck, n)
			}
			if strings.Contains(string(st.State), "Fail") {
				tm.t.Fatal("sector in a failed state", st.State)
			}
		}

		build.Clock.Sleep(100 * time.Millisecond)
		fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
	}

}

func (tm *TestMiner) StartPledge(ctx context.Context, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} {
	for i := 0; i < n; i++ {
		if i%3 == 0 && blockNotif != nil {
			<-blockNotif
			tm.t.Log("WAIT")
		}
		tm.t.Logf("PLEDGING %d", i)
		_, err := tm.StorageMiner.PledgeSector(ctx)
		require.NoError(tm.t, err)
	}

	for {
		s, err := tm.StorageMiner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
		require.NoError(tm.t, err)
		fmt.Printf("Sectors: %d\n", len(s))
		if len(s) >= n+existing {
			break
		}

		build.Clock.Sleep(100 * time.Millisecond)
	}

	fmt.Printf("All sectors is fsm\n")

	s, err := tm.StorageMiner.SectorsList(ctx)
	require.NoError(tm.t, err)

	toCheck := map[abi.SectorNumber]struct{}{}
	for _, number := range s {
		toCheck[number] = struct{}{}
	}

	return toCheck
}

func (tm *TestMiner) FlushSealingBatches(ctx context.Context) {
	pcb, err := tm.StorageMiner.SectorPreCommitFlush(ctx)
	require.NoError(tm.t, err)
	if pcb != nil {
		fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb)
	}

	cb, err := tm.StorageMiner.SectorCommitFlush(ctx)
	require.NoError(tm.t, err)
	if cb != nil {
		fmt.Printf("COMMIT BATCH: %+v\n", cb)
	}
}
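A sketch of driving the PledgeSectors helper above from a test, assuming kit.EnsembleMinimal returns the full node, miner and ensemble handles the way the rewritten tests further down use it; the sector count and block time are illustrative.

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit"
)

func TestPledgeSectorsSketch(t *testing.T) {
	kit.QuietMiningLogs()

	_, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
	ens.InterconnectAll().BeginMining(10 * time.Millisecond)

	// Pledge three sectors on top of the genesis preseals and block until
	// they all reach the Proving state (PledgeSectors fails the test otherwise).
	miner.PledgeSectors(context.Background(), 3, 0, nil)
}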
107  itests/kit/node_opts.go  Normal file
@ -0,0 +1,107 @@
package kit

import (
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/wallet"
	"github.com/filecoin-project/lotus/node"
)

// DefaultPresealsPerBootstrapMiner is the number of preseals that every
// bootstrap miner has by default. It can be overridden through the
// PresealSectors option.
const DefaultPresealsPerBootstrapMiner = 2

const TestSpt = abi.RegisteredSealProof_StackedDrg2KiBV1_1

// nodeOpts is an options accumulating struct, where functional options are
// merged into.
type nodeOpts struct {
	balance       abi.TokenAmount
	lite          bool
	sectors       int
	rpc           bool
	ownerKey      *wallet.Key
	extraNodeOpts []node.Option
	optBuilders   []OptBuilder
	proofType     abi.RegisteredSealProof
}

// DefaultNodeOpts are the default options that will be applied to test nodes.
var DefaultNodeOpts = nodeOpts{
	balance:   big.Mul(big.NewInt(100000000), types.NewInt(build.FilecoinPrecision)),
	sectors:   DefaultPresealsPerBootstrapMiner,
	proofType: abi.RegisteredSealProof_StackedDrg2KiBV1_1, // default _concrete_ proof type for non-genesis miners (notice the _1) for new actors versions.
}

// OptBuilder is used to create an option after some other node is already
// active. Takes all active nodes as a parameter.
type OptBuilder func(activeNodes []*TestFullNode) node.Option

// NodeOpt is a functional option for test nodes.
type NodeOpt func(opts *nodeOpts) error

// OwnerBalance specifies the balance to be attributed to a miner's owner
// account. Only relevant when creating a miner.
func OwnerBalance(balance abi.TokenAmount) NodeOpt {
	return func(opts *nodeOpts) error {
		opts.balance = balance
		return nil
	}
}

// LiteNode specifies that this node will be a lite node. Only relevant when
// creating a fullnode.
func LiteNode() NodeOpt {
	return func(opts *nodeOpts) error {
		opts.lite = true
		return nil
	}
}

// PresealSectors specifies the amount of preseal sectors to give to a miner
// at genesis. Only relevant when creating a miner.
func PresealSectors(sectors int) NodeOpt {
	return func(opts *nodeOpts) error {
		opts.sectors = sectors
		return nil
	}
}

// ThroughRPC makes interactions with this node throughout the test flow through
// the JSON-RPC API.
func ThroughRPC() NodeOpt {
	return func(opts *nodeOpts) error {
		opts.rpc = true
		return nil
	}
}

// OwnerAddr sets the owner address of a miner. Only relevant when creating
// a miner.
func OwnerAddr(wk *wallet.Key) NodeOpt {
	return func(opts *nodeOpts) error {
		opts.ownerKey = wk
		return nil
	}
}

// ConstructorOpts are Lotus node constructor options that are passed as-is to
// the node.
func ConstructorOpts(extra ...node.Option) NodeOpt {
	return func(opts *nodeOpts) error {
		opts.extraNodeOpts = extra
		return nil
	}
}

// ProofType sets the proof type for this node. If you're using new actor
// versions, this should be a _1 proof type.
func ProofType(proofType abi.RegisteredSealProof) NodeOpt {
	return func(opts *nodeOpts) error {
		opts.proofType = proofType
		return nil
	}
}
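The options above are plain functional options; a brief sketch of composing them when standing up nodes, following the shape the rewritten TestMultisig below uses. Whether EnsembleMinimal forwards every NodeOpt (for example PresealSectors or OwnerBalance) to the nodes it creates is an assumption here; only MockProofs and ThroughRPC appear in this diff, so only those are used in the code.

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/stretchr/testify/require"
)

func TestNodeOptsSketch(t *testing.T) {
	kit.QuietMiningLogs()

	// ThroughRPC is a NodeOpt defined above; MockProofs is the ensemble-level
	// option used throughout the rewritten tests in this PR.
	client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC())
	ens.InterconnectAll().BeginMining(5 * time.Millisecond)

	head, err := client.ChainHead(context.Background())
	require.NoError(t, err)
	t.Logf("head at %d, miner actor %s", head.Height(), miner.ActorAddr)
}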
90  itests/kit/node_opts_nv.go  Normal file
@ -0,0 +1,90 @@
package kit

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/node"
	"github.com/ipfs/go-cid"
)

// DefaultTestUpgradeSchedule
var DefaultTestUpgradeSchedule = stmgr.UpgradeSchedule{{
	Network:   network.Version9,
	Height:    1,
	Migration: stmgr.UpgradeActorsV2,
}, {
	Network:   network.Version10,
	Height:    2,
	Migration: stmgr.UpgradeActorsV3,
}, {
	Network:   network.Version12,
	Height:    3,
	Migration: stmgr.UpgradeActorsV4,
}, {
	Network:   network.Version13,
	Height:    4,
	Migration: stmgr.UpgradeActorsV5,
}}

func LatestActorsAt(upgradeHeight abi.ChainEpoch) node.Option {
	// Attention: Update this when introducing new actor versions or your tests will be sad
	return NetworkUpgradeAt(network.Version13, upgradeHeight)
}

// InstantaneousNetworkVersion starts the network instantaneously at the
// specified version in height 1.
func InstantaneousNetworkVersion(version network.Version) node.Option {
	// composes all migration functions
	var mf stmgr.MigrationFunc = func(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, oldState cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (newState cid.Cid, err error) {
		var state = oldState
		for _, u := range DefaultTestUpgradeSchedule {
			if u.Network > version {
				break
			}
			state, err = u.Migration(ctx, sm, cache, cb, state, height, ts)
			if err != nil {
				return cid.Undef, err
			}
		}
		return state, nil
	}
	return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{
		{Network: version, Height: 1, Migration: mf},
	})
}

func NetworkUpgradeAt(version network.Version, upgradeHeight abi.ChainEpoch) node.Option {
	schedule := stmgr.UpgradeSchedule{}
	for _, upgrade := range DefaultTestUpgradeSchedule {
		if upgrade.Network > version {
			break
		}

		schedule = append(schedule, upgrade)
	}

	if upgradeHeight > 0 {
		schedule[len(schedule)-1].Height = upgradeHeight
	}

	return node.Override(new(stmgr.UpgradeSchedule), schedule)
}

func SDRUpgradeAt(calico, persian abi.ChainEpoch) node.Option {
	return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
		Network:   network.Version6,
		Height:    1,
		Migration: stmgr.UpgradeActorsV2,
	}, {
		Network:   network.Version7,
		Height:    calico,
		Migration: stmgr.UpgradeCalico,
	}, {
		Network: network.Version8,
		Height:  persian,
	}})
}
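A sketch of wiring the upgrade-schedule helpers above into a test through ConstructorOpts. Passing -1 leaves the default heights of DefaultTestUpgradeSchedule untouched, since NetworkUpgradeAt only overrides the last height when the value is positive; the pairing with EnsembleMinimal mirrors the rewritten tests below and is illustrative.

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit"
)

func TestLatestActorsSketch(t *testing.T) {
	kit.QuietMiningLogs()

	client, _, ens := kit.EnsembleMinimal(t,
		kit.MockProofs(),
		kit.ConstructorOpts(kit.LatestActorsAt(-1)),
	)
	ens.InterconnectAll().BeginMining(5 * time.Millisecond)

	// Wait past the last scheduled upgrade (height 4 in DefaultTestUpgradeSchedule).
	client.WaitTillChain(context.Background(), kit.HeightAtLeast(5))
}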
@ -1,153 +0,0 @@
package kit

import (
	"context"
	"testing"

	"github.com/multiformats/go-multiaddr"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"

	lapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v1api"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/miner"
	"github.com/filecoin-project/lotus/node"
)

type MinerBuilder func(context.Context, *testing.T, abi.RegisteredSealProof, address.Address) TestMiner

type TestFullNode struct {
	v1api.FullNode
	// ListenAddr is the address on which an API server is listening, if an
	// API server is created for this Node
	ListenAddr multiaddr.Multiaddr

	Stb MinerBuilder
}

type TestMiner struct {
	lapi.StorageMiner
	// ListenAddr is the address on which an API server is listening, if an
	// API server is created for this Node
	ListenAddr multiaddr.Multiaddr

	MineOne func(context.Context, miner.MineReq) error
	Stop    func(context.Context) error
}

var PresealGenesis = -1

const GenesisPreseals = 2

const TestSpt = abi.RegisteredSealProof_StackedDrg2KiBV1_1

// Options for setting up a mock storage Miner
type StorageMiner struct {
	Full    int
	Opts    node.Option
	Preseal int
}

type OptionGenerator func([]TestFullNode) node.Option

// Options for setting up a mock full node
type FullNodeOpts struct {
	Lite bool            // run node in "lite" mode
	Opts OptionGenerator // generate dependency injection options
}

// APIBuilder is a function which is invoked in test suite to provide
// test nodes and networks
//
// fullOpts array defines options for each full node
// storage array defines storage nodes, numbers in the array specify full node
// index the storage node 'belongs' to
type APIBuilder func(t *testing.T, full []FullNodeOpts, storage []StorageMiner) ([]TestFullNode, []TestMiner)

func DefaultFullOpts(nFull int) []FullNodeOpts {
	full := make([]FullNodeOpts, nFull)
	for i := range full {
		full[i] = FullNodeOpts{
			Opts: func(nodes []TestFullNode) node.Option {
				return node.Options()
			},
		}
	}
	return full
}

var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
var OneFull = DefaultFullOpts(1)
var TwoFull = DefaultFullOpts(2)

var FullNodeWithLatestActorsAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
	// Attention: Update this when introducing new actor versions or your tests will be sad
	return FullNodeWithNetworkUpgradeAt(network.Version13, upgradeHeight)
}

var FullNodeWithNetworkUpgradeAt = func(version network.Version, upgradeHeight abi.ChainEpoch) FullNodeOpts {
	fullSchedule := stmgr.UpgradeSchedule{{
		// prepare for upgrade.
		Network:   network.Version9,
		Height:    1,
		Migration: stmgr.UpgradeActorsV2,
	}, {
		Network:   network.Version10,
		Height:    2,
		Migration: stmgr.UpgradeActorsV3,
	}, {
		Network:   network.Version12,
		Height:    3,
		Migration: stmgr.UpgradeActorsV4,
	}, {
		Network:   network.Version13,
		Height:    4,
		Migration: stmgr.UpgradeActorsV5,
	}}

	schedule := stmgr.UpgradeSchedule{}
	for _, upgrade := range fullSchedule {
		if upgrade.Network > version {
			break
		}

		schedule = append(schedule, upgrade)
	}

	if upgradeHeight > 0 {
		schedule[len(schedule)-1].Height = upgradeHeight
	}

	return FullNodeOpts{
		Opts: func(nodes []TestFullNode) node.Option {
			return node.Override(new(stmgr.UpgradeSchedule), schedule)
		},
	}
}

var FullNodeWithSDRAt = func(calico, persian abi.ChainEpoch) FullNodeOpts {
	return FullNodeOpts{
		Opts: func(nodes []TestFullNode) node.Option {
			return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
				Network:   network.Version6,
				Height:    1,
				Migration: stmgr.UpgradeActorsV2,
			}, {
				Network:   network.Version7,
				Height:    calico,
				Migration: stmgr.UpgradeCalico,
			}, {
				Network: network.Version8,
				Height:  persian,
			}})
		},
	}
}

var MineNext = miner.MineReq{
	InjectNulls: 0,
	Done:        func(bool, abi.ChainEpoch, error) {},
}
@ -1,88 +0,0 @@
package kit

import (
	"context"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
	"github.com/stretchr/testify/require"
)

func PledgeSectors(t *testing.T, ctx context.Context, miner TestMiner, n, existing int, blockNotif <-chan struct{}) { //nolint:golint
	toCheck := StartPledge(t, ctx, miner, n, existing, blockNotif)

	for len(toCheck) > 0 {
		flushSealingBatches(t, ctx, miner)

		states := map[api.SectorState]int{}
		for n := range toCheck {
			st, err := miner.SectorsStatus(ctx, n, false)
			require.NoError(t, err)
			states[st.State]++
			if st.State == api.SectorState(sealing.Proving) {
				delete(toCheck, n)
			}
			if strings.Contains(string(st.State), "Fail") {
				t.Fatal("sector in a failed state", st.State)
			}
		}

		build.Clock.Sleep(100 * time.Millisecond)
		fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
	}
}

func flushSealingBatches(t *testing.T, ctx context.Context, miner TestMiner) { //nolint:golint
	pcb, err := miner.SectorPreCommitFlush(ctx)
	require.NoError(t, err)
	if pcb != nil {
		fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb)
	}

	cb, err := miner.SectorCommitFlush(ctx)
	require.NoError(t, err)
	if cb != nil {
		fmt.Printf("COMMIT BATCH: %+v\n", cb)
	}
}

func StartPledge(t *testing.T, ctx context.Context, miner TestMiner, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} { //nolint:golint
	for i := 0; i < n; i++ {
		if i%3 == 0 && blockNotif != nil {
			<-blockNotif
			t.Log("WAIT")
		}
		t.Logf("PLEDGING %d", i)
		_, err := miner.PledgeSector(ctx)
		require.NoError(t, err)
	}

	for {
		s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
		require.NoError(t, err)
		fmt.Printf("Sectors: %d\n", len(s))
		if len(s) >= n+existing {
			break
		}

		build.Clock.Sleep(100 * time.Millisecond)
	}

	fmt.Printf("All sectors is fsm\n")

	s, err := miner.SectorsList(ctx)
	require.NoError(t, err)

	toCheck := map[abi.SectorNumber]struct{}{}
	for _, number := range s {
		toCheck[number] = struct{}{}
	}

	return toCheck
}
53  itests/kit/rpc.go  Normal file
@ -0,0 +1,53 @@
package kit

import (
	"context"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/filecoin-project/lotus/api/client"
	"github.com/filecoin-project/lotus/node"
	"github.com/multiformats/go-multiaddr"
	manet "github.com/multiformats/go-multiaddr/net"
	"github.com/stretchr/testify/require"
)

func CreateRPCServer(t *testing.T, handler http.Handler) (*httptest.Server, multiaddr.Multiaddr) {
	testServ := httptest.NewServer(handler)
	t.Cleanup(testServ.Close)
	t.Cleanup(testServ.CloseClientConnections)

	addr := testServ.Listener.Addr()
	maddr, err := manet.FromNetAddr(addr)
	require.NoError(t, err)
	return testServ, maddr
}

func fullRpc(t *testing.T, f *TestFullNode) *TestFullNode {
	handler, err := node.FullNodeHandler(f.FullNode, false)
	require.NoError(t, err)

	srv, maddr := CreateRPCServer(t, handler)

	cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil)
	require.NoError(t, err)
	t.Cleanup(stop)
	f.ListenAddr, f.FullNode = maddr, cl

	return f
}

func minerRpc(t *testing.T, m *TestMiner) *TestMiner {
	handler, err := node.MinerHandler(m.StorageMiner, false)
	require.NoError(t, err)

	srv, maddr := CreateRPCServer(t, handler)

	cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v0", nil)
	require.NoError(t, err)
	t.Cleanup(stop)

	m.ListenAddr, m.StorageMiner = maddr, cl
	return m
}
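A sketch of what ThroughRPC buys a test in combination with the helpers above: the returned handles become JSON-RPC clients backed by the httptest servers created in fullRpc/minerRpc, with their ListenAddr fields pointing at those servers. The assertions and log line are illustrative.

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/stretchr/testify/require"
)

func TestThroughRPCSketch(t *testing.T) {
	kit.QuietMiningLogs()

	client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC())
	ens.InterconnectAll().BeginMining(5 * time.Millisecond)

	// Calls below go over the websocket RPC endpoints rather than in-process.
	head, err := client.ChainHead(context.Background())
	require.NoError(t, err)
	t.Logf("full node RPC at %s, miner RPC at %s, head %d",
		client.ListenAddr, miner.ListenAddr, head.Height())
}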
@ -14,21 +14,21 @@ import (
 	"github.com/stretchr/testify/require"
 )

-func RunMultisigTests(t *testing.T, clientNode kit.TestFullNode) {
+func RunMultisigTests(t *testing.T, client *kit.TestFullNode) {
 	// Create mock CLI
 	ctx := context.Background()
 	mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
-	clientCLI := mockCLI.Client(clientNode.ListenAddr)
+	clientCLI := mockCLI.Client(client.ListenAddr)

 	// Create some wallets on the node to use for testing multisig
 	var walletAddrs []address.Address
 	for i := 0; i < 4; i++ {
-		addr, err := clientNode.WalletNew(ctx, types.KTSecp256k1)
+		addr, err := client.WalletNew(ctx, types.KTSecp256k1)
 		require.NoError(t, err)

 		walletAddrs = append(walletAddrs, addr)

-		kit.SendFunds(ctx, t, clientNode, addr, types.NewInt(1e15))
+		kit.SendFunds(ctx, t, client, addr, types.NewInt(1e15))
 	}

 	// Create an msig with three of the addresses and threshold of two sigs
@ -1,8 +1,6 @@
 package itests

 import (
-	"context"
-	"os"
 	"testing"
 	"time"

@ -12,12 +10,11 @@ import (

 // TestMultisig does a basic test to exercise the multisig CLI commands
 func TestMultisig(t *testing.T) {
-	_ = os.Setenv("BELLMAN_NO_GPU", "1")
 	kit.QuietMiningLogs()

-	blocktime := 5 * time.Millisecond
-	ctx := context.Background()
-	clientNode, _ := kit.StartOneNodeOneMiner(ctx, t, blocktime)
+	blockTime := 5 * time.Millisecond
+	client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC())
+	ens.InterconnectAll().BeginMining(blockTime)

-	multisig.RunMultisigTests(t, clientNode)
+	multisig.RunMultisigTests(t, client)
 }
@ -2,7 +2,6 @@ package itests

 import (
 	"context"
-	"fmt"
 	"testing"
 	"time"

@ -10,6 +9,7 @@ import (
 	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/lotus/itests/kit"
 	"github.com/ipfs/go-cid"
+	"github.com/stretchr/testify/require"

 	"github.com/filecoin-project/go-address"
 	cbor "github.com/ipfs/go-ipld-cbor"
@ -30,63 +30,45 @@ func TestPaymentChannelsAPI(t *testing.T) {
 	kit.QuietMiningLogs()

 	ctx := context.Background()
-	n, sn := kit.MockMinerBuilder(t, kit.TwoFull, kit.OneMiner)
+	blockTime := 5 * time.Millisecond

-	paymentCreator := n[0]
-	paymentReceiver := n[1]
-	miner := sn[0]
+	var (
+		paymentCreator  kit.TestFullNode
+		paymentReceiver kit.TestFullNode
+		miner           kit.TestMiner
+	)

-	// get everyone connected
-	addrs, err := paymentCreator.NetAddrsListen(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ens := kit.NewEnsemble(t, kit.MockProofs()).
+		FullNode(&paymentCreator).
+		FullNode(&paymentReceiver).
+		Miner(&miner, &paymentCreator).
+		Start().
+		InterconnectAll()
-	if err := paymentReceiver.NetConnect(ctx, addrs); err != nil {
-		t.Fatal(err)
-	}
+	bms := ens.BeginMining(blockTime)
+	bm := bms[0]

-	if err := miner.NetConnect(ctx, addrs); err != nil {
-		t.Fatal(err)
-	}
-
-	// start mining blocks
-	bm := kit.NewBlockMiner(t, miner)
-	bm.MineBlocks(ctx, 5*time.Millisecond)
-	t.Cleanup(bm.Stop)

 	// send some funds to register the receiver
 	receiverAddr, err := paymentReceiver.WalletNew(ctx, types.KTSecp256k1)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)

-	kit.SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))
+	kit.SendFunds(ctx, t, &paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))

 	// setup the payment channel
 	createrAddr, err := paymentCreator.WalletDefaultAddress(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)

 	channelAmt := int64(7000)
 	channelInfo, err := paymentCreator.PaychGet(ctx, createrAddr, receiverAddr, abi.NewTokenAmount(channelAmt))
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)

 	channel, err := paymentCreator.PaychGetWaitReady(ctx, channelInfo.WaitSentinel)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)

 	// allocate three lanes
 	var lanes []uint64
 	for i := 0; i < 3; i++ {
 		lane, err := paymentCreator.PaychAllocateLane(ctx, channel)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 		lanes = append(lanes, lane)
 	}

@ -95,45 +77,28 @@ func TestPaymentChannelsAPI(t *testing.T) {
 	// supersedes the voucher with a value of 1000
 	for _, lane := range lanes {
 		vouch1, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(1000), lane)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if vouch1.Voucher == nil {
-			t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouch1.Shortfall))
-		}
+		require.NoError(t, err)
+		require.NotNil(t, vouch1.Voucher, "Not enough funds to create voucher: missing %d", vouch1.Shortfall)
 		vouch2, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(2000), lane)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if vouch2.Voucher == nil {
-			t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouch2.Shortfall))
-		}
+		require.NoError(t, err)
+		require.NotNil(t, vouch2.Voucher, "Not enough funds to create voucher: missing %d", vouch2.Shortfall)
 		delta1, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch1.Voucher, nil, abi.NewTokenAmount(1000))
-		if err != nil {
-			t.Fatal(err)
-		}
-		if !delta1.Equals(abi.NewTokenAmount(1000)) {
-			t.Fatal("voucher didn't have the right amount")
-		}
+		require.NoError(t, err)
+		require.EqualValues(t, abi.NewTokenAmount(1000), delta1, "voucher didn't have the right amount")
 		delta2, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch2.Voucher, nil, abi.NewTokenAmount(1000))
-		if err != nil {
-			t.Fatal(err)
-		}
-		if !delta2.Equals(abi.NewTokenAmount(1000)) {
-			t.Fatal("voucher didn't have the right amount")
-		}
+		require.NoError(t, err)
+		require.EqualValues(t, abi.NewTokenAmount(1000), delta2, "voucher didn't have the right amount")
 	}

 	// settle the payment channel
 	settleMsgCid, err := paymentCreator.PaychSettle(ctx, channel)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)

 	res := waitForMessage(ctx, t, paymentCreator, settleMsgCid, time.Second*10, "settle")
-	if res.Receipt.ExitCode != 0 {
-		t.Fatal("Unable to settle payment channel")
-	}
+	require.EqualValues(t, 0, res.Receipt.ExitCode, "Unable to settle payment channel")

 	creatorStore := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(paymentCreator)))

@ -170,9 +135,7 @@ func TestPaymentChannelsAPI(t *testing.T) {
 	}, int(build.MessageConfidence)+1, build.Finality, func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) {
 		return preds.OnPaymentChannelActorChanged(channel, preds.OnToSendAmountChanges())(ctx, oldTs.Key(), newTs.Key())
 	})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)

 	select {
 	case <-finished:
@ -182,75 +145,49 @@ func TestPaymentChannelsAPI(t *testing.T) {

 	// Create a new voucher now that some vouchers have already been submitted
 	vouchRes, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(1000), 3)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if vouchRes.Voucher == nil {
-		t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouchRes.Shortfall))
-	}
+	require.NoError(t, err)
+	require.NotNil(t, vouchRes.Voucher, "Not enough funds to create voucher: missing %d", vouchRes.Shortfall)
 	vdelta, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouchRes.Voucher, nil, abi.NewTokenAmount(1000))
-	if err != nil {
-		t.Fatal(err)
-	}
-	if !vdelta.Equals(abi.NewTokenAmount(1000)) {
-		t.Fatal("voucher didn't have the right amount")
-	}
+	require.NoError(t, err)
+	require.EqualValues(t, abi.NewTokenAmount(1000), vdelta, "voucher didn't have the right amount")

 	// Create a new voucher whose value would exceed the channel balance
 	excessAmt := abi.NewTokenAmount(1000)
 	vouchRes, err = paymentCreator.PaychVoucherCreate(ctx, channel, excessAmt, 4)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if vouchRes.Voucher != nil {
-		t.Fatal("Expected not to be able to create voucher whose value would exceed channel balance")
-	}
-	if !vouchRes.Shortfall.Equals(excessAmt) {
-		t.Fatal(fmt.Errorf("Expected voucher shortfall of %d, got %d", excessAmt, vouchRes.Shortfall))
-	}
+	require.NoError(t, err)
+	require.Nil(t, vouchRes.Voucher, "Expected not to be able to create voucher whose value would exceed channel balance")
+	require.EqualValues(t, excessAmt, vouchRes.Shortfall, "Expected voucher shortfall of %d, got %d", excessAmt, vouchRes.Shortfall)

 	// Add a voucher whose value would exceed the channel balance
 	vouch := &paych.SignedVoucher{ChannelAddr: channel, Amount: excessAmt, Lane: 4, Nonce: 1}
 	vb, err := vouch.SigningBytes()
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	sig, err := paymentCreator.WalletSign(ctx, createrAddr, vb)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	vouch.Signature = sig
 	_, err = paymentReceiver.PaychVoucherAdd(ctx, channel, vouch, nil, abi.NewTokenAmount(1000))
-	if err == nil {
-		t.Fatal(fmt.Errorf("Expected shortfall error of %d", excessAmt))
-	}
+	require.Errorf(t, err, "Expected shortfall error of %d", excessAmt)

 	// wait for the settlement period to pass before collecting
 	waitForBlocks(ctx, t, bm, paymentReceiver, receiverAddr, policy.PaychSettleDelay)

 	creatorPreCollectBalance, err := paymentCreator.WalletBalance(ctx, createrAddr)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)

 	// collect funds (from receiver, though either party can do it)
 	collectMsg, err := paymentReceiver.PaychCollect(ctx, channel)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	res, err = paymentReceiver.StateWaitMsg(ctx, collectMsg, 3, api.LookbackNoLimit, true)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if res.Receipt.ExitCode != 0 {
-		t.Fatal("unable to collect on payment channel")
-	}
+	require.NoError(t, err)
+	require.EqualValues(t, 0, res.Receipt.ExitCode, "unable to collect on payment channel")

 	// Finally, check the balance for the creator
 	currentCreatorBalance, err := paymentCreator.WalletBalance(ctx, createrAddr)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)

 	// The highest nonce voucher that the creator sent on each lane is 2000
 	totalVouchers := int64(len(lanes) * 2000)
@ -260,12 +197,7 @@ func TestPaymentChannelsAPI(t *testing.T) {
 	// channel amount - total voucher value
 	expectedRefund := channelAmt - totalVouchers
 	delta := big.Sub(currentCreatorBalance, creatorPreCollectBalance)
-	if !delta.Equals(abi.NewTokenAmount(expectedRefund)) {
-		t.Fatalf("did not send correct funds from creator: expected %d, got %d", expectedRefund, delta)
-	}
-
-	// shut down mining
-	bm.Stop()
+	require.EqualValues(t, abi.NewTokenAmount(expectedRefund), delta, "did not send correct funds from creator: expected %d, got %d", expectedRefund, delta)
 }

 func waitForBlocks(ctx context.Context, t *testing.T, bm *kit.BlockMiner, paymentReceiver kit.TestFullNode, receiverAddr address.Address, count int) {
@ -286,14 +218,10 @@ func waitForBlocks(ctx context.Context, t *testing.T, bm *kit.BlockMiner, paymen
 			From:  receiverAddr,
 			Value: types.NewInt(0),
 		}, nil)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)

 		_, err = paymentReceiver.StateWaitMsg(ctx, m.Cid(), 1, api.LookbackNoLimit, true)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 	}
 }

@ -301,15 +229,12 @@ func waitForMessage(ctx context.Context, t *testing.T, paymentCreator kit.TestFu
 	ctx, cancel := context.WithTimeout(ctx, duration)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
fmt.Println("Waiting for", desc)
|
t.Log("Waiting for", desc)
|
||||||
|
|
||||||
res, err := paymentCreator.StateWaitMsg(ctx, msgCid, 1, api.LookbackNoLimit, true)
|
res, err := paymentCreator.StateWaitMsg(ctx, msgCid, 1, api.LookbackNoLimit, true)
|
||||||
if err != nil {
|
require.NoError(t, err)
|
||||||
fmt.Println("Error waiting for", desc, err)
|
require.EqualValues(t, 0, res.Receipt.ExitCode, "did not successfully send %s", desc)
|
||||||
t.Fatal(err)
|
|
||||||
}
|
t.Log("Confirmed", desc)
|
||||||
if res.Receipt.ExitCode != 0 {
|
|
||||||
t.Fatalf("did not successfully send %s", desc)
|
|
||||||
}
|
|
||||||
fmt.Println("Confirmed", desc)
|
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
@@ -41,11 +41,12 @@ func TestPaymentChannelsBasic(t *testing.T) {

 	blocktime := 5 * time.Millisecond
 	ctx := context.Background()
-	nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
-	paymentCreator := nodes[0]
-	paymentReceiver := nodes[1]
-	creatorAddr := addrs[0]
-	receiverAddr := addrs[1]
+	var (
+		paymentCreator kit.TestFullNode
+		paymentReceiver kit.TestFullNode
+	)
+	creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime)

 	// Create mock CLI
 	mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
@@ -70,12 +71,16 @@ func TestPaymentChannelsBasic(t *testing.T) {
 	// creator: paych settle <channel>
 	creatorCLI.RunCmd("paych", "settle", chAddr.String())

+	t.Log("wait for chain to reach settle height")
+
 	// Wait for the chain to reach the settle height
 	chState := getPaychState(ctx, t, paymentReceiver, chAddr)
 	sa, err := chState.SettlingAt()
 	require.NoError(t, err)
 	waitForHeight(ctx, t, paymentReceiver, sa)

+	t.Log("settle height reached")
+
 	// receiver: paych collect <channel>
 	receiverCLI.RunCmd("paych", "collect", chAddr.String())
 }
@@ -93,10 +98,11 @@ func TestPaymentChannelStatus(t *testing.T) {

 	blocktime := 5 * time.Millisecond
 	ctx := context.Background()
-	nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
-	paymentCreator := nodes[0]
-	creatorAddr := addrs[0]
-	receiverAddr := addrs[1]
+	var (
+		paymentCreator kit.TestFullNode
+		paymentReceiver kit.TestFullNode
+	)
+	creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime)

 	// Create mock CLI
 	mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
@@ -172,11 +178,11 @@ func TestPaymentChannelVouchers(t *testing.T) {

 	blocktime := 5 * time.Millisecond
 	ctx := context.Background()
-	nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
-	paymentCreator := nodes[0]
-	paymentReceiver := nodes[1]
-	creatorAddr := addrs[0]
-	receiverAddr := addrs[1]
+	var (
+		paymentCreator kit.TestFullNode
+		paymentReceiver kit.TestFullNode
+	)
+	creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime)

 	// Create mock CLI
 	mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
@@ -304,10 +310,11 @@ func TestPaymentChannelVoucherCreateShortfall(t *testing.T) {

 	blocktime := 5 * time.Millisecond
 	ctx := context.Background()
-	nodes, addrs := kit.StartTwoNodesOneMiner(ctx, t, blocktime)
-	paymentCreator := nodes[0]
-	creatorAddr := addrs[0]
-	receiverAddr := addrs[1]
+	var (
+		paymentCreator kit.TestFullNode
+		paymentReceiver kit.TestFullNode
+	)
+	creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime)

 	// Create mock CLI
 	mockCLI := kit.NewMockCLI(ctx, t, cli.Commands)
@@ -406,3 +413,25 @@ func getPaychState(ctx context.Context, t *testing.T, node kit.TestFullNode, chA

 	return chState
 }

+func startPaychCreatorReceiverMiner(ctx context.Context, t *testing.T, paymentCreator *kit.TestFullNode, paymentReceiver *kit.TestFullNode, blocktime time.Duration) (address.Address, address.Address) {
+	var miner kit.TestMiner
+	opts := kit.ThroughRPC()
+	kit.NewEnsemble(t, kit.MockProofs()).
+		FullNode(paymentCreator, opts).
+		FullNode(paymentReceiver, opts).
+		Miner(&miner, paymentCreator).
+		Start().
+		InterconnectAll().
+		BeginMining(blocktime)
+
+	// Send some funds to the second node
+	receiverAddr, err := paymentReceiver.WalletDefaultAddress(ctx)
+	require.NoError(t, err)
+	kit.SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))
+
+	// Get the first node's address
+	creatorAddr, err := paymentCreator.WalletDefaultAddress(ctx)
+	require.NoError(t, err)
+	return creatorAddr, receiverAddr
+}
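Not part of the diff above: the new startPaychCreatorReceiverMiner helper is one instance of the ensemble-builder pattern this PR introduces. The following is a minimal, hedged sketch of that pattern as a standalone test; the kit constructors and methods are the ones visible in the hunks above, while the test name, block time, and the absence of extra node options are illustrative assumptions.

package itests

import (
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit"
)

// TestEnsembleSketch is a hypothetical example (not part of this PR) showing how one
// full node and one miner are registered, started, connected, and set mining.
func TestEnsembleSketch(t *testing.T) {
	var (
		full  kit.TestFullNode
		miner kit.TestMiner
	)

	// Register the nodes with the ensemble builder, then construct and start them.
	ens := kit.NewEnsemble(t, kit.MockProofs()).
		FullNode(&full).
		Miner(&miner, &full).
		Start()

	// Connect every node to every other node and begin mining on the miner.
	ens.InterconnectAll().BeginMining(5 * time.Millisecond)

	// A real test would use `full` and `miner` from here on.
	_ = full
	_ = miner
}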
@@ -12,7 +12,6 @@ import (
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/itests/kit"
 	bminer "github.com/filecoin-project/lotus/miner"
-	"github.com/filecoin-project/lotus/node/impl"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -31,18 +30,10 @@ func TestSDRUpgrade(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	n, sn := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithSDRAt(500, 1000)}, kit.OneMiner)
-	client := n[0].FullNode.(*impl.FullNodeAPI)
-	miner := sn[0]
+	opts := kit.ConstructorOpts(kit.SDRUpgradeAt(500, 1000))
+	client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
+	ens.InterconnectAll()

-	addrinfo, err := client.NetAddrsListen(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := miner.NetConnect(ctx, addrinfo); err != nil {
-		t.Fatal(err)
-	}
 	build.Clock.Sleep(time.Second)

 	pledge := make(chan struct{})
@@ -53,7 +44,7 @@ func TestSDRUpgrade(t *testing.T) {
 		round := 0
 		for atomic.LoadInt64(&mine) != 0 {
 			build.Clock.Sleep(blocktime)
-			if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
+			if err := miner.MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {

 			}}); err != nil {
 				t.Error(err)
@@ -88,7 +79,7 @@ func TestSDRUpgrade(t *testing.T) {
 	}()

 	// before.
-	kit.PledgeSectors(t, ctx, miner, 9, 0, pledge)
+	miner.PledgeSectors(ctx, 9, 0, pledge)

 	s, err := miner.SectorsList(ctx)
 	require.NoError(t, err)
@@ -4,70 +4,38 @@ import (
 	"context"
 	"fmt"
 	"strings"
-	"sync/atomic"
 	"testing"
 	"time"

-	"github.com/filecoin-project/go-state-types/abi"
-	"github.com/filecoin-project/go-state-types/network"
+	"github.com/filecoin-project/lotus/itests/kit"
+	"github.com/stretchr/testify/require"

 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
-	"github.com/filecoin-project/lotus/chain/stmgr"
 	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
-	"github.com/filecoin-project/lotus/itests/kit"
-	bminer "github.com/filecoin-project/lotus/miner"
-	"github.com/filecoin-project/lotus/node"
-	"github.com/filecoin-project/lotus/node/impl"
-	"github.com/stretchr/testify/require"
 )

 func TestPledgeSectors(t *testing.T) {
 	kit.QuietMiningLogs()

-	runTest := func(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int) {
+	blockTime := 50 * time.Millisecond
+
+	runTest := func(t *testing.T, nSectors int) {
 		ctx, cancel := context.WithCancel(context.Background())
 		defer cancel()

-		n, sn := b(t, kit.OneFull, kit.OneMiner)
-		client := n[0].FullNode.(*impl.FullNodeAPI)
-		miner := sn[0]
-
-		addrinfo, err := client.NetAddrsListen(ctx)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if err := miner.NetConnect(ctx, addrinfo); err != nil {
-			t.Fatal(err)
-		}
-		build.Clock.Sleep(time.Second)
-
-		mine := int64(1)
-		done := make(chan struct{})
-		go func() {
-			defer close(done)
-			for atomic.LoadInt64(&mine) != 0 {
-				build.Clock.Sleep(blocktime)
-				if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
-
-				}}); err != nil {
-					t.Error(err)
-				}
-			}
-		}()
-
-		kit.PledgeSectors(t, ctx, miner, nSectors, 0, nil)
-
-		atomic.StoreInt64(&mine, 0)
-		<-done
+		_, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
+		ens.InterconnectAll().BeginMining(blockTime)
+
+		miner.PledgeSectors(ctx, nSectors, 0, nil)
 	}

 	t.Run("1", func(t *testing.T) {
-		runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 1)
+		runTest(t, 1)
 	})

 	t.Run("100", func(t *testing.T) {
-		runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 100)
+		runTest(t, 100)
 	})

 	t.Run("1000", func(t *testing.T) {
@@ -75,52 +43,24 @@ func TestPledgeSectors(t *testing.T) {
 			t.Skip("skipping test in short mode")
 		}

-		runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 1000)
+		runTest(t, 1000)
 	})
 }

 func TestPledgeBatching(t *testing.T) {
-	runTest := func(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int) {
+	blockTime := 50 * time.Millisecond
+
+	runTest := func(t *testing.T, nSectors int) {
 		ctx, cancel := context.WithCancel(context.Background())
 		defer cancel()

-		n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)}, kit.OneMiner)
-		client := n[0].FullNode.(*impl.FullNodeAPI)
-		miner := sn[0]
-
-		addrinfo, err := client.NetAddrsListen(ctx)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if err := miner.NetConnect(ctx, addrinfo); err != nil {
-			t.Fatal(err)
-		}
-		build.Clock.Sleep(time.Second)
-
-		mine := int64(1)
-		done := make(chan struct{})
-		go func() {
-			defer close(done)
-			for atomic.LoadInt64(&mine) != 0 {
-				build.Clock.Sleep(blocktime)
-				if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
-
-				}}); err != nil {
-					t.Error(err)
-				}
-			}
-		}()
-
-		for {
-			h, err := client.ChainHead(ctx)
-			require.NoError(t, err)
-			if h.Height() > 10 {
-				break
-			}
-		}
-
-		toCheck := kit.StartPledge(t, ctx, miner, nSectors, 0, nil)
+		opts := kit.ConstructorOpts(kit.LatestActorsAt(-1))
+		client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
+		ens.InterconnectAll().BeginMining(blockTime)
+
+		client.WaitTillChain(ctx, kit.HeightAtLeast(10))
+
+		toCheck := miner.StartPledge(ctx, nSectors, 0, nil)

 		for len(toCheck) > 0 {
 			states := map[api.SectorState]int{}
@@ -157,80 +97,27 @@ func TestPledgeBatching(t *testing.T) {
 			build.Clock.Sleep(100 * time.Millisecond)
 			fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
 		}
-
-		atomic.StoreInt64(&mine, 0)
-		<-done
 	}

 	t.Run("100", func(t *testing.T) {
-		runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 100)
+		runTest(t, 100)
 	})
 }

 func TestPledgeBeforeNv13(t *testing.T) {
-	runTest := func(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int) {
+	blocktime := 50 * time.Millisecond
+
+	runTest := func(t *testing.T, nSectors int) {
 		ctx, cancel := context.WithCancel(context.Background())
 		defer cancel()

-		n, sn := b(t, []kit.FullNodeOpts{
-			{
-				Opts: func(nodes []kit.TestFullNode) node.Option {
-					return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
-						Network: network.Version9,
-						Height: 1,
-						Migration: stmgr.UpgradeActorsV2,
-					}, {
-						Network: network.Version10,
-						Height: 2,
-						Migration: stmgr.UpgradeActorsV3,
-					}, {
-						Network: network.Version12,
-						Height: 3,
-						Migration: stmgr.UpgradeActorsV4,
-					}, {
-						Network: network.Version13,
-						Height: 1000000000,
-						Migration: stmgr.UpgradeActorsV5,
-					}})
-				},
-			},
-		}, kit.OneMiner)
-		client := n[0].FullNode.(*impl.FullNodeAPI)
-		miner := sn[0]
-
-		addrinfo, err := client.NetAddrsListen(ctx)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if err := miner.NetConnect(ctx, addrinfo); err != nil {
-			t.Fatal(err)
-		}
-		build.Clock.Sleep(time.Second)
-
-		mine := int64(1)
-		done := make(chan struct{})
-		go func() {
-			defer close(done)
-			for atomic.LoadInt64(&mine) != 0 {
-				build.Clock.Sleep(blocktime)
-				if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
-
-				}}); err != nil {
-					t.Error(err)
-				}
-			}
-		}()
-
-		for {
-			h, err := client.ChainHead(ctx)
-			require.NoError(t, err)
-			if h.Height() > 10 {
-				break
-			}
-		}
-
-		toCheck := kit.StartPledge(t, ctx, miner, nSectors, 0, nil)
+		opts := kit.ConstructorOpts(kit.LatestActorsAt(1000000000))
+		client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
+		ens.InterconnectAll().BeginMining(blocktime)
+
+		client.WaitTillChain(ctx, kit.HeightAtLeast(10))
+
+		toCheck := miner.StartPledge(ctx, nSectors, 0, nil)

 		for len(toCheck) > 0 {
 			states := map[api.SectorState]int{}
@@ -250,12 +137,9 @@ func TestPledgeBeforeNv13(t *testing.T) {
 			build.Clock.Sleep(100 * time.Millisecond)
 			fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
 		}
-
-		atomic.StoreInt64(&mine, 0)
-		<-done
 	}

 	t.Run("100-before-nv13", func(t *testing.T) {
-		runTest(t, kit.MockMinerBuilder, 50*time.Millisecond, 100)
+		runTest(t, 100)
 	})
 }
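An aside, not part of the diff: the hunks above repeatedly swap the old ChainHead polling loop for the kit's WaitTillChain helper with a height predicate. A hedged sketch of that idiom follows; WaitTillChain and HeightAtLeast are the helpers used in this PR, while the helper name and the timeout handling are assumptions.

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/itests/kit"
)

// waitForHeightSketch is a hypothetical helper (not part of this PR) showing the
// WaitTillChain + HeightAtLeast idiom that replaces the manual ChainHead loops.
func waitForHeightSketch(t *testing.T, client kit.TestFullNode, h abi.ChainEpoch) {
	// Bound the wait so a stuck chain fails the test instead of hanging it.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// Block until the observed chain head reaches height h, then log it.
	ts := client.WaitTillChain(ctx, kit.HeightAtLeast(h))
	t.Logf("chain reached height %d", ts.Height())
}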
@@ -2,17 +2,14 @@ package itests

 import (
 	"context"
-	"fmt"
 	"testing"
 	"time"

 	"github.com/filecoin-project/go-bitfield"
 	"github.com/filecoin-project/go-state-types/abi"
-	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/types"
 	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
 	"github.com/filecoin-project/lotus/itests/kit"
-	"github.com/filecoin-project/lotus/node/impl"
 	"github.com/stretchr/testify/require"
 )

@@ -21,49 +18,15 @@ func TestTerminate(t *testing.T) {

 	kit.QuietMiningLogs()

-	const blocktime = 2 * time.Millisecond
-	nSectors := uint64(2)
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	n, sn := kit.MockMinerBuilder(t,
-		[]kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)},
-		[]kit.StorageMiner{{Full: 0, Preseal: int(nSectors)}},
+	var (
+		blocktime = 2 * time.Millisecond
+		nSectors = 2
+		ctx = context.Background()
 	)

-	client := n[0].FullNode.(*impl.FullNodeAPI)
-	miner := sn[0]
-
-	addrinfo, err := client.NetAddrsListen(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := miner.NetConnect(ctx, addrinfo); err != nil {
-		t.Fatal(err)
-	}
-	build.Clock.Sleep(time.Second)
-
-	done := make(chan struct{})
-	go func() {
-		defer close(done)
-		for ctx.Err() == nil {
-			build.Clock.Sleep(blocktime)
-			if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
-				if ctx.Err() != nil {
-					// context was canceled, ignore the error.
-					return
-				}
-				t.Error(err)
-			}
-		}
-	}()
-	defer func() {
-		cancel()
-		<-done
-	}()
+	opts := kit.ConstructorOpts(kit.LatestActorsAt(-1))
+	client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.PresealSectors(nSectors), opts)
+	ens.InterconnectAll().BeginMining(blocktime)

 	maddr, err := miner.ActorAddress(ctx)
 	require.NoError(t, err)
@@ -74,31 +37,24 @@ func TestTerminate(t *testing.T) {
 	p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
 	require.NoError(t, err)
 	require.Equal(t, p.MinerPower, p.TotalPower)
-	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*nSectors))
+	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors)))

-	fmt.Printf("Seal a sector\n")
+	t.Log("Seal a sector")

-	kit.PledgeSectors(t, ctx, miner, 1, 0, nil)
+	miner.PledgeSectors(ctx, 1, 0, nil)

-	fmt.Printf("wait for power\n")
+	t.Log("wait for power")

 	{
 		// Wait until proven.
 		di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
 		require.NoError(t, err)

-		waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
-		fmt.Printf("End for head.Height > %d\n", waitUntil)
-
-		for {
-			head, err := client.ChainHead(ctx)
-			require.NoError(t, err)
-
-			if head.Height() > waitUntil {
-				fmt.Printf("Now head.Height = %d\n", head.Height())
-				break
-			}
-		}
+		waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 20 // 20 is some slack for the proof to be submitted + applied
+		t.Logf("End for head.Height > %d", waitUntil)
+
+		ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
+		t.Logf("Now head.Height = %d", ts.Height())
 	}

 	nSectors++
@@ -106,9 +62,9 @@ func TestTerminate(t *testing.T) {
 	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
 	require.NoError(t, err)
 	require.Equal(t, p.MinerPower, p.TotalPower)
-	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*nSectors))
+	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors)))

-	fmt.Println("Terminate a sector")
+	t.Log("Terminate a sector")

 	toTerminate := abi.SectorNumber(3)

@@ -121,7 +77,7 @@ loop:
 		si, err := miner.SectorsStatus(ctx, toTerminate, false)
 		require.NoError(t, err)

-		fmt.Println("state: ", si.State, msgTriggerred)
+		t.Log("state: ", si.State, msgTriggerred)

 		switch sealing.SectorState(si.State) {
 		case sealing.Terminating:
@@ -137,7 +93,7 @@ loop:
 			require.NoError(t, err)
 			if c != nil {
 				msgTriggerred = true
-				fmt.Println("terminate message:", c)
+				t.Log("terminate message:", c)

 				{
 					p, err := miner.SectorTerminatePending(ctx)
@@ -153,11 +109,14 @@ loop:
 		time.Sleep(100 * time.Millisecond)
 	}

+	// need to wait for message to be mined and applied.
+	time.Sleep(5 * time.Second)
+
 	// check power decreased
 	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
 	require.NoError(t, err)
 	require.Equal(t, p.MinerPower, p.TotalPower)
-	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*(nSectors-1)))
+	require.Equal(t, types.NewInt(uint64(ssz)*uint64(nSectors-1)), p.MinerPower.RawBytePower)

 	// check in terminated set
 	{
@@ -177,22 +136,15 @@ loop:

 	di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
 	require.NoError(t, err)
-	for {
-		head, err := client.ChainHead(ctx)
-		require.NoError(t, err)
-
-		if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
-			fmt.Printf("Now head.Height = %d\n", head.Height())
-			break
-		}
-		build.Clock.Sleep(blocktime)
-	}
-	require.NoError(t, err)
-	fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
+	waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 20 // slack like above
+	t.Logf("End for head.Height > %d", waitUntil)
+	ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
+	t.Logf("Now head.Height = %d", ts.Height())

 	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
 	require.NoError(t, err)

 	require.Equal(t, p.MinerPower, p.TotalPower)
-	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*(nSectors-1)))
+	require.Equal(t, types.NewInt(uint64(ssz)*uint64(nSectors-1)), p.MinerPower.RawBytePower)
 }
@@ -2,7 +2,6 @@ package itests

 import (
 	"context"
-	"fmt"
 	"testing"
 	"time"

@@ -13,7 +12,6 @@ import (
 	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
 	"github.com/filecoin-project/lotus/itests/kit"
 	"github.com/filecoin-project/lotus/node"
-	"github.com/filecoin-project/lotus/node/impl"
 	"github.com/stretchr/testify/require"
 )

@@ -25,10 +23,10 @@ func TestTapeFix(t *testing.T) {
 	// The "before" case is disabled, because we need the builder to mock 32 GiB sectors to accurately repro this case
 	// TODO: Make the mock sector size configurable and reenable this
 	// t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) })
-	t.Run("after", func(t *testing.T) { testTapeFix(t, kit.MockMinerBuilder, blocktime, true) })
+	t.Run("after", func(t *testing.T) { testTapeFix(t, blocktime, true) })
 }

-func testTapeFix(t *testing.T, b kit.APIBuilder, blocktime time.Duration, after bool) {
+func testTapeFix(t *testing.T, blocktime time.Duration, after bool) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

@@ -44,46 +42,14 @@ func testTapeFix(t *testing.T, b kit.APIBuilder, blocktime time.Duration, after
 		})
 	}

-	n, sn := b(t, []kit.FullNodeOpts{{Opts: func(_ []kit.TestFullNode) node.Option {
-		return node.Override(new(stmgr.UpgradeSchedule), upgradeSchedule)
-	}}}, kit.OneMiner)
-
-	client := n[0].FullNode.(*impl.FullNodeAPI)
-	miner := sn[0]
-
-	addrinfo, err := client.NetAddrsListen(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := miner.NetConnect(ctx, addrinfo); err != nil {
-		t.Fatal(err)
-	}
-	build.Clock.Sleep(time.Second)
-
-	done := make(chan struct{})
-	go func() {
-		defer close(done)
-		for ctx.Err() == nil {
-			build.Clock.Sleep(blocktime)
-			if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
-				if ctx.Err() != nil {
-					// context was canceled, ignore the error.
-					return
-				}
-				t.Error(err)
-			}
-		}
-	}()
-	defer func() {
-		cancel()
-		<-done
-	}()
+	nopts := kit.ConstructorOpts(node.Override(new(stmgr.UpgradeSchedule), upgradeSchedule))
+	_, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), nopts)
+	ens.InterconnectAll().BeginMining(blocktime)

 	sid, err := miner.PledgeSector(ctx)
 	require.NoError(t, err)

-	fmt.Printf("All sectors is fsm\n")
+	t.Log("All sectors is fsm")

 	// If before, we expect the precommit to fail
 	successState := api.SectorState(sealing.CommitFailed)
@@ -101,6 +67,6 @@ func testTapeFix(t *testing.T, b kit.APIBuilder, blocktime time.Duration, after
 		}
 		require.NotEqual(t, failureState, st.State)
 		build.Clock.Sleep(100 * time.Millisecond)
-		fmt.Println("WaitSeal")
+		t.Log("WaitSeal")
 	}
 }
@@ -2,122 +2,127 @@ package itests

 import (
 	"context"
+	"fmt"
 	"strings"
 	"testing"
 	"time"

+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/go-state-types/network"
+	"github.com/filecoin-project/lotus/chain/wallet"
 	"github.com/filecoin-project/lotus/itests/kit"
+	verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"
+	"github.com/stretchr/testify/require"

 	lapi "github.com/filecoin-project/lotus/api"

 	"github.com/filecoin-project/lotus/chain/actors"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
-	"github.com/filecoin-project/lotus/node/impl"
-	verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"
-
-	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/node/impl"
 )

 func TestVerifiedClientTopUp(t *testing.T) {
+	blockTime := 100 * time.Millisecond
+
 	test := func(nv network.Version, shouldWork bool) func(*testing.T) {
 		return func(t *testing.T) {
-			nodes, miners := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithNetworkUpgradeAt(nv, -1)}, kit.OneMiner)
-			api := nodes[0].FullNode.(*impl.FullNodeAPI)
+			rootKey, err := wallet.GenerateKey(types.KTSecp256k1)
+			require.NoError(t, err)
+
+			verifierKey, err := wallet.GenerateKey(types.KTSecp256k1)
+			require.NoError(t, err)
+
+			verifiedClientKey, err := wallet.GenerateKey(types.KTBLS)
+			require.NoError(t, err)
+
+			bal, err := types.ParseFIL("100fil")
+			require.NoError(t, err)
+
+			node, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(),
+				kit.RootVerifier(rootKey, abi.NewTokenAmount(bal.Int64())),
+				kit.Account(verifierKey, abi.NewTokenAmount(bal.Int64())), // assign some balance to the verifier so they can send an AddClient message.
+				kit.ConstructorOpts(kit.InstantaneousNetworkVersion(nv)))
+
+			ens.InterconnectAll().BeginMining(blockTime)
+
+			api := node.FullNode.(*impl.FullNodeAPI)
 			ctx, cancel := context.WithCancel(context.Background())
 			defer cancel()

-			//Get VRH
+			// get VRH
 			vrh, err := api.StateVerifiedRegistryRootKey(ctx, types.TipSetKey{})
-			if err != nil {
-				t.Fatal(err)
-			}
+			fmt.Println(vrh.String())
+			require.NoError(t, err)

-			//Add verifier
-			verifier, err := api.WalletDefaultAddress(ctx)
-			if err != nil {
-				t.Fatal(err)
-			}
+			// import the root key.
+			rootAddr, err := api.WalletImport(ctx, &rootKey.KeyInfo)
+			require.NoError(t, err)
+
+			// import the verifier's key.
+			verifierAddr, err := api.WalletImport(ctx, &verifierKey.KeyInfo)
+			require.NoError(t, err)
+
+			// import the verified client's key.
+			verifiedClientAddr, err := api.WalletImport(ctx, &verifiedClientKey.KeyInfo)
+			require.NoError(t, err)
+
+			params, err := actors.SerializeParams(&verifreg4.AddVerifierParams{Address: verifierAddr, Allowance: big.NewInt(100000000000)})
+			require.NoError(t, err)

-			params, err := actors.SerializeParams(&verifreg4.AddVerifierParams{Address: verifier, Allowance: big.NewInt(100000000000)})
-			if err != nil {
-				t.Fatal(err)
-			}
 			msg := &types.Message{
+				From: rootAddr,
 				To: verifreg.Address,
-				From: vrh,
 				Method: verifreg.Methods.AddVerifier,
 				Params: params,
 				Value: big.Zero(),
 			}

-			bm := kit.NewBlockMiner(t, miners[0])
-			bm.MineBlocks(ctx, 100*time.Millisecond)
-			t.Cleanup(bm.Stop)
-
 			sm, err := api.MpoolPushMessage(ctx, msg, nil)
-			if err != nil {
-				t.Fatal("AddVerifier failed: ", err)
-			}
+			require.NoError(t, err, "AddVerifier failed")

 			res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
-			if err != nil {
-				t.Fatal(err)
-			}
-			if res.Receipt.ExitCode != 0 {
-				t.Fatal("did not successfully send message")
-			}
+			require.NoError(t, err)
+			require.EqualValues(t, 0, res.Receipt.ExitCode)

-			//Assign datacap to a client
+			// assign datacap to a client
 			datacap := big.NewInt(10000)
-			clientAddress, err := api.WalletNew(ctx, types.KTBLS)
-			if err != nil {
-				t.Fatal(err)
-			}
-
-			params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: clientAddress, Allowance: datacap})
-			if err != nil {
-				t.Fatal(err)
-			}
+
+			params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: datacap})
+			require.NoError(t, err)

 			msg = &types.Message{
+				From: verifierAddr,
 				To: verifreg.Address,
-				From: verifier,
 				Method: verifreg.Methods.AddVerifiedClient,
 				Params: params,
 				Value: big.Zero(),
 			}

 			sm, err = api.MpoolPushMessage(ctx, msg, nil)
-			if err != nil {
-				t.Fatal("AddVerifiedClient faield: ", err)
-			}
-
-			res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
-			if err != nil {
-				t.Fatal(err)
-			}
-			if res.Receipt.ExitCode != 0 {
-				t.Fatal("did not successfully send message")
-			}
-
-			//check datacap balance
-			dcap, err := api.StateVerifiedClientStatus(ctx, clientAddress, types.EmptyTSK)
-			if err != nil {
-				t.Fatal(err)
-			}
+			require.NoError(t, err)
+
+			res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
+			require.NoError(t, err)
+			require.EqualValues(t, 0, res.Receipt.ExitCode)
+
+			// check datacap balance
+			dcap, err := api.StateVerifiedClientStatus(ctx, verifiedClientAddr, types.EmptyTSK)
+			require.NoError(t, err)
+
 			if !dcap.Equals(datacap) {
 				t.Fatal("")
 			}

-			//try to assign datacap to the same client should fail for actor v4 and below
-			params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: clientAddress, Allowance: datacap})
+			// try to assign datacap to the same client should fail for actor v4 and below
+			params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: datacap})
 			if err != nil {
 				t.Fatal(err)
 			}

 			msg = &types.Message{
+				From: verifierAddr,
 				To: verifreg.Address,
-				From: verifier,
 				Method: verifreg.Methods.AddVerifiedClient,
 				Params: params,
 				Value: big.Zero(),
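A side note, not part of the diff: the verified-registry test above now provisions its actors at genesis with the kit's RootVerifier and Account options instead of reusing the node's default wallet. A minimal, hedged sketch of that wiring; the option and constructor names come from the hunk above, while the test name, balance, and block time are illustrative assumptions.

package itests

import (
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/wallet"
	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/stretchr/testify/require"
)

// TestGenesisAccountsSketch is a hypothetical example (not part of this PR): it
// pre-funds a verified-registry root key and a verifier account at genesis so a
// test can send AddVerifier / AddVerifiedClient messages right away.
func TestGenesisAccountsSketch(t *testing.T) {
	rootKey, err := wallet.GenerateKey(types.KTSecp256k1)
	require.NoError(t, err)
	verifierKey, err := wallet.GenerateKey(types.KTSecp256k1)
	require.NoError(t, err)

	balance := abi.NewTokenAmount(1_000_000_000_000_000_000) // 1 FIL in attoFIL; illustrative only

	node, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(),
		kit.RootVerifier(rootKey, balance), // key controlling the verified registry root
		kit.Account(verifierKey, balance),  // pre-funded verifier account
	)
	ens.InterconnectAll().BeginMining(100 * time.Millisecond)

	_ = node // a real test would import the keys and exercise the verified registry
}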
@@ -2,7 +2,6 @@ package itests

 import (
 	"context"
-	"fmt"
 	"testing"
 	"time"

@@ -16,7 +15,6 @@ import (
 	minerActor "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/itests/kit"
-	"github.com/filecoin-project/lotus/node/impl"
 	proof3 "github.com/filecoin-project/specs-actors/v3/actors/runtime/proof"
 	"github.com/stretchr/testify/require"
 )
@@ -26,71 +24,38 @@ func TestWindowPostDispute(t *testing.T) {

 	kit.QuietMiningLogs()

-	b := kit.MockMinerBuilder
 	blocktime := 2 * time.Millisecond

 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

+	var (
+		client kit.TestFullNode
+		chainMiner kit.TestMiner
+		evilMiner kit.TestMiner
+	)
+
 	// First, we configure two miners. After sealing, we're going to turn off the first miner so
 	// it doesn't submit proofs.
 	//
 	// Then we're going to manually submit bad proofs.
-	n, sn := b(t,
-		[]kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)},
-		[]kit.StorageMiner{
-			{Full: 0, Preseal: kit.PresealGenesis},
-			{Full: 0},
-		})
-
-	client := n[0].FullNode.(*impl.FullNodeAPI)
-	chainMiner := sn[0]
-	evilMiner := sn[1]
-
-	{
-		addrinfo, err := client.NetAddrsListen(ctx)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if err := chainMiner.NetConnect(ctx, addrinfo); err != nil {
-			t.Fatal(err)
-		}
-
-		if err := evilMiner.NetConnect(ctx, addrinfo); err != nil {
-			t.Fatal(err)
-		}
-	}
+	opts := kit.ConstructorOpts(kit.LatestActorsAt(-1))
+	ens := kit.NewEnsemble(t, kit.MockProofs()).
+		FullNode(&client, opts).
+		Miner(&chainMiner, &client, opts).
+		Miner(&evilMiner, &client, opts, kit.PresealSectors(0)).
+		Start()

 	defaultFrom, err := client.WalletDefaultAddress(ctx)
 	require.NoError(t, err)

-	build.Clock.Sleep(time.Second)
-
 	// Mine with the _second_ node (the good one).
-	done := make(chan struct{})
-	go func() {
-		defer close(done)
-		for ctx.Err() == nil {
-			build.Clock.Sleep(blocktime)
-			if err := chainMiner.MineOne(ctx, kit.MineNext); err != nil {
-				if ctx.Err() != nil {
-					// context was canceled, ignore the error.
-					return
-				}
-				t.Error(err)
-			}
-		}
-	}()
-	defer func() {
-		cancel()
-		<-done
-	}()
+	ens.InterconnectAll().BeginMining(blocktime, &chainMiner)

 	// Give the chain miner enough sectors to win every block.
-	kit.PledgeSectors(t, ctx, chainMiner, 10, 0, nil)
+	chainMiner.PledgeSectors(ctx, 10, 0, nil)
 	// And the evil one 1 sector. No cookie for you.
-	kit.PledgeSectors(t, ctx, evilMiner, 1, 0, nil)
+	evilMiner.PledgeSectors(ctx, 1, 0, nil)

 	// Let the evil miner's sectors gain power.
 	evilMinerAddr, err := evilMiner.ActorAddress(ctx)
@@ -99,19 +64,13 @@ func TestWindowPostDispute(t *testing.T) {
 	di, err := client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
 	require.NoError(t, err)

-	fmt.Printf("Running one proving period\n")
-	fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2)
-
-	for {
-		head, err := client.ChainHead(ctx)
-		require.NoError(t, err)
-
-		if head.Height() > di.PeriodStart+di.WPoStProvingPeriod*2 {
-			fmt.Printf("Now head.Height = %d\n", head.Height())
-			break
-		}
-		build.Clock.Sleep(blocktime)
-	}
+	t.Logf("Running one proving period\n")
+
+	waitUntil := di.PeriodStart + di.WPoStProvingPeriod*2 + 1
+	t.Logf("End for head.Height > %d", waitUntil)
+
+	ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
+	t.Logf("Now head.Height = %d", ts.Height())

 	p, err := client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
 	require.NoError(t, err)
@@ -128,12 +87,12 @@ func TestWindowPostDispute(t *testing.T) {
 	evilSectorLoc, err := client.StateSectorPartition(ctx, evilMinerAddr, evilSectorNo, types.EmptyTSK)
 	require.NoError(t, err)

-	fmt.Println("evil miner stopping")
+	t.Log("evil miner stopping")

 	// Now stop the evil miner, and start manually submitting bad proofs.
 	require.NoError(t, evilMiner.Stop(ctx))

-	fmt.Println("evil miner stopped")
+	t.Log("evil miner stopped")

 	// Wait until we need to prove our sector.
 	for {
@@ -145,7 +104,7 @@ func TestWindowPostDispute(t *testing.T) {
 		build.Clock.Sleep(blocktime)
 	}

-	err = submitBadProof(ctx, client, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
+	err = submitBadProof(ctx, client, evilMiner.OwnerKey.Address, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
 	require.NoError(t, err, "evil proof not accepted")

 	// Wait until after the proving period.
@@ -158,7 +117,7 @@ func TestWindowPostDispute(t *testing.T) {
 		build.Clock.Sleep(blocktime)
 	}

-	fmt.Println("accepted evil proof")
+	t.Log("accepted evil proof")

 	// Make sure the evil node didn't lose any power.
 	p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
@@ -185,7 +144,7 @@ func TestWindowPostDispute(t *testing.T) {
 	sm, err := client.MpoolPushMessage(ctx, msg, nil)
 	require.NoError(t, err)

-	fmt.Println("waiting dispute")
+	t.Log("waiting dispute")
 	rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
 	require.NoError(t, err)
 	require.Zero(t, rec.Receipt.ExitCode, "dispute not accepted: %s", rec.Receipt.ExitCode.Error())
@@ -243,7 +202,7 @@ func TestWindowPostDispute(t *testing.T) {
 	}

 	// Now try to be evil again
-	err = submitBadProof(ctx, client, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
+	err = submitBadProof(ctx, client, evilMiner.OwnerKey.Address, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
 	require.Error(t, err)
 	require.Contains(t, err.Error(), "message execution failed: exit 16, reason: window post failed: invalid PoSt")

@@ -255,27 +214,14 @@ func TestWindowPostDisputeFails(t *testing.T) {

 	kit.QuietMiningLogs()

-	b := kit.MockMinerBuilder
 	blocktime := 2 * time.Millisecond

 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)}, kit.OneMiner)
-
-	client := n[0].FullNode.(*impl.FullNodeAPI)
-	miner := sn[0]
-
-	{
-		addrinfo, err := client.NetAddrsListen(ctx)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if err := miner.NetConnect(ctx, addrinfo); err != nil {
-			t.Fatal(err)
-		}
-	}
+	opts := kit.ConstructorOpts(kit.LatestActorsAt(-1))
+	client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
+	ens.InterconnectAll().BeginMining(blocktime)

 	defaultFrom, err := client.WalletDefaultAddress(ctx)
 	require.NoError(t, err)
@@ -285,48 +231,21 @@ func TestWindowPostDisputeFails(t *testing.T) {

 	build.Clock.Sleep(time.Second)

-	// Mine with the _second_ node (the good one).
-	done := make(chan struct{})
-	go func() {
-		defer close(done)
-		for ctx.Err() == nil {
-			build.Clock.Sleep(blocktime)
-			if err := miner.MineOne(ctx, kit.MineNext); err != nil {
-				if ctx.Err() != nil {
-					// context was canceled, ignore the error.
-					return
-				}
-				t.Error(err)
-			}
-		}
-	}()
-	defer func() {
-		cancel()
-		<-done
-	}()
-
-	kit.PledgeSectors(t, ctx, miner, 10, 0, nil)
+	miner.PledgeSectors(ctx, 10, 0, nil)

 	di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
 	require.NoError(t, err)

-	fmt.Printf("Running one proving period\n")
-	fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2)
-
-	for {
-		head, err := client.ChainHead(ctx)
-		require.NoError(t, err)
-
-		if head.Height() > di.PeriodStart+di.WPoStProvingPeriod*2 {
-			fmt.Printf("Now head.Height = %d\n", head.Height())
-			break
-		}
-		build.Clock.Sleep(blocktime)
-	}
+	t.Log("Running one proving period")
+	waitUntil := di.PeriodStart + di.WPoStProvingPeriod*2 + 1
+	t.Logf("End for head.Height > %d", waitUntil)
+
+	ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
+	t.Logf("Now head.Height = %d", ts.Height())

 	ssz, err := miner.ActorSectorSize(ctx, maddr)
 	require.NoError(t, err)
-	expectedPower := types.NewInt(uint64(ssz) * (kit.GenesisPreseals + 10))
+	expectedPower := types.NewInt(uint64(ssz) * (kit.DefaultPresealsPerBootstrapMiner + 10))

 	p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
 	require.NoError(t, err)
@@ -388,7 +307,7 @@ waitForProof:

 func submitBadProof(
 	ctx context.Context,
-	client api.FullNode, maddr address.Address,
+	client api.FullNode, owner address.Address, maddr address.Address,
 	di *dline.Info, dlIdx, partIdx uint64,
 ) error {
 	head, err := client.ChainHead(ctx)
@@ -396,11 +315,6 @@ func submitBadProof(
 		return err
 	}

-	from, err := client.WalletDefaultAddress(ctx)
-	if err != nil {
-		return err
-	}
-
 	minerInfo, err := client.StateMinerInfo(ctx, maddr, head.Key())
 	if err != nil {
 		return err
@@ -435,7 +349,7 @@ func submitBadProof(
 		Method: minerActor.Methods.SubmitWindowedPoSt,
 		Params: enc,
 		Value: types.NewInt(0),
-		From: from,
+		From: owner,
 	}
 	sm, err := client.MpoolPushMessage(ctx, msg, nil)
 	if err != nil {
|
@@ -6,18 +6,18 @@ import (
 "testing"
 "time"

-"github.com/filecoin-project/go-state-types/big"
-"github.com/filecoin-project/lotus/api"
-"github.com/filecoin-project/lotus/itests/kit"
 "github.com/stretchr/testify/require"

 "github.com/filecoin-project/go-address"
 "github.com/filecoin-project/go-state-types/abi"
-"github.com/filecoin-project/lotus/extern/sector-storage/mock"
+"github.com/filecoin-project/go-state-types/big"
 "github.com/filecoin-project/specs-storage/storage"

+"github.com/filecoin-project/lotus/api"
 "github.com/filecoin-project/lotus/build"
 "github.com/filecoin-project/lotus/chain/types"
+"github.com/filecoin-project/lotus/extern/sector-storage/mock"
+"github.com/filecoin-project/lotus/itests/kit"
 "github.com/filecoin-project/lotus/node/impl"
 )

@@ -38,50 +38,20 @@ func TestWindowedPost(t *testing.T) {
 } {
 height := height // copy to satisfy lints
 t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
-testWindowPostUpgrade(t, kit.MockMinerBuilder, blocktime, nSectors, height)
+testWindowPostUpgrade(t, blocktime, nSectors, height)
 })
 }
 }

-func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Duration, nSectors int, upgradeHeight abi.ChainEpoch) {
+func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, upgradeHeight abi.ChainEpoch) {
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()

-n, sn := b(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(upgradeHeight)}, kit.OneMiner)
+opts := kit.ConstructorOpts(kit.LatestActorsAt(upgradeHeight))
+client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
+ens.InterconnectAll().BeginMining(blocktime)

-client := n[0].FullNode.(*impl.FullNodeAPI)
+miner.PledgeSectors(ctx, nSectors, 0, nil)
-miner := sn[0]

-addrinfo, err := client.NetAddrsListen(ctx)
-if err != nil {
-t.Fatal(err)
-}

-if err := miner.NetConnect(ctx, addrinfo); err != nil {
-t.Fatal(err)
-}
-build.Clock.Sleep(time.Second)

-done := make(chan struct{})
-go func() {
-defer close(done)
-for ctx.Err() == nil {
-build.Clock.Sleep(blocktime)
-if err := sn[0].MineOne(ctx, kit.MineNext); err != nil {
-if ctx.Err() != nil {
-// context was canceled, ignore the error.
-return
-}
-t.Error(err)
-}
-}
-}()
-defer func() {
-cancel()
-<-done
-}()

-kit.PledgeSectors(t, ctx, miner, nSectors, 0, nil)

 maddr, err := miner.ActorAddress(ctx)
 require.NoError(t, err)
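TestWindowedPost drives the same body at several upgrade heights; with the refactored kit the per-height setup collapses to a constructor option. The sketch below reuses only the calls shown in this hunk; the test name, the upgrade heights, and the sector count are illustrative assumptions.

    package itests

    import (
    	"context"
    	"fmt"
    	"testing"
    	"time"

    	"github.com/filecoin-project/go-state-types/abi"

    	"github.com/filecoin-project/lotus/itests/kit"
    )

    // TestUpgradeMatrixSketch is a hypothetical example of parameterising a test
    // over network-upgrade heights via kit.ConstructorOpts/kit.LatestActorsAt.
    func TestUpgradeMatrixSketch(t *testing.T) {
    	for _, height := range []abi.ChainEpoch{-1, 10} { // hypothetical upgrade heights
    		height := height // copy to satisfy lints
    		t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
    			opts := kit.ConstructorOpts(kit.LatestActorsAt(height))
    			_, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
    			ens.InterconnectAll().BeginMining(2 * time.Millisecond)

    			// The real test body would exercise WindowPoSt behaviour here.
    			miner.PledgeSectors(context.Background(), 2, 0, nil)
    		})
    	}
    }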
@@ -92,19 +62,12 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
 mid, err := address.IDFromAddress(maddr)
 require.NoError(t, err)

-fmt.Printf("Running one proving period\n")
+t.Log("Running one proving period")
-fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
+waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
+t.Logf("End for head.Height > %d", waitUntil)

-for {
+ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
-head, err := client.ChainHead(ctx)
+t.Logf("Now head.Height = %d", ts.Height())
-require.NoError(t, err)

-if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
-fmt.Printf("Now head.Height = %d\n", head.Height())
-break
-}
-build.Clock.Sleep(blocktime)
-}

 p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
 require.NoError(t, err)
@@ -113,9 +76,9 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
 require.NoError(t, err)

 require.Equal(t, p.MinerPower, p.TotalPower)
-require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+kit.GenesisPreseals)))
+require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+kit.DefaultPresealsPerBootstrapMiner)))

-fmt.Printf("Drop some sectors\n")
+t.Log("Drop some sectors")

 // Drop 2 sectors from deadline 2 partition 0 (full partition / deadline)
 {
@@ -159,7 +122,7 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati

 all, err := secs.All(2)
 require.NoError(t, err)
-fmt.Println("the sectors", all)
+t.Log("the sectors", all)

 s = storage.SectorRef{
 ID: abi.SectorID{
@@ -175,20 +138,12 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
 di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
 require.NoError(t, err)

-fmt.Printf("Go through another PP, wait for sectors to become faulty\n")
+t.Log("Go through another PP, wait for sectors to become faulty")
-fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
+waitUntil = di.PeriodStart + di.WPoStProvingPeriod + 2
+t.Logf("End for head.Height > %d", waitUntil)

-for {
+ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
-head, err := client.ChainHead(ctx)
+t.Logf("Now head.Height = %d", ts.Height())
-require.NoError(t, err)

-if head.Height() > di.PeriodStart+(di.WPoStProvingPeriod)+2 {
-fmt.Printf("Now head.Height = %d\n", head.Height())
-break
-}

-build.Clock.Sleep(blocktime)
-}

 p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
 require.NoError(t, err)
@@ -196,9 +151,9 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
 require.Equal(t, p.MinerPower, p.TotalPower)

 sectors := p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
-require.Equal(t, nSectors+kit.GenesisPreseals-3, int(sectors)) // -3 just removed sectors
+require.Equal(t, nSectors+kit.DefaultPresealsPerBootstrapMiner-3, int(sectors)) // -3 just removed sectors

-fmt.Printf("Recover one sector\n")
+t.Log("Recover one sector")

 err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, false)
 require.NoError(t, err)
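The hunk above keeps the existing fault-injection trick: reaching through the miner's storage API into the mock sector manager to flip a sector's failed flag. A compile-level illustration of that chain of type assertions follows; the test name and the sector number are guesses (a real test derives the sector from the deadline/partition state), so errors are only logged rather than asserted.

    package itests

    import (
    	"context"
    	"testing"
    	"time"

    	"github.com/stretchr/testify/require"

    	"github.com/filecoin-project/go-address"
    	"github.com/filecoin-project/go-state-types/abi"
    	"github.com/filecoin-project/specs-storage/storage"

    	"github.com/filecoin-project/lotus/extern/sector-storage/mock"
    	"github.com/filecoin-project/lotus/itests/kit"
    	"github.com/filecoin-project/lotus/node/impl"
    )

    // TestMarkFailedSketch shows the type-assertion chain used above to reach
    // the mock.SectorMgr behind a kit miner.
    func TestMarkFailedSketch(t *testing.T) {
    	ctx := context.Background()

    	_, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
    	ens.InterconnectAll().BeginMining(2 * time.Millisecond)

    	maddr, err := miner.ActorAddress(ctx)
    	require.NoError(t, err)
    	mid, err := address.IDFromAddress(maddr)
    	require.NoError(t, err)

    	// Sector number 1 is a hypothetical preseal sector.
    	s := storage.SectorRef{ID: abi.SectorID{Miner: abi.ActorID(mid), Number: 1}}

    	mgr := miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr)
    	if err := mgr.MarkFailed(s, true); err != nil { // mark faulty
    		t.Logf("MarkFailed(true): %v", err)
    	}
    	if err := mgr.MarkFailed(s, false); err != nil { // mark recovered
    		t.Logf("MarkFailed(false): %v", err)
    	}
    }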
@@ -206,19 +161,11 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
 di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
 require.NoError(t, err)

-fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
+waitUntil = di.PeriodStart + di.WPoStProvingPeriod + 2
+t.Logf("End for head.Height > %d", waitUntil)

-for {
+ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
-head, err := client.ChainHead(ctx)
+t.Logf("Now head.Height = %d", ts.Height())
-require.NoError(t, err)

-if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
-fmt.Printf("Now head.Height = %d\n", head.Height())
-break
-}

-build.Clock.Sleep(blocktime)
-}

 p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
 require.NoError(t, err)
@@ -226,11 +173,11 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
 require.Equal(t, p.MinerPower, p.TotalPower)

 sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
-require.Equal(t, nSectors+kit.GenesisPreseals-2, int(sectors)) // -2 not recovered sectors
+require.Equal(t, nSectors+kit.DefaultPresealsPerBootstrapMiner-2, int(sectors)) // -2 not recovered sectors

 // pledge a sector after recovery

-kit.PledgeSectors(t, ctx, miner, 1, nSectors, nil)
+miner.PledgeSectors(ctx, 1, nSectors, nil)

 {
 // Wait until proven.
@@ -238,17 +185,10 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
 require.NoError(t, err)

 waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
-fmt.Printf("End for head.Height > %d\n", waitUntil)
+t.Logf("End for head.Height > %d\n", waitUntil)

-for {
+ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
-head, err := client.ChainHead(ctx)
+t.Logf("Now head.Height = %d", ts.Height())
-require.NoError(t, err)

-if head.Height() > waitUntil {
-fmt.Printf("Now head.Height = %d\n", head.Height())
-break
-}
-}
 }

 p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
@@ -257,7 +197,7 @@ func testWindowPostUpgrade(t *testing.T, b kit.APIBuilder, blocktime time.Durati
 require.Equal(t, p.MinerPower, p.TotalPower)

 sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
-require.Equal(t, nSectors+kit.GenesisPreseals-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged
+require.Equal(t, nSectors+kit.DefaultPresealsPerBootstrapMiner-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged
 }

 func TestWindowPostBaseFeeNoBurn(t *testing.T) {
@@ -276,11 +216,8 @@ func TestWindowPostBaseFeeNoBurn(t *testing.T) {
 och := build.UpgradeClausHeight
 build.UpgradeClausHeight = 10

-n, sn := kit.MockMinerBuilder(t, kit.DefaultFullOpts(1), kit.OneMiner)
+client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
-client := n[0].FullNode.(*impl.FullNodeAPI)
+ens.InterconnectAll().BeginMining(blocktime)
-miner := sn[0]
-bm := kit.ConnectAndStartMining(t, blocktime, miner, client)
-t.Cleanup(bm.Stop)

 maddr, err := miner.ActorAddress(ctx)
 require.NoError(t, err)
@@ -288,7 +225,7 @@ func TestWindowPostBaseFeeNoBurn(t *testing.T) {
 mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
 require.NoError(t, err)

-kit.PledgeSectors(t, ctx, miner, nSectors, 0, nil)
+miner.PledgeSectors(ctx, nSectors, 0, nil)
 wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
 require.NoError(t, err)
 en := wact.Nonce
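The base-fee tests above snapshot the worker's nonce before pledging and compare it after a proving period to locate the WindowPoSt messages. A stripped-down sketch of that bookkeeping, built only from calls visible in this diff; the test name, block time, sector count, and the loose final assertion are illustrative (the real tests go on to inspect the gas and burn of the actual SubmitWindowedPoSt messages).

    package itests

    import (
    	"context"
    	"testing"
    	"time"

    	"github.com/stretchr/testify/require"

    	"github.com/filecoin-project/lotus/chain/types"
    	"github.com/filecoin-project/lotus/itests/kit"
    )

    // TestWorkerNonceSketch is a hypothetical example of nonce-based message counting.
    func TestWorkerNonceSketch(t *testing.T) {
    	ctx := context.Background()

    	client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
    	ens.InterconnectAll().BeginMining(2 * time.Millisecond)

    	maddr, err := miner.ActorAddress(ctx)
    	require.NoError(t, err)
    	mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
    	require.NoError(t, err)

    	// Snapshot the worker's nonce, then pledge and cross a proving period.
    	wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
    	require.NoError(t, err)
    	en := wact.Nonce

    	miner.PledgeSectors(ctx, 2, 0, nil)

    	di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
    	require.NoError(t, err)
    	client.WaitTillChain(ctx, kit.HeightAtLeast(di.PeriodStart+di.WPoStProvingPeriod+2))

    	// The worker must have sent at least some messages in between.
    	wact, err = client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
    	require.NoError(t, err)
    	require.Greater(t, wact.Nonce, en)
    }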
@@ -327,11 +264,9 @@ func TestWindowPostBaseFeeBurn(t *testing.T) {

 blocktime := 2 * time.Millisecond

-n, sn := kit.MockMinerBuilder(t, []kit.FullNodeOpts{kit.FullNodeWithLatestActorsAt(-1)}, kit.OneMiner)
+opts := kit.ConstructorOpts(kit.LatestActorsAt(-1))
-client := n[0].FullNode.(*impl.FullNodeAPI)
+client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
-miner := sn[0]
+ens.InterconnectAll().BeginMining(blocktime)
-bm := kit.ConnectAndStartMining(t, blocktime, miner, client)
-t.Cleanup(bm.Stop)

 maddr, err := miner.ActorAddress(ctx)
 require.NoError(t, err)
@@ -339,7 +274,7 @@ func TestWindowPostBaseFeeBurn(t *testing.T) {
 mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
 require.NoError(t, err)

-kit.PledgeSectors(t, ctx, miner, 10, 0, nil)
+miner.PledgeSectors(ctx, 10, 0, nil)
 wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
 require.NoError(t, err)
 en := wact.Nonce