deals tests: migrate deals cycles tests and add coverage.

parent dcd6fc239b
commit 8b037e2da3
@@ -3,8 +3,7 @@ package itests
 import (
 	"bytes"
 	"context"
-	"io/ioutil"
-	"os"
+	"fmt"
 	"path/filepath"
 	"testing"
 	"time"
@@ -20,36 +19,54 @@ import (
 	"github.com/filecoin-project/lotus/node"
 	market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
 	"github.com/stretchr/testify/require"
+	"golang.org/x/sync/errgroup"
 )

-//
-// func TestDealCycle(t *testing.T) {
-// kit.QuietMiningLogs()
-//
-// blockTime := 10 * time.Millisecond
-//
-// // For these tests where the block time is artificially short, just use
-// // a deal start epoch that is guaranteed to be far enough in the future
-// // so that the deal starts sealing in time
-// dealStartEpoch := abi.ChainEpoch(2 << 12)
-//
-// t.Run("TestFullDealCycle_Single", func(t *testing.T) {
-// runFullDealCycles(t, 1, kit.MockMinerBuilder, blockTime, false, false, dealStartEpoch)
-// })
-// t.Run("TestFullDealCycle_Two", func(t *testing.T) {
-// runFullDealCycles(t, 2, kit.MockMinerBuilder, blockTime, false, false, dealStartEpoch)
-// })
-// t.Run("WithExportedCAR", func(t *testing.T) {
-// runFullDealCycles(t, 1, kit.MockMinerBuilder, blockTime, true, false, dealStartEpoch)
-// })
-// t.Run("TestFastRetrievalDealCycle", func(t *testing.T) {
-// runFastRetrievalDealFlowT(t, kit.MockMinerBuilder, blockTime, dealStartEpoch)
-// })
-// t.Run("TestZeroPricePerByteRetrievalDealFlow", func(t *testing.T) {
-// runZeroPricePerByteRetrievalDealFlow(t, kit.MockMinerBuilder, blockTime, dealStartEpoch)
-// })
-// }
-//
+func TestDealCyclesConcurrent(t *testing.T) {
+	kit.QuietMiningLogs()
+
+	blockTime := 10 * time.Millisecond
+
+	// For these tests where the block time is artificially short, just use
+	// a deal start epoch that is guaranteed to be far enough in the future
+	// so that the deal starts sealing in time
+	dealStartEpoch := abi.ChainEpoch(2 << 12)
+
+	runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) {
+		client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
+		ens.InterconnectAll().BeginMining(blockTime)
+		dh := kit.NewDealHarness(t, client, miner)
+
+		errgrp, _ := errgroup.WithContext(context.Background())
+		for i := 0; i < n; i++ {
+			i := i
+			errgrp.Go(func() (err error) {
+				defer func() {
+					// This is necessary because we use require, which invokes t.Fatal,
+					// and that's not
+					if r := recover(); r != nil {
+						err = fmt.Errorf("deal failed: %s", r)
+					}
+				}()
+				deal, res, inPath := dh.MakeOnlineDeal(context.Background(), 5+i, fastRetrieval, dealStartEpoch)
+				outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, carExport)
+				kit.FilesEqual(t, inPath, outPath)
+				return nil
+			})
+		}
+		require.NoError(t, errgrp.Wait())
+	}
+
+	cycles := []int{1, 2, 4, 8}
+	for _, n := range cycles {
+		ns := fmt.Sprintf("%d", n)
+		t.Run(ns+"-fastretrieval-CAR", func(t *testing.T) { runTest(t, n, true, true) })
+		t.Run(ns+"-fastretrieval-NoCAR", func(t *testing.T) { runTest(t, n, true, false) })
+		t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, true, false) })
+		t.Run(ns+"-stdretrieval-NoCAR", func(t *testing.T) { runTest(t, n, false, false) })
+	}
+}
+
 // func TestAPIDealFlowReal(t *testing.T) {
 // if testing.Short() {
 // t.Skip("skipping test in short mode")
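The heart of the new test is running several deal cycles in parallel while still using require-style assertions inside goroutines. Below is a minimal, self-contained sketch of that errgroup-plus-recover pattern; mustWork and the package name are illustrative stand-ins, not part of the commit.

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

// mustWork stands in for a helper that aborts by panicking on failure.
func mustWork(i int) {
	if i == 3 {
		panic(fmt.Sprintf("task %d failed", i))
	}
}

func main() {
	var errgrp errgroup.Group
	for i := 0; i < 8; i++ {
		i := i // capture the loop variable for the closure
		errgrp.Go(func() (err error) {
			// Convert a panic raised inside the goroutine into an error so
			// errgrp.Wait can report it instead of crashing the process.
			defer func() {
				if r := recover(); r != nil {
					err = fmt.Errorf("task failed: %s", r)
				}
			}()
			mustWork(i)
			return nil
		})
	}
	if err := errgrp.Wait(); err != nil {
		fmt.Println("at least one task failed:", err)
	}
}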
@@ -101,8 +118,7 @@ func TestPublishDealsBatching(t *testing.T) {

 	// Starts a deal and waits until it's published
 	runDealTillPublish := func(rseed int) {
-		res, _, _, err := kit.CreateImportFile(ctx, client, rseed, 0)
-		require.NoError(t, err)
+		res, _ := client.CreateImportFile(ctx, rseed, 0)

 		upds, err := client.ClientGetDealUpdates(ctx)
 		require.NoError(t, err)
@@ -189,10 +205,9 @@ func TestFirstDealEnablesMining(t *testing.T) {

 	ctx := context.Background()

-	dh := kit.NewDealHarness(t, client, &provider)
+	dh := kit.NewDealHarness(t, &client, &provider)

-	ref, _, _, err := kit.CreateImportFile(ctx, client, 5, 0)
-	require.NoError(t, err)
+	ref, _ := client.CreateImportFile(ctx, 5, 0)

 	t.Log("FILE CID:", ref.Root)

@@ -204,6 +219,8 @@ func TestFirstDealEnablesMining(t *testing.T) {
 	// we pass the test.
 	providerMined := make(chan struct{})
 	heads, err := client.ChainNotify(ctx)
+	require.NoError(t, err)
+
 	go func() {
 		for chg := range heads {
 			for _, c := range chg {
@@ -247,14 +264,13 @@ func TestOfflineDealFlow(t *testing.T) {
 		dh := kit.NewDealHarness(t, client, miner)

 		// Create a random file and import on the client.
-		res, path, data, err := kit.CreateImportFile(ctx, client, 1, 0)
-		require.NoError(t, err)
+		res, inFile := client.CreateImportFile(ctx, 1, 0)

 		// Get the piece size and commP
-		fcid := res.Root
-		pieceInfo, err := client.ClientDealPieceCID(ctx, fcid)
+		rootCid := res.Root
+		pieceInfo, err := client.ClientDealPieceCID(ctx, rootCid)
 		require.NoError(t, err)
-		t.Log("FILE CID:", fcid)
+		t.Log("FILE CID:", rootCid)

 		// Create a storage deal with the miner
 		maddr, err := miner.ActorAddress(ctx)
@@ -266,7 +282,7 @@ func TestOfflineDealFlow(t *testing.T) {
 		// Manual storage deal (offline deal)
 		dataRef := &storagemarket.DataRef{
 			TransferType: storagemarket.TTManual,
-			Root:         fcid,
+			Root:         rootCid,
 			PieceCid:     &pieceInfo.PieceCID,
 			PieceSize:    pieceInfo.PieceSize.Unpadded(),
 		}
@@ -291,11 +307,9 @@ func TestOfflineDealFlow(t *testing.T) {
 		}, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State])

 		// Create a CAR file from the raw file
-		carFileDir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-car")
-		require.NoError(t, err)
-
+		carFileDir := t.TempDir()
 		carFilePath := filepath.Join(carFileDir, "out.car")
-		err = client.ClientGenCar(ctx, api.FileRef{Path: path}, carFilePath)
+		err = client.ClientGenCar(ctx, api.FileRef{Path: inFile}, carFilePath)
 		require.NoError(t, err)

 		// Import the CAR file on the miner - this is the equivalent to
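The change above is part of a broader move from ioutil.TempDir plus manual error handling to the testing package's own t.TempDir. A small hypothetical test showing the shape of that pattern with nothing but the standard library:

package example

import (
	"os"
	"path/filepath"
	"testing"
)

func TestTempDirExample(t *testing.T) {
	// t.TempDir returns a fresh per-test directory and registers its removal
	// with t.Cleanup, so no TempDir error check or manual RemoveAll is needed.
	dir := t.TempDir()

	out := filepath.Join(dir, "out.car")
	if err := os.WriteFile(out, []byte("payload"), 0o644); err != nil {
		t.Fatal(err)
	}
}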
@@ -309,29 +323,16 @@ func TestOfflineDealFlow(t *testing.T) {
 		t.Logf("deal published, retrieving")

 		// Retrieve the deal
-		dh.PerformRetrieval(ctx, fcid, &pieceInfo.PieceCID, false, data)
+		outFile := dh.PerformRetrieval(ctx, proposalCid, rootCid, false)
+
+		equal := kit.FilesEqual(t, inFile, outFile)
+		require.True(t, equal)
 	}

-	t.Run("NormalRetrieval", func(t *testing.T) {
-		runTest(t, false)
-	})
-	t.Run("FastRetrieval", func(t *testing.T) {
-		runTest(t, true)
-	})
-
+	t.Run("NormalRetrieval", func(t *testing.T) { runTest(t, false) })
+	t.Run("FastRetrieval", func(t *testing.T) { runTest(t, true) })
 }

-//
-// func runFullDealCycles(t *testing.T, n int, b kit.APIBuilder, blocktime time.Duration, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
-// full, _, ens := kit.EnsembleMinimal(t)
-// ens.BeginMining()
-// dh := kit.NewDealHarness(t, client, miner)
-//
-// baseseed := 6
-// for i := 0; i < n; i++ {
-// dh.MakeOnlineDeal(context.Background(), baseseed+i, carExport, fastRet, startEpoch)
-// }
-// }
 //
 // func runFastRetrievalDealFlowT(t *testing.T, b kit.APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
 // ctx := context.Background()
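The one-liner subtests at the end of this hunk follow the usual pattern of parameterising a single closure over t.Run; a stripped-down, hypothetical version:

package example

import "testing"

func TestRetrievalModes(t *testing.T) {
	// One shared closure, one named subtest per parameter; each t.Run line
	// reports its own pass/fail status and can be selected with -run.
	runTest := func(t *testing.T, fastRetrieval bool) {
		_ = fastRetrieval // the deal/retrieval flow under test goes here
	}

	t.Run("NormalRetrieval", func(t *testing.T) { runTest(t, false) })
	t.Run("FastRetrieval", func(t *testing.T) { runTest(t, true) })
}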
@@ -93,7 +93,7 @@ func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn *TestFullNode, cb f

 	if success {
 		// Wait until it shows up on the given full nodes ChainHead
-		nloops := 50
+		nloops := 200
 		for i := 0; i < nloops; i++ {
 			ts, err := fn.ChainHead(ctx)
 			require.NoError(bm.t, err)
@@ -3,16 +3,12 @@ package kit
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
-	"math/rand"
-	"os"
 	"path/filepath"
 	"regexp"
 	"strings"
 	"testing"
 	"time"

-	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/specs-actors/v2/actors/builtin"
@@ -43,7 +39,7 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode TestFullNode)

 	// Create a deal (non-interactive)
 	// client deal --start-epoch=<start epoch> <cid> <Miner addr> 1000000attofil <duration>
-	res, _, _, err := CreateImportFile(ctx, clientNode, 1, 0)
+	res, _ := clientNode.CreateImportFile(ctx, 1, 0)

 	require.NoError(t, err)
 	startEpoch := fmt.Sprintf("--start-epoch=%d", 2<<12)
@@ -60,7 +56,7 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode TestFullNode)
 	// <miner addr>
 	// "no" (verified Client)
 	// "yes" (confirm deal)
-	res, _, _, err = CreateImportFile(ctx, clientNode, 2, 0)
+	res, _ = clientNode.CreateImportFile(ctx, 2, 0)
 	require.NoError(t, err)
 	dataCid2 := res.Root
 	duration = fmt.Sprintf("%d", build.MinDealDuration/builtin.EpochsInDay)
@@ -103,44 +99,9 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode TestFullNode)

 	// Retrieve the first file from the Miner
 	// client retrieve <cid> <file path>
-	tmpdir, err := ioutil.TempDir(os.TempDir(), "test-cli-Client")
-	require.NoError(t, err)
+	tmpdir := t.TempDir()
 	path := filepath.Join(tmpdir, "outfile.dat")
 	out = clientCLI.RunCmd("client", "retrieve", dataCid.String(), path)
 	fmt.Println("retrieve:\n", out)
 	require.Regexp(t, regexp.MustCompile("Success"), out)
 }
-
-func CreateImportFile(ctx context.Context, client api.FullNode, rseed int, size int) (res *api.ImportRes, path string, data []byte, err error) {
-	data, path, err = createRandomFile(rseed, size)
-	if err != nil {
-		return nil, "", nil, err
-	}
-
-	res, err = client.ClientImport(ctx, api.FileRef{Path: path})
-	if err != nil {
-		return nil, "", nil, err
-	}
-	return res, path, data, nil
-}
-
-func createRandomFile(rseed, size int) ([]byte, string, error) {
-	if size == 0 {
-		size = 1600
-	}
-	data := make([]byte, size)
-	rand.New(rand.NewSource(int64(rseed))).Read(data)
-
-	dir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-")
-	if err != nil {
-		return nil, "", err
-	}
-
-	path := filepath.Join(dir, "sourcefile.dat")
-	err = ioutil.WriteFile(path, data, 0644)
-	if err != nil {
-		return nil, "", err
-	}
-
-	return data, path, nil
-}
@@ -4,8 +4,6 @@ import (
 	"bytes"
 	"context"
 	"io/ioutil"
-	"os"
-	"path/filepath"
 	"testing"
 	"time"

@@ -20,7 +18,6 @@ import (
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/types"
 	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
-	"github.com/filecoin-project/lotus/node/impl"
 	ipld "github.com/ipfs/go-ipld-format"
 	dag "github.com/ipfs/go-merkledag"
 	dstest "github.com/ipfs/go-merkledag/test"
@@ -29,12 +26,12 @@ import (

 type DealHarness struct {
 	t      *testing.T
-	client api.FullNode
+	client *TestFullNode
 	miner  *TestMiner
 }

 // NewDealHarness creates a test harness that contains testing utilities for deals.
-func NewDealHarness(t *testing.T, client api.FullNode, miner *TestMiner) *DealHarness {
+func NewDealHarness(t *testing.T, client *TestFullNode, miner *TestMiner) *DealHarness {
 	return &DealHarness{
 		t:      t,
 		client: client,
@@ -42,24 +39,18 @@ func NewDealHarness(t *testing.T, client api.FullNode, miner *TestMiner) *DealHa
 	}
 }

-func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, rseed int, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
-	res, _, data, err := CreateImportFile(ctx, dh.client, rseed, 0)
-	require.NoError(dh.t, err)
+func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, rseed int, fastRet bool, startEpoch abi.ChainEpoch) (deal *cid.Cid, res *api.ImportRes, path string) {
+	res, path = dh.client.CreateImportFile(ctx, rseed, 0)

-	fcid := res.Root
-	dh.t.Logf("FILE CID: %s", fcid)
+	dh.t.Logf("FILE CID: %s", res.Root)

-	deal := dh.StartDeal(ctx, fcid, fastRet, startEpoch)
+	deal = dh.StartDeal(ctx, res.Root, fastRet, startEpoch)

 	// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
 	time.Sleep(time.Second)
 	dh.WaitDealSealed(ctx, deal, false, false, nil)

-	// Retrieval
-	info, err := dh.client.ClientGetDealInfo(ctx, *deal)
-	require.NoError(dh.t, err)
-
-	dh.PerformRetrieval(ctx, fcid, &info.PieceCID, carExport, data)
+	return deal, res, path
 }

 func (dh *DealHarness) StartDeal(ctx context.Context, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid {
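With MakeOnlineDeal now returning the deal CID, import result and input path, a full cycle collapses to a few harness calls. A sketch of a single cycle using only calls that appear in this diff; the test name and the import paths (itests/kit, go-state-types/abi) are assumptions about the repo layout, not part of the commit:

package itests

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi" // assumed import path
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/itests/kit" // assumed import path
)

func TestSingleDealCycleSketch(t *testing.T) {
	kit.QuietMiningLogs()

	client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
	ens.InterconnectAll().BeginMining(10 * time.Millisecond)

	dh := kit.NewDealHarness(t, client, miner)

	// Make one online deal, retrieve it, and compare input and output files.
	deal, res, inPath := dh.MakeOnlineDeal(context.Background(), 5, true, abi.ChainEpoch(2<<12))
	outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
	require.True(t, kit.FilesEqual(t, inPath, outPath))
}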
@@ -177,22 +168,25 @@ func (dh *DealHarness) StartSealingWaiting(ctx context.Context) {
 	}
 }

-func (dh *DealHarness) PerformRetrieval(ctx context.Context, fcid cid.Cid, piece *cid.Cid, carExport bool, expect []byte) {
-	offers, err := dh.client.ClientFindData(ctx, fcid, piece)
+func (dh *DealHarness) PerformRetrieval(ctx context.Context, deal *cid.Cid, root cid.Cid, carExport bool) (path string) {
+	// perform retrieval.
+	info, err := dh.client.ClientGetDealInfo(ctx, *deal)
 	require.NoError(dh.t, err)

+	offers, err := dh.client.ClientFindData(ctx, root, &info.PieceCID)
+	require.NoError(dh.t, err)
 	require.NotEmpty(dh.t, offers, "no offers")

-	rpath, err := ioutil.TempDir("", "lotus-retrieve-test-")
+	tmpfile, err := ioutil.TempFile(dh.t.TempDir(), "ret-car")
 	require.NoError(dh.t, err)

-	defer os.RemoveAll(rpath) //nolint:errcheck
+	defer tmpfile.Close()

 	caddr, err := dh.client.WalletDefaultAddress(ctx)
 	require.NoError(dh.t, err)

 	ref := &api.FileRef{
-		Path:  filepath.Join(rpath, "ret"),
+		Path:  tmpfile.Name(),
 		IsCAR: carExport,
 	}

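The scratch-file handling here follows the same idea as the earlier t.TempDir change: root temporary files inside the test's own directory so removal is automatic. A hypothetical standalone version:

package example

import (
	"io/ioutil"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestTempFileExample(t *testing.T) {
	// The file lives inside t.TempDir(), so the framework deletes it with the
	// directory; the test only needs to close the handle it opened.
	tmpfile, err := ioutil.TempFile(t.TempDir(), "ret-car")
	require.NoError(t, err)
	defer tmpfile.Close()

	_, err = tmpfile.WriteString("retrieved payload would land here")
	require.NoError(t, err)
	t.Log("scratch file:", tmpfile.Name())
}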
@@ -203,19 +197,17 @@ func (dh *DealHarness) PerformRetrieval(ctx context.Context, fcid cid.Cid, piece
 		require.Emptyf(dh.t, update.Err, "retrieval failed: %s", update.Err)
 	}

-	rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret"))
+	rdata, err := ioutil.ReadFile(tmpfile.Name())
 	require.NoError(dh.t, err)

 	if carExport {
-		rdata = dh.ExtractCarData(ctx, rdata, rpath)
+		rdata = dh.ExtractFileFromCAR(ctx, rdata)
 	}

-	if !bytes.Equal(rdata, expect) {
-		dh.t.Fatal("wrong expect retrieved")
-	}
+	return tmpfile.Name()
 }

-func (dh *DealHarness) ExtractCarData(ctx context.Context, rdata []byte, rpath string) []byte {
+func (dh *DealHarness) ExtractFileFromCAR(ctx context.Context, rdata []byte) []byte {
 	bserv := dstest.Bserv()
 	ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata))
 	require.NoError(dh.t, err)
@@ -230,23 +222,20 @@ func (dh *DealHarness) ExtractCarData(ctx context.Context, rdata []byte, rpath s
 	fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
 	require.NoError(dh.t, err)

-	outPath := filepath.Join(rpath, "retLoadedCAR")
-	err = files.WriteTo(fil, outPath)
+	tmpfile, err := ioutil.TempFile(dh.t.TempDir(), "file-in-car")
 	require.NoError(dh.t, err)

-	rdata, err = ioutil.ReadFile(outPath)
+	defer tmpfile.Close()
+
+	err = files.WriteTo(fil, tmpfile.Name())
+	require.NoError(dh.t, err)
+
+	rdata, err = ioutil.ReadFile(tmpfile.Name())
 	require.NoError(dh.t, err)

 	return rdata
 }

-type DealsScaffold struct {
-	Ctx        context.Context
-	Client     *impl.FullNodeAPI
-	Miner      TestMiner
-	BlockMiner *BlockMiner
-}
-
 func ConnectAndStartMining(t *testing.T, blocktime time.Duration, miner *TestMiner, clients ...api.FullNode) *BlockMiner {
 	ctx := context.Background()

itests/kit/files.go (new file, 57 lines)
@@ -0,0 +1,57 @@
+package kit
+
+import (
+	"bytes"
+	"io"
+	"math/rand"
+	"os"
+	"testing"
+
+	"github.com/minio/blake2b-simd"
+
+	"github.com/stretchr/testify/require"
+)
+
+// CreateRandomFile creates a random file with the provided seed and the
+// provided size.
+func CreateRandomFile(t *testing.T, rseed, size int) (path string) {
+	if size == 0 {
+		size = 1600
+	}
+
+	source := io.LimitReader(rand.New(rand.NewSource(int64(rseed))), int64(size))
+
+	file, err := os.CreateTemp(t.TempDir(), "sourcefile.dat")
+	require.NoError(t, err)
+
+	n, err := io.Copy(file, source)
+	require.NoError(t, err)
+	require.EqualValues(t, n, size)
+
+	return file.Name()
+}
+
+// FilesEqual compares two files by blake2b hash equality.
+func FilesEqual(t *testing.T, left, right string) bool {
+	// initialize hashes.
+	leftH, rightH := blake2b.New256(), blake2b.New256()
+
+	// open files.
+	leftF, err := os.Open(left)
+	require.NoError(t, err)
+
+	rightF, err := os.Open(right)
+	require.NoError(t, err)
+
+	// feed hash functions.
+	_, err = io.Copy(leftH, leftF)
+	require.NoError(t, err)
+
+	_, err = io.Copy(rightH, rightF)
+	require.NoError(t, err)
+
+	// compute digests.
+	leftD, rightD := leftH.Sum(nil), rightH.Sum(nil)
+
+	return bytes.Equal(leftD, rightD)
+}
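A usage sketch for the two new helpers: CreateRandomFile seeds math/rand deterministically, so the same (seed, size) pair always yields identical bytes and FilesEqual's blake2b digests match. The test name and kit import path are assumptions:

package itests

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/itests/kit" // assumed import path
)

func TestFilesEqualSketch(t *testing.T) {
	// Same seed and size: identical contents, equal digests.
	a := kit.CreateRandomFile(t, 7, 2048)
	b := kit.CreateRandomFile(t, 7, 2048)
	require.True(t, kit.FilesEqual(t, a, b))

	// Different seed: contents diverge, so the digests differ.
	c := kit.CreateRandomFile(t, 8, 2048)
	require.False(t, kit.FilesEqual(t, a, c))
}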
@@ -17,9 +17,13 @@ func init() {
 	policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
 	policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))

-	err := os.Setenv("BELLMAN_NO_GPU", "1")
-	if err != nil {
+	build.InsecurePoStValidation = true
+
+	if err := os.Setenv("BELLMAN_NO_GPU", "1"); err != nil {
 		panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
 	}
-	build.InsecurePoStValidation = true
+
+	if err := os.Setenv("LOTUS_DISABLE_WATCHDOG", "1"); err != nil {
+		panic(fmt.Sprintf("failed to set LOTUS_DISABLE_WATCHDOG env variable: %s", err))
+	}
 }
@@ -1,11 +1,14 @@
 package kit

 import (
+	"context"
 	"testing"

+	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/api/v1api"
 	"github.com/filecoin-project/lotus/chain/wallet"
 	"github.com/multiformats/go-multiaddr"
+	"github.com/stretchr/testify/require"
 )

 type TestFullNode struct {
@@ -20,3 +23,10 @@ type TestFullNode struct {

 	options NodeOpts
 }
+
+func (f *TestFullNode) CreateImportFile(ctx context.Context, rseed int, size int) (res *api.ImportRes, path string) {
+	path = CreateRandomFile(f.t, rseed, size)
+	res, err := f.ClientImport(ctx, api.FileRef{Path: path})
+	require.NoError(f.t, err)
+	return res, path
+}
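Finally, a sketch of the new TestFullNode.CreateImportFile helper in use, relying only on calls shown in this diff (test name and import path assumed; no mining is needed just to import a file):

package itests

import (
	"context"
	"testing"

	"github.com/filecoin-project/lotus/itests/kit" // assumed import path
)

func TestCreateImportFileSketch(t *testing.T) {
	ctx := context.Background()
	client, _, _ := kit.EnsembleMinimal(t, kit.MockProofs())

	// Seed 1, size 0: CreateRandomFile falls back to its 1600-byte default;
	// the node imports the file and asserts internally on any failure.
	res, path := client.CreateImportFile(ctx, 1, 0)
	t.Logf("imported root %s from %s", res.Root, path)
}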