// Package test contains integration tests for lotus storage deal flows
// between a full-node client and a storage miner.
package test
import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"math/rand"
	"os"
	"path/filepath"
	"sort"
	"testing"
	"time"

	"github.com/ipfs/go-cid"
	files "github.com/ipfs/go-ipfs-files"
	ipld "github.com/ipfs/go-ipld-format"
	dag "github.com/ipfs/go-merkledag"
	dstest "github.com/ipfs/go-merkledag/test"
	unixfile "github.com/ipfs/go-unixfs/file"
	"github.com/ipld/go-car"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
	"github.com/filecoin-project/lotus/markets/storageadapter"
	"github.com/filecoin-project/lotus/node"
	"github.com/filecoin-project/lotus/node/impl"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
)
func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
2021-01-19 10:07:28 +00:00
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
2019-11-07 00:18:06 +00:00
2021-01-19 10:07:28 +00:00
MakeDeal(t, s.ctx, 6, s.client, s.miner, carExport, fastRet, startEpoch)
}
func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
2021-01-19 10:07:28 +00:00
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
2021-01-19 10:07:28 +00:00
MakeDeal(t, s.ctx, 6, s.client, s.miner, false, false, startEpoch)
MakeDeal(t, s.ctx, 7, s.client, s.miner, false, false, startEpoch)
}
// MakeDeal stores a deterministic random file (seeded with rseed) via a
// storage deal with the given miner, then retrieves it and verifies the
// returned bytes match. carExport and fastRet are threaded through to the
// deal and retrieval helpers.
func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode, miner TestStorageNode, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
	data, info, fcid := mkStorageDeal(t, ctx, rseed, client, miner, carExport, fastRet, startEpoch)
	testRetrieval(t, ctx, client, fcid, &info.PieceCID, carExport, data)
}
func mkStorageDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode, miner TestStorageNode, carExport, fastRet bool, startEpoch abi.ChainEpoch) ([]byte,
*api.DealInfo, cid.Cid) {
res, data, err := CreateClientFile(ctx, client, rseed, 0)
if err != nil {
t.Fatal(err)
}
fcid := res.Root
fmt.Println("FILE CID: ", fcid)
deal := startDeal(t, ctx, miner, client, fcid, fastRet, startEpoch)
2020-04-23 17:50:52 +00:00
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
2021-05-30 13:13:38 +00:00
waitDealSealed(t, ctx, miner, client, deal, false, false, nil)
2020-04-23 17:50:52 +00:00
// Retrieval
2020-07-09 16:29:57 +00:00
info, err := client.ClientGetDealInfo(ctx, *deal)
require.NoError(t, err)
2020-04-23 17:50:52 +00:00
return data, info, fcid
2020-04-23 17:50:52 +00:00
}
func CreateClientFile(ctx context.Context, client api.FullNode, rseed, size int) (*api.ImportRes, []byte, error) {
2021-04-21 14:39:41 +00:00
data, path, err := createRandomFile(rseed, size)
if err != nil {
return nil, nil, err
}
res, err := client.ClientImport(ctx, api.FileRef{Path: path})
if err != nil {
return nil, nil, err
}
return res, data, nil
}
func createRandomFile(rseed, size int) ([]byte, string, error) {
if size == 0 {
size = 1600
}
data := make([]byte, size)
rand.New(rand.NewSource(int64(rseed))).Read(data)
dir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-")
if err != nil {
2021-04-21 14:39:41 +00:00
return nil, "", err
}
path := filepath.Join(dir, "sourcefile.dat")
err = ioutil.WriteFile(path, data, 0644)
if err != nil {
2021-04-21 14:39:41 +00:00
return nil, "", err
}
2021-04-21 14:39:41 +00:00
return data, path, nil
}
2021-01-08 15:28:38 +00:00
func TestPublishDealsBatching(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
publishPeriod := 10 * time.Second
maxDealsPerMsg := uint64(2)
// Set max deals per publish deals message to 2
minerDef := []StorageMiner{{
Full: 0,
Opts: node.Override(
new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
2021-01-08 15:28:38 +00:00
MaxDealsPerMsg: maxDealsPerMsg,
})),
Preseal: PresealGenesis,
}}
// Create a connect client and miner node
n, sn := b(t, OneFull, minerDef)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
s := connectAndStartMining(t, b, blocktime, client, miner)
defer s.blockMiner.Stop()
// Starts a deal and waits until it's published
runDealTillPublish := func(rseed int) {
res, _, err := CreateClientFile(s.ctx, s.client, rseed, 0)
2021-01-08 15:28:38 +00:00
require.NoError(t, err)
upds, err := client.ClientGetDealUpdates(s.ctx)
require.NoError(t, err)
startDeal(t, s.ctx, s.miner, s.client, res.Root, false, startEpoch)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
done := make(chan struct{})
go func() {
for upd := range upds {
if upd.DataRef.Root == res.Root && upd.State == storagemarket.StorageDealAwaitingPreCommit {
done <- struct{}{}
}
}
}()
<-done
}
// Run three deals in parallel
done := make(chan struct{}, maxDealsPerMsg+1)
for rseed := 1; rseed <= 3; rseed++ {
rseed := rseed
go func() {
runDealTillPublish(rseed)
done <- struct{}{}
}()
}
// Wait for two of the deals to be published
for i := 0; i < int(maxDealsPerMsg); i++ {
<-done
}
// Expect a single PublishStorageDeals message that includes the first two deals
msgCids, err := s.client.StateListMessages(s.ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1)
require.NoError(t, err)
count := 0
for _, msgCid := range msgCids {
msg, err := s.client.ChainGetMessage(s.ctx, msgCid)
require.NoError(t, err)
if msg.Method == market.Methods.PublishStorageDeals {
count++
var pubDealsParams market2.PublishStorageDealsParams
err = pubDealsParams.UnmarshalCBOR(bytes.NewReader(msg.Params))
require.NoError(t, err)
require.Len(t, pubDealsParams.Deals, int(maxDealsPerMsg))
}
}
require.Equal(t, 1, count)
// The third deal should be published once the publish period expires.
// Allow a little padding as it takes a moment for the state change to
// be noticed by the client.
padding := 10 * time.Second
select {
case <-time.After(publishPeriod + padding):
require.Fail(t, "Expected 3rd deal to be published once publish period elapsed")
case <-done: // Success
}
}
2021-03-12 14:41:46 +00:00
func TestBatchDealInput(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
run := func(piece, deals, expectSectors int) func(t *testing.T) {
return func(t *testing.T) {
publishPeriod := 10 * time.Second
maxDealsPerMsg := uint64(deals)
// Set max deals per publish deals message to maxDealsPerMsg
minerDef := []StorageMiner{{
Full: 0,
Opts: node.Options(
node.Override(
new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
})),
node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
return func() (sealiface.Config, error) {
return sealiface.Config{
2021-05-30 13:13:38 +00:00
MaxWaitDealsSectors: 2,
MaxSealingSectors: 1,
2021-05-30 13:13:38 +00:00
MaxSealingSectorsForDeals: 3,
AlwaysKeepUnsealedCopy: true,
2021-05-30 13:13:38 +00:00
WaitDealsDelay: time.Hour,
}, nil
}, nil
}),
),
Preseal: PresealGenesis,
}}
// Create a connect client and miner node
n, sn := b(t, OneFull, minerDef)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
s := connectAndStartMining(t, b, blocktime, client, miner)
defer s.blockMiner.Stop()
2021-05-30 17:26:53 +00:00
err := miner.MarketSetAsk(s.ctx, big.Zero(), big.Zero(), 200, 128, 32<<30)
require.NoError(t, err)
2021-03-12 14:41:46 +00:00
2021-05-30 13:13:38 +00:00
checkNoPadding := func() {
sl, err := sn[0].SectorsList(s.ctx)
require.NoError(t, err)
2021-03-12 14:41:46 +00:00
2021-05-30 13:13:38 +00:00
sort.Slice(sl, func(i, j int) bool {
return sl[i] < sl[j]
})
2021-03-12 14:41:46 +00:00
2021-05-30 13:13:38 +00:00
for _, snum := range sl {
si, err := sn[0].SectorsStatus(s.ctx, snum, false)
require.NoError(t, err)
2021-03-12 14:41:46 +00:00
2021-05-30 17:26:53 +00:00
// fmt.Printf("S %d: %+v %s\n", snum, si.Deals, si.State)
2021-03-12 14:41:46 +00:00
2021-05-30 13:13:38 +00:00
for _, deal := range si.Deals {
if deal == 0 {
fmt.Printf("sector %d had a padding piece!\n", snum)
}
}
}
}
2021-03-12 14:41:46 +00:00
// Starts a deal and waits until it's published
runDealTillSeal := func(rseed int) {
res, _, err := CreateClientFile(s.ctx, s.client, rseed, piece)
require.NoError(t, err)
dc := startDeal(t, s.ctx, s.miner, s.client, res.Root, false, startEpoch)
2021-05-30 13:13:38 +00:00
waitDealSealed(t, s.ctx, s.miner, s.client, dc, false, true, checkNoPadding)
}
2021-03-12 14:41:46 +00:00
2021-05-30 13:13:38 +00:00
// Run maxDealsPerMsg deals in parallel
done := make(chan struct{}, maxDealsPerMsg)
for rseed := 0; rseed < int(maxDealsPerMsg); rseed++ {
rseed := rseed
go func() {
runDealTillSeal(rseed)
done <- struct{}{}
}()
}
2021-03-12 14:41:46 +00:00
// Wait for maxDealsPerMsg of the deals to be published
for i := 0; i < int(maxDealsPerMsg); i++ {
<-done
}
2021-03-12 14:41:46 +00:00
2021-05-30 13:13:38 +00:00
checkNoPadding()
sl, err := sn[0].SectorsList(s.ctx)
require.NoError(t, err)
2021-05-30 13:13:38 +00:00
require.Equal(t, len(sl), expectSectors)
}
2021-03-12 14:41:46 +00:00
}
t.Run("4-p1600B", run(1600, 4, 4))
t.Run("4-p513B", run(513, 4, 2))
2021-05-30 17:26:53 +00:00
if !testing.Short() {
t.Run("32-p257B", run(257, 32, 8))
t.Run("32-p10B", run(10, 32, 2))
// fixme: this appears to break data-transfer / markets in some really creative ways
//t.Run("128-p10B", run(10, 128, 8))
}
2021-03-12 14:41:46 +00:00
}
func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
2021-01-19 10:07:28 +00:00
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
2020-08-06 20:16:55 +00:00
data := make([]byte, 1600)
rand.New(rand.NewSource(int64(8))).Read(data)
r := bytes.NewReader(data)
2021-01-19 10:07:28 +00:00
fcid, err := s.client.ClientImportLocal(s.ctx, r)
2020-08-06 20:16:55 +00:00
if err != nil {
t.Fatal(err)
}
fmt.Println("FILE CID: ", fcid)
2021-01-19 10:07:28 +00:00
deal := startDeal(t, s.ctx, s.miner, s.client, fcid, true, startEpoch)
2020-08-06 20:16:55 +00:00
2021-01-19 10:07:28 +00:00
waitDealPublished(t, s.ctx, s.miner, deal)
2020-08-06 20:16:55 +00:00
fmt.Println("deal published, retrieving")
// Retrieval
2021-01-19 10:07:28 +00:00
info, err := s.client.ClientGetDealInfo(s.ctx, *deal)
2020-08-06 20:16:55 +00:00
require.NoError(t, err)
2021-01-19 10:07:28 +00:00
testRetrieval(t, s.ctx, s.client, fcid, &info.PieceCID, false, data)
2020-08-06 20:16:55 +00:00
}
func TestSecondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration) {
2021-01-19 10:07:28 +00:00
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
{
data1 := make([]byte, 800)
rand.New(rand.NewSource(int64(3))).Read(data1)
r := bytes.NewReader(data1)
2021-01-19 10:07:28 +00:00
fcid1, err := s.client.ClientImportLocal(s.ctx, r)
if err != nil {
t.Fatal(err)
}
data2 := make([]byte, 800)
rand.New(rand.NewSource(int64(9))).Read(data2)
r2 := bytes.NewReader(data2)
2021-01-19 10:07:28 +00:00
fcid2, err := s.client.ClientImportLocal(s.ctx, r2)
if err != nil {
t.Fatal(err)
}
2021-01-19 10:07:28 +00:00
deal1 := startDeal(t, s.ctx, s.miner, s.client, fcid1, true, 0)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
2021-05-30 13:13:38 +00:00
waitDealSealed(t, s.ctx, s.miner, s.client, deal1, true, false, nil)
2021-01-19 10:07:28 +00:00
deal2 := startDeal(t, s.ctx, s.miner, s.client, fcid2, true, 0)
time.Sleep(time.Second)
2021-05-30 13:13:38 +00:00
waitDealSealed(t, s.ctx, s.miner, s.client, deal2, false, false, nil)
// Retrieval
2021-01-19 10:07:28 +00:00
info, err := s.client.ClientGetDealInfo(s.ctx, *deal2)
require.NoError(t, err)
2021-01-19 10:07:28 +00:00
rf, _ := s.miner.SectorsRefs(s.ctx)
fmt.Printf("refs: %+v\n", rf)
2021-01-19 10:07:28 +00:00
testRetrieval(t, s.ctx, s.client, fcid2, &info.PieceCID, false, data2)
}
}
func TestNonUnsealedRetrievalQuoteForDefaultPricing(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
ppb := int64(1)
unsealPrice := int64(77)
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
// Set unsealed price to non-zero
ask, err := s.miner.MarketGetRetrievalAsk(s.ctx)
require.NoError(t, err)
ask.PricePerByte = abi.NewTokenAmount(ppb)
ask.UnsealPrice = abi.NewTokenAmount(unsealPrice)
err = s.miner.MarketSetRetrievalAsk(s.ctx, ask)
require.NoError(t, err)
_, info, fcid := mkStorageDeal(t, s.ctx, 6, s.client, s.miner, false, false, startEpoch)
2021-05-24 12:55:12 +00:00
// one more storage deal for the same data
_, _, fcid2 := mkStorageDeal(t, s.ctx, 6, s.client, s.miner, false, false, startEpoch)
require.Equal(t, fcid, fcid2)
// fetch quote -> zero for unsealed price since unsealed file already exists.
offers, err := s.client.ClientFindData(s.ctx, fcid, &info.PieceCID)
require.NoError(t, err)
2021-05-24 12:55:12 +00:00
require.Len(t, offers, 2)
require.Equal(t, offers[0], offers[1])
require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64())
require.Equal(t, info.Size*uint64(ppb), offers[0].MinPrice.Uint64())
2021-05-24 12:55:12 +00:00
// remove ONLY one unsealed file
ss, err := s.miner.StorageList(context.Background())
require.NoError(t, err)
_, err = s.miner.SectorsList(s.ctx)
require.NoError(t, err)
2021-05-24 12:55:12 +00:00
iLoop:
for storeID, sd := range ss {
for _, sector := range sd {
require.NoError(t, s.miner.StorageDropSector(s.ctx, storeID, sector.SectorID, storiface.FTUnsealed))
2021-05-24 12:55:12 +00:00
// remove ONLY one
break iLoop
}
}
2021-05-24 12:55:12 +00:00
// get retrieval quote -> zero for unsealed price as unsealed file exists.
offers, err = s.client.ClientFindData(s.ctx, fcid, &info.PieceCID)
require.NoError(t, err)
2021-05-24 12:55:12 +00:00
require.Len(t, offers, 2)
require.Equal(t, offers[0], offers[1])
require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64())
require.Equal(t, info.Size*uint64(ppb), offers[0].MinPrice.Uint64())
2021-05-24 12:55:12 +00:00
// remove the other unsealed file as well
ss, err = s.miner.StorageList(context.Background())
require.NoError(t, err)
_, err = s.miner.SectorsList(s.ctx)
require.NoError(t, err)
for storeID, sd := range ss {
for _, sector := range sd {
require.NoError(t, s.miner.StorageDropSector(s.ctx, storeID, sector.SectorID, storiface.FTUnsealed))
}
}
// fetch quote -> non-zero for unseal price as we no more unsealed files.
offers, err = s.client.ClientFindData(s.ctx, fcid, &info.PieceCID)
require.NoError(t, err)
require.Len(t, offers, 2)
require.Equal(t, offers[0], offers[1])
require.Equal(t, uint64(unsealPrice), offers[0].UnsealPrice.Uint64())
total := (info.Size * uint64(ppb)) + uint64(unsealPrice)
require.Equal(t, total, offers[0].MinPrice.Uint64())
}
2021-02-01 13:53:45 +00:00
func TestZeroPricePerByteRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
// Set price-per-byte to zero
ask, err := s.miner.MarketGetRetrievalAsk(s.ctx)
require.NoError(t, err)
ask.PricePerByte = abi.NewTokenAmount(0)
err = s.miner.MarketSetRetrievalAsk(s.ctx, ask)
require.NoError(t, err)
MakeDeal(t, s.ctx, 6, s.client, s.miner, false, false, startEpoch)
}
2021-04-21 14:39:41 +00:00
func TestOfflineDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch, fastRet bool) {
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
// Create a random file
data, path, err := createRandomFile(1, 0)
require.NoError(t, err)
// Import the file on the client
importRes, err := s.client.ClientImport(s.ctx, api.FileRef{Path: path})
require.NoError(t, err)
// Get the piece size and commP
fcid := importRes.Root
pieceInfo, err := s.client.ClientDealPieceCID(s.ctx, fcid)
require.NoError(t, err)
fmt.Println("FILE CID: ", fcid)
// Create a storage deal with the miner
maddr, err := s.miner.ActorAddress(s.ctx)
require.NoError(t, err)
addr, err := s.client.WalletDefaultAddress(s.ctx)
require.NoError(t, err)
// Manual storage deal (offline deal)
dataRef := &storagemarket.DataRef{
TransferType: storagemarket.TTManual,
Root: fcid,
PieceCid: &pieceInfo.PieceCID,
PieceSize: pieceInfo.PieceSize.Unpadded(),
}
proposalCid, err := s.client.ClientStartDeal(s.ctx, &api.StartDealParams{
Data: dataRef,
Wallet: addr,
Miner: maddr,
EpochPrice: types.NewInt(1000000),
DealStartEpoch: startEpoch,
MinBlocksDuration: uint64(build.MinDealDuration),
FastRetrieval: fastRet,
})
require.NoError(t, err)
// Wait for the deal to reach StorageDealCheckForAcceptance on the client
cd, err := s.client.ClientGetDealInfo(s.ctx, *proposalCid)
require.NoError(t, err)
require.Eventually(t, func() bool {
cd, _ := s.client.ClientGetDealInfo(s.ctx, *proposalCid)
return cd.State == storagemarket.StorageDealCheckForAcceptance
2021-06-07 11:15:52 +00:00
}, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State])
2021-04-21 14:39:41 +00:00
// Create a CAR file from the raw file
carFileDir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-car")
require.NoError(t, err)
carFilePath := filepath.Join(carFileDir, "out.car")
err = s.client.ClientGenCar(s.ctx, api.FileRef{Path: path}, carFilePath)
require.NoError(t, err)
// Import the CAR file on the miner - this is the equivalent to
// transferring the file across the wire in a normal (non-offline) deal
err = s.miner.DealsImportData(s.ctx, *proposalCid, carFilePath)
require.NoError(t, err)
// Wait for the deal to be published
waitDealPublished(t, s.ctx, s.miner, proposalCid)
t.Logf("deal published, retrieving")
// Retrieve the deal
testRetrieval(t, s.ctx, s.client, fcid, &pieceInfo.PieceCID, false, data)
}
func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid {
2020-04-23 17:50:52 +00:00
maddr, err := miner.ActorAddress(ctx)
if err != nil {
t.Fatal(err)
}
addr, err := client.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
2020-03-04 05:03:35 +00:00
deal, err := client.ClientStartDeal(ctx, &api.StartDealParams{
Data: &storagemarket.DataRef{
TransferType: storagemarket.TTGraphsync,
Root: fcid,
},
2020-04-16 21:43:39 +00:00
Wallet: addr,
Miner: maddr,
EpochPrice: types.NewInt(1000000),
DealStartEpoch: startEpoch,
2020-07-28 17:51:47 +00:00
MinBlocksDuration: uint64(build.MinDealDuration),
2020-07-08 18:35:55 +00:00
FastRetrieval: fastRet,
2020-03-04 05:03:35 +00:00
})
2019-11-06 16:10:44 +00:00
if err != nil {
2020-02-23 15:50:36 +00:00
t.Fatalf("%+v", err)
2019-11-06 16:10:44 +00:00
}
2020-04-23 17:50:52 +00:00
return deal
}
2019-11-06 19:44:28 +00:00
2021-05-30 13:13:38 +00:00
func waitDealSealed(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, deal *cid.Cid, noseal, noSealStart bool, cb func()) {
loop:
2019-11-06 19:44:28 +00:00
for {
di, err := client.ClientGetDealInfo(ctx, *deal)
if err != nil {
t.Fatal(err)
}
2019-11-06 20:39:07 +00:00
switch di.State {
case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing:
if noseal {
return
}
2021-05-30 13:13:38 +00:00
if !noSealStart {
startSealingWaiting(t, ctx, miner)
}
case storagemarket.StorageDealProposalRejected:
2019-11-06 20:39:07 +00:00
t.Fatal("deal rejected")
case storagemarket.StorageDealFailing:
2019-11-06 20:39:07 +00:00
t.Fatal("deal failed")
case storagemarket.StorageDealError:
2020-04-17 21:23:30 +00:00
t.Fatal("deal errored", di.Message)
case storagemarket.StorageDealActive:
2019-11-06 20:39:07 +00:00
fmt.Println("COMPLETE", di)
2019-11-07 00:18:06 +00:00
break loop
2019-11-06 20:39:07 +00:00
}
2021-05-30 13:13:38 +00:00
mds, err := miner.MarketListIncompleteDeals(ctx)
if err != nil {
t.Fatal(err)
}
var minerState storagemarket.StorageDealStatus
for _, md := range mds {
if md.DealID == di.DealID {
minerState = md.State
break
}
}
fmt.Printf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState])
2019-11-06 19:44:28 +00:00
time.Sleep(time.Second / 2)
2021-05-30 13:13:38 +00:00
if cb != nil {
cb()
}
2019-11-06 19:44:28 +00:00
}
2020-04-23 17:50:52 +00:00
}
2019-11-07 00:18:06 +00:00
2020-08-06 20:16:55 +00:00
func waitDealPublished(t *testing.T, ctx context.Context, miner TestStorageNode, deal *cid.Cid) {
subCtx, cancel := context.WithCancel(ctx)
defer cancel()
updates, err := miner.MarketGetDealUpdates(subCtx)
2020-08-06 20:16:55 +00:00
if err != nil {
t.Fatal(err)
}
for {
select {
case <-ctx.Done():
t.Fatal("context timeout")
case di := <-updates:
if deal.Equals(di.ProposalCid) {
switch di.State {
case storagemarket.StorageDealProposalRejected:
t.Fatal("deal rejected")
case storagemarket.StorageDealFailing:
t.Fatal("deal failed")
case storagemarket.StorageDealError:
t.Fatal("deal errored", di.Message)
case storagemarket.StorageDealFinalizing, storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
fmt.Println("COMPLETE", di)
return
}
fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
2020-08-06 20:16:55 +00:00
}
}
}
}
2020-06-26 15:28:05 +00:00
func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNode) {
snums, err := miner.SectorsList(ctx)
require.NoError(t, err)
for _, snum := range snums {
si, err := miner.SectorsStatus(ctx, snum, false)
2020-06-26 15:28:05 +00:00
require.NoError(t, err)
t.Logf("Sector %d state: %s", snum, si.State)
2020-06-26 15:28:05 +00:00
if si.State == api.SectorState(sealing.WaitDeals) {
require.NoError(t, miner.SectorStartSealing(ctx, snum))
}
}
flushSealingBatches(t, ctx, miner)
2020-06-26 15:28:05 +00:00
}
func testRetrieval(t *testing.T, ctx context.Context, client api.FullNode, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) {
2020-07-09 16:29:57 +00:00
offers, err := client.ClientFindData(ctx, fcid, piece)
2019-12-01 21:52:24 +00:00
if err != nil {
t.Fatal(err)
}
if len(offers) < 1 {
t.Fatal("no offers")
}
rpath, err := ioutil.TempDir("", "lotus-retrieve-test-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rpath) //nolint:errcheck
2019-12-01 21:52:24 +00:00
caddr, err := client.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
ref := &api.FileRef{
Path: filepath.Join(rpath, "ret"),
IsCAR: carExport,
}
updates, err := client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref)
2020-10-03 00:45:15 +00:00
if err != nil {
t.Fatal(err)
}
2020-08-11 20:48:56 +00:00
for update := range updates {
if update.Err != "" {
2020-10-03 00:45:15 +00:00
t.Fatalf("retrieval failed: %s", update.Err)
2020-08-11 20:04:00 +00:00
}
2019-12-01 21:52:24 +00:00
}
rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret"))
if err != nil {
t.Fatal(err)
}
if carExport {
2020-04-23 17:50:52 +00:00
rdata = extractCarData(t, ctx, rdata, rpath)
}
2019-12-01 21:52:24 +00:00
if !bytes.Equal(rdata, data) {
t.Fatal("wrong data retrieved")
}
2020-04-23 17:50:52 +00:00
}
2019-12-01 21:52:24 +00:00
2020-04-23 17:50:52 +00:00
func extractCarData(t *testing.T, ctx context.Context, rdata []byte, rpath string) []byte {
bserv := dstest.Bserv()
ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata))
if err != nil {
t.Fatal(err)
}
b, err := bserv.GetBlock(ctx, ch.Roots[0])
if err != nil {
t.Fatal(err)
}
nd, err := ipld.Decode(b)
if err != nil {
t.Fatal(err)
}
dserv := dag.NewDAGService(bserv)
fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
if err != nil {
t.Fatal(err)
}
outPath := filepath.Join(rpath, "retLoadedCAR")
if err := files.WriteTo(fil, outPath); err != nil {
t.Fatal(err)
}
rdata, err = ioutil.ReadFile(outPath)
if err != nil {
t.Fatal(err)
}
return rdata
2019-11-06 16:10:44 +00:00
}
2021-01-19 10:07:28 +00:00
// dealsScaffold bundles the shared fixtures used by the deal tests: a test
// context, a full-node client, a storage miner, and the block miner that
// keeps the chain advancing.
type dealsScaffold struct {
	ctx        context.Context
	client     *impl.FullNodeAPI
	miner      TestStorageNode
	blockMiner *BlockMiner
}
// setupOneClientOneMiner builds one full node and one miner via b, then
// connects them and starts block production, returning the test scaffold.
func setupOneClientOneMiner(t *testing.T, b APIBuilder, blocktime time.Duration) *dealsScaffold {
	fulls, miners := b(t, OneFull, OneMiner)
	full := fulls[0].FullNode.(*impl.FullNodeAPI)
	return connectAndStartMining(t, b, blocktime, full, miners[0])
}
// connectAndStartMining connects the client and miner over the network,
// kicks off block mining at the given cadence, and returns the assembled
// scaffold.
func connectAndStartMining(t *testing.T, b APIBuilder, blocktime time.Duration, client *impl.FullNodeAPI, miner TestStorageNode) *dealsScaffold {
	ctx := context.Background()

	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if err := miner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}

	// Give the connection a moment to settle before mining begins.
	time.Sleep(time.Second)

	bm := NewBlockMiner(ctx, t, miner, blocktime)
	bm.MineBlocks()

	return &dealsScaffold{
		ctx:        ctx,
		client:     client,
		miner:      miner,
		blockMiner: bm,
	}
}