package client

import (
	"context"
	"errors"
	"io"
	"math"
	"os"

	"golang.org/x/xerrors"

	"github.com/ipfs/go-blockservice"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-filestore"
	chunker "github.com/ipfs/go-ipfs-chunker"
	offline "github.com/ipfs/go-ipfs-exchange-offline"
	files "github.com/ipfs/go-ipfs-files"
	ipld "github.com/ipfs/go-ipld-format"
	"github.com/ipfs/go-merkledag"
	unixfile "github.com/ipfs/go-unixfs/file"
	"github.com/ipfs/go-unixfs/importer/balanced"
	ihelper "github.com/ipfs/go-unixfs/importer/helpers"
	"github.com/libp2p/go-libp2p-core/peer"
	"go.uber.org/fx"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	"github.com/filecoin-project/go-fil-markets/shared/tokenamount"
	"github.com/filecoin-project/go-fil-markets/storagemarket"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/markets/utils"
	"github.com/filecoin-project/lotus/node/impl/full"
	"github.com/filecoin-project/lotus/node/impl/paych"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
)
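
// API implements the client-side methods of the full node API. Its
// dependencies (chain, state, wallet and payment-channel APIs, the storage
// and retrieval market clients, and the local blockstore/DAG) are injected
// via fx.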
type API struct {
	fx.In

	full.ChainAPI
	full.StateAPI
	full.WalletAPI
	paych.PaychAPI

	SMDealClient storagemarket.StorageClient
	RetDiscovery retrievalmarket.PeerResolver
	Retrieval    retrievalmarket.RetrievalClient
	Chain        *store.ChainStore

	LocalDAG   dtypes.ClientDAG
	Blockstore dtypes.ClientBlockstore
	Filestore  dtypes.ClientFilestore `optional:"true"`
}
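
// ClientStartDeal proposes a storage deal for the given data CID with the
// given miner, paying epochPrice per epoch from addr for blocksDuration
// blocks. It returns the CID of the deal proposal.
//
// Illustrative call (variable names are hypothetical):
//
//	proposalCid, err := api.ClientStartDeal(ctx, dataCid, walletAddr, minerAddr, epochPrice, dealDuration)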
func (a *API) ClientStartDeal(ctx context.Context, data cid.Cid, addr address.Address, miner address.Address, epochPrice types.BigInt, blocksDuration uint64) (*cid.Cid, error) {
	exist, err := a.WalletHas(ctx, addr)
	if err != nil {
		return nil, xerrors.Errorf("failed getting addr from wallet: %w", err)
	}
	if !exist {
		return nil, xerrors.Errorf("provided address doesn't exist in wallet")
	}

	pid, err := a.StateMinerPeerID(ctx, miner, nil)
	if err != nil {
		return nil, xerrors.Errorf("failed getting peer ID: %w", err)
	}

	mw, err := a.StateMinerWorker(ctx, miner, nil)
	if err != nil {
		return nil, xerrors.Errorf("failed getting miner worker: %w", err)
	}

	providerInfo := utils.NewStorageProviderInfo(miner, mw, 0, pid)
	result, err := a.SMDealClient.ProposeStorageDeal(
		ctx,
		addr,
		&providerInfo,
		data,
		storagemarket.Epoch(math.MaxUint64),
		storagemarket.Epoch(blocksDuration),
		utils.ToSharedTokenAmount(epochPrice),
		tokenamount.Empty)

	if err != nil {
		return nil, xerrors.Errorf("failed to start deal: %w", err)
	}

	return &result.ProposalCid, nil
}
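
// ClientListDeals returns information about all storage deals the client
// currently has in progress.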
func (a *API) ClientListDeals(ctx context.Context) ([]api.DealInfo, error) {
	deals, err := a.SMDealClient.ListInProgressDeals(ctx)
	if err != nil {
		return nil, err
	}

	out := make([]api.DealInfo, len(deals))
	for k, v := range deals {
		out[k] = api.DealInfo{
			ProposalCid:   v.ProposalCid,
			State:         v.State,
			Provider:      v.Proposal.Provider,
			PieceRef:      v.Proposal.PieceRef,
			Size:          v.Proposal.PieceSize,
			PricePerEpoch: utils.FromSharedTokenAmount(v.Proposal.StoragePricePerEpoch),
			Duration:      v.Proposal.Duration,
		}
	}

	return out, nil
}
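
// ClientGetDealInfo returns information about the in-progress storage deal
// identified by its proposal CID.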
func (a *API) ClientGetDealInfo(ctx context.Context, d cid.Cid) (*api.DealInfo, error) {
	v, err := a.SMDealClient.GetInProgressDeal(ctx, d)
	if err != nil {
		return nil, err
	}

	return &api.DealInfo{
		ProposalCid:   v.ProposalCid,
		State:         v.State,
		Provider:      v.Proposal.Provider,
		PieceRef:      v.Proposal.PieceRef,
		Size:          v.Proposal.PieceSize,
		PricePerEpoch: utils.FromSharedTokenAmount(v.Proposal.StoragePricePerEpoch),
		Duration:      v.Proposal.Duration,
	}, nil
}
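
// ClientHasLocal reports whether the block identified by root is present in
// the local blockstore. Note that it only checks the root block, not the
// entire DAG.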
func (a *API) ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) {
	// TODO: check if we have the ENTIRE dag

	offExch := merkledag.NewDAGService(blockservice.New(a.Blockstore, offline.Exchange(a.Blockstore)))
	_, err := offExch.Get(ctx, root)
	if err == ipld.ErrNotFound {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}
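
// ClientFindData asks the retrieval discovery service for peers holding the
// given root CID, queries each of them for a retrieval quote, and returns
// the resulting offers (per-peer query failures are reported in the offer's
// Err field rather than aborting the whole call).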
func (a *API) ClientFindData(ctx context.Context, root cid.Cid) ([]api.QueryOffer, error) {
	peers, err := a.RetDiscovery.GetPeers(root)
	if err != nil {
		return nil, err
	}

	out := make([]api.QueryOffer, len(peers))
	for k, p := range peers {
		queryResponse, err := a.Retrieval.Query(ctx, p, root, retrievalmarket.QueryParams{})
		if err != nil {
			out[k] = api.QueryOffer{Err: err.Error(), Miner: p.Address, MinerPeerID: p.ID}
		} else {
			out[k] = api.QueryOffer{
				Root:                    root,
				Size:                    queryResponse.Size,
				MinPrice:                utils.FromSharedTokenAmount(queryResponse.PieceRetrievalPrice()),
				PaymentInterval:         queryResponse.MaxPaymentInterval,
				PaymentIntervalIncrease: queryResponse.MaxPaymentIntervalIncrease,
				Miner:                   p.Address, // TODO: check
				MinerPeerID:             p.ID,
			}
		}
	}

	return out, nil
}
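
// ClientImport imports a file from the local filesystem into the client
// blockstore, building a balanced UnixFS DAG over it, and returns the root
// CID.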
func (a *API) ClientImport(ctx context.Context, path string) (cid.Cid, error) {
	f, err := os.Open(path)
	if err != nil {
		return cid.Undef, err
	}
	defer f.Close() //nolint:errcheck // release the file handle once the DAG has been built

	stat, err := f.Stat()
	if err != nil {
		return cid.Undef, err
	}

	file, err := files.NewReaderPathFile(path, f, stat)
	if err != nil {
		return cid.Undef, err
	}

	bufferedDS := ipld.NewBufferedDAG(ctx, a.LocalDAG)

	params := ihelper.DagBuilderParams{
		Maxlinks:   build.UnixfsLinksPerLevel,
		RawLeaves:  true,
		CidBuilder: nil,
		Dagserv:    bufferedDS,
		NoCopy:     true,
	}

	db, err := params.New(chunker.NewSizeSplitter(file, int64(build.UnixfsChunkSize)))
	if err != nil {
		return cid.Undef, err
	}
	nd, err := balanced.Layout(db)
	if err != nil {
		return cid.Undef, err
	}

	if err := bufferedDS.Commit(); err != nil {
		return cid.Undef, err
	}

	return nd.Cid(), nil
}
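
// ClientImportLocal imports data from the given reader into the client
// blockstore, building a balanced UnixFS DAG over it, and returns the root
// CID.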
func (a *API) ClientImportLocal(ctx context.Context, f io.Reader) (cid.Cid, error) {
	file := files.NewReaderFile(f)

	bufferedDS := ipld.NewBufferedDAG(ctx, a.LocalDAG)

	params := ihelper.DagBuilderParams{
		Maxlinks:   build.UnixfsLinksPerLevel,
		RawLeaves:  true,
		CidBuilder: nil,
		Dagserv:    bufferedDS,
	}

	db, err := params.New(chunker.NewSizeSplitter(file, int64(build.UnixfsChunkSize)))
	if err != nil {
		return cid.Undef, err
	}
	nd, err := balanced.Layout(db)
	if err != nil {
		return cid.Undef, err
	}

	return nd.Cid(), bufferedDS.Commit()
}
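
// ClientListImports lists the files that have been imported into the client
// filestore. It returns an error when the node is running with an in-memory
// DAG (no filestore configured).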
func (a *API) ClientListImports(ctx context.Context) ([]api.Import, error) {
	if a.Filestore == nil {
		return nil, errors.New("listing imports is not supported with in-memory dag yet")
	}

	next, err := filestore.ListAll(a.Filestore, false)
	if err != nil {
		return nil, err
	}

	// TODO: make this less very bad by tracking root cids instead of using ListAll

	out := make([]api.Import, 0)
	for {
		r := next()
		if r == nil {
			return out, nil
		}
		if r.Offset != 0 {
			continue
		}
		out = append(out, api.Import{
			Status:   r.Status,
			Key:      r.Key,
			FilePath: r.FilePath,
			Size:     r.Size,
		})
	}
}
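
// ClientRetrieve retrieves the data identified by order.Root from the miner
// named in the order, waits for the retrieval deal to complete or fail, and
// writes the resulting UnixFS file to path.
//
// Illustrative call (variable names are hypothetical):
//
//	err := api.ClientRetrieve(ctx, order, "/tmp/output")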
func (a *API) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, path string) error {
	if order.MinerPeerID == "" {
		pid, err := a.StateMinerPeerID(ctx, order.Miner, nil)
		if err != nil {
			return err
		}

		order.MinerPeerID = pid
	}

	retrievalResult := make(chan error, 1)

	unsubscribe := a.Retrieval.SubscribeToEvents(func(event retrievalmarket.ClientEvent, state retrievalmarket.ClientDealState) {
		if state.PayloadCID.Equals(order.Root) {
			switch event {
			case retrievalmarket.ClientEventError:
				retrievalResult <- xerrors.Errorf("Retrieval Error: %s", state.Message)
			case retrievalmarket.ClientEventComplete:
				retrievalResult <- nil
			}
		}
	})

	a.Retrieval.Retrieve(
		ctx,
		order.Root,
		retrievalmarket.NewParamsV0(types.BigDiv(order.Total, types.NewInt(order.Size)).Int, order.PaymentInterval, order.PaymentIntervalIncrease),
		utils.ToSharedTokenAmount(order.Total),
		order.MinerPeerID,
		order.Client,
		order.Miner)

	select {
	case <-ctx.Done():
		return xerrors.New("Retrieval Timed Out")
	case err := <-retrievalResult:
		if err != nil {
			return xerrors.Errorf("Retrieve: %w", err)
		}
	}

	unsubscribe()

	nd, err := a.LocalDAG.Get(ctx, order.Root)
	if err != nil {
		return xerrors.Errorf("ClientRetrieve: %w", err)
	}

	file, err := unixfile.NewUnixfsFile(ctx, a.LocalDAG, nd)
	if err != nil {
		return xerrors.Errorf("ClientRetrieve: %w", err)
	}
	return files.WriteTo(file, path)
}
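
// ClientQueryAsk queries the given miner (identified by peer ID and address)
// for its current signed storage ask.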
func (a *API) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*types.SignedStorageAsk, error) {
	info := utils.NewStorageProviderInfo(miner, address.Undef, 0, p)
	signedAsk, err := a.SMDealClient.GetAsk(ctx, info)
	if err != nil {
		return nil, err
	}
	return utils.FromSignedStorageAsk(signedAsk)
}