lotus/node/impl/client/client.go

package client

import (
	"context"
	"errors"
	"io"
	"os"

	"github.com/filecoin-project/sector-storage/ffiwrapper"
	"github.com/filecoin-project/specs-actors/actors/abi/big"
	"golang.org/x/xerrors"

	"github.com/ipfs/go-blockservice"
	"github.com/ipfs/go-car"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-filestore"
	chunker "github.com/ipfs/go-ipfs-chunker"
	offline "github.com/ipfs/go-ipfs-exchange-offline"
	files "github.com/ipfs/go-ipfs-files"
	ipld "github.com/ipfs/go-ipld-format"
	"github.com/ipfs/go-merkledag"
	unixfile "github.com/ipfs/go-unixfs/file"
	"github.com/ipfs/go-unixfs/importer/balanced"
	ihelper "github.com/ipfs/go-unixfs/importer/helpers"
	"github.com/libp2p/go-libp2p-core/peer"
	"go.uber.org/fx"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/filecoin-project/specs-actors/actors/abi"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/markets/utils"
	"github.com/filecoin-project/lotus/node/impl/full"
	"github.com/filecoin-project/lotus/node/impl/paych"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
)
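
// dealStartBuffer is the number of epochs added to the current chain height
// when choosing the start epoch of a newly proposed deal (see ClientStartDeal).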
const dealStartBuffer abi.ChainEpoch = 10000 // TODO: allow setting
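
// API implements the client-side methods of the full node API. Its
// dependencies are injected by fx (note the embedded fx.In).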
type API struct {
	fx.In

	full.ChainAPI
	full.StateAPI
	full.WalletAPI
	paych.PaychAPI

	SMDealClient storagemarket.StorageClient
	RetDiscovery retrievalmarket.PeerResolver
	Retrieval    retrievalmarket.RetrievalClient

	Chain *store.ChainStore

	LocalDAG   dtypes.ClientDAG
	Blockstore dtypes.ClientBlockstore
	Filestore  dtypes.ClientFilestore `optional:"true"`
}
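
// ClientStartDeal proposes a storage deal for params.Data to the given miner,
// paying from params.Wallet, and returns the CID of the deal proposal.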
func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) {
	exist, err := a.WalletHas(ctx, params.Wallet)
	if err != nil {
		return nil, xerrors.Errorf("failed getting addr from wallet: %w", err)
	}
	if !exist {
		return nil, xerrors.Errorf("provided address doesn't exist in wallet")
	}

	pid, err := a.StateMinerPeerID(ctx, params.Miner, types.EmptyTSK)
	if err != nil {
		return nil, xerrors.Errorf("failed getting peer ID: %w", err)
	}

	mw, err := a.StateMinerWorker(ctx, params.Miner, types.EmptyTSK)
	if err != nil {
		return nil, xerrors.Errorf("failed getting miner worker: %w", err)
	}

	ssize, err := a.StateMinerSectorSize(ctx, params.Miner, types.EmptyTSK)
	if err != nil {
		return nil, xerrors.Errorf("failed checking miner's sector size: %w", err)
	}

	rt, _, err := ffiwrapper.ProofTypeFromSectorSize(ssize)
	if err != nil {
		return nil, xerrors.Errorf("bad sector size: %w", err)
	}

	providerInfo := utils.NewStorageProviderInfo(params.Miner, mw, 0, pid)

	ts, err := a.ChainHead(ctx)
	if err != nil {
		return nil, xerrors.Errorf("failed getting chain height: %w", err)
	}

	result, err := a.SMDealClient.ProposeStorageDeal(
		ctx,
		params.Wallet,
		&providerInfo,
		params.Data,
		ts.Height()+dealStartBuffer,
		ts.Height()+dealStartBuffer+abi.ChainEpoch(params.BlocksDuration),
		params.EpochPrice,
		big.Zero(),
		rt,
	)
	if err != nil {
		return nil, xerrors.Errorf("failed to start deal: %w", err)
	}

	return &result.ProposalCid, nil
}
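
// ClientListDeals returns a DealInfo summary for every storage deal the
// client currently has in progress.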
func (a *API) ClientListDeals(ctx context.Context) ([]api.DealInfo, error) {
	deals, err := a.SMDealClient.ListInProgressDeals(ctx)
	if err != nil {
		return nil, err
	}

	out := make([]api.DealInfo, len(deals))
	for k, v := range deals {
		out[k] = api.DealInfo{
			ProposalCid:   v.ProposalCid,
			State:         v.State,
			Message:       v.Message,
			Provider:      v.Proposal.Provider,
			PieceCID:      v.Proposal.PieceCID,
			Size:          uint64(v.Proposal.PieceSize.Unpadded()),
			PricePerEpoch: v.Proposal.StoragePricePerEpoch,
			Duration:      uint64(v.Proposal.Duration()),
			DealID:        v.DealID,
		}
	}

	return out, nil
}
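
// ClientGetDealInfo returns the current state of the in-progress deal with
// the given proposal CID.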
func (a *API) ClientGetDealInfo(ctx context.Context, d cid.Cid) (*api.DealInfo, error) {
	v, err := a.SMDealClient.GetInProgressDeal(ctx, d)
	if err != nil {
		return nil, err
	}

	return &api.DealInfo{
		ProposalCid:   v.ProposalCid,
		State:         v.State,
		Message:       v.Message,
		Provider:      v.Proposal.Provider,
		PieceCID:      v.Proposal.PieceCID,
		Size:          uint64(v.Proposal.PieceSize.Unpadded()),
		PricePerEpoch: v.Proposal.StoragePricePerEpoch,
		Duration:      uint64(v.Proposal.Duration()),
		DealID:        v.DealID,
	}, nil
}
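
// ClientHasLocal reports whether the root block of the given DAG is present
// in the local blockstore; it does not (yet) verify that the entire DAG is
// available.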
func (a *API) ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) {
	// TODO: check if we have the ENTIRE dag

	offExch := merkledag.NewDAGService(blockservice.New(a.Blockstore, offline.Exchange(a.Blockstore)))
	_, err := offExch.Get(ctx, root)
	if err == ipld.ErrNotFound {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}
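
// ClientFindData asks the retrieval discovery service for providers of the
// given payload root and queries each of them, returning one QueryOffer per
// provider (with Err set if the query failed).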
func (a *API) ClientFindData(ctx context.Context, root cid.Cid) ([]api.QueryOffer, error) {
	peers, err := a.RetDiscovery.GetPeers(root)
	if err != nil {
		return nil, err
	}

	out := make([]api.QueryOffer, len(peers))
	for k, p := range peers {
		queryResponse, err := a.Retrieval.Query(ctx, p, root, retrievalmarket.QueryParams{})
		if err != nil {
			out[k] = api.QueryOffer{Err: err.Error(), Miner: p.Address, MinerPeerID: p.ID}
		} else {
			out[k] = api.QueryOffer{
				Root:                    root,
				Size:                    queryResponse.Size,
				MinPrice:                queryResponse.PieceRetrievalPrice(),
				PaymentInterval:         queryResponse.MaxPaymentInterval,
				PaymentIntervalIncrease: queryResponse.MaxPaymentIntervalIncrease,
				Miner:                   queryResponse.PaymentAddress, // TODO: check
				MinerPeerID:             p.ID,
			}
		}
	}

	return out, nil
}
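
// ClientImport imports the file at ref.Path into the client blockstore: a CAR
// file is loaded as-is (and must have exactly one root), while any other file
// is chunked into a UnixFS DAG. The root CID of the imported data is returned.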
func (a *API) ClientImport(ctx context.Context, ref api.FileRef) (cid.Cid, error) {
	f, err := os.Open(ref.Path)
	if err != nil {
		return cid.Undef, err
	}

	stat, err := f.Stat()
	if err != nil {
		return cid.Undef, err
	}

	file, err := files.NewReaderPathFile(ref.Path, f, stat)
	if err != nil {
		return cid.Undef, err
	}

	if ref.IsCAR {
		var store car.Store
		if a.Filestore == nil {
			store = a.Blockstore
		} else {
			store = (*filestore.Filestore)(a.Filestore)
		}

		result, err := car.LoadCar(store, file)
		if err != nil {
			return cid.Undef, err
		}

		if len(result.Roots) != 1 {
			return cid.Undef, xerrors.New("cannot import car with more than one root")
		}

		return result.Roots[0], nil
	}

	bufferedDS := ipld.NewBufferedDAG(ctx, a.LocalDAG)

	params := ihelper.DagBuilderParams{
		Maxlinks:   build.UnixfsLinksPerLevel,
		RawLeaves:  true,
		CidBuilder: nil,
		Dagserv:    bufferedDS,
		NoCopy:     true,
	}

	db, err := params.New(chunker.NewSizeSplitter(file, int64(build.UnixfsChunkSize)))
	if err != nil {
		return cid.Undef, err
	}

	nd, err := balanced.Layout(db)
	if err != nil {
		return cid.Undef, err
	}

	if err := bufferedDS.Commit(); err != nil {
		return cid.Undef, err
	}

	return nd.Cid(), nil
}
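
// ClientImportLocal imports data from the given reader into the local DAG
// service as a UnixFS DAG and returns its root CID.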
func (a *API) ClientImportLocal(ctx context.Context, f io.Reader) (cid.Cid, error) {
	file := files.NewReaderFile(f)

	bufferedDS := ipld.NewBufferedDAG(ctx, a.LocalDAG)

	params := ihelper.DagBuilderParams{
		Maxlinks:   build.UnixfsLinksPerLevel,
		RawLeaves:  true,
		CidBuilder: nil,
		Dagserv:    bufferedDS,
	}

	db, err := params.New(chunker.NewSizeSplitter(file, int64(build.UnixfsChunkSize)))
	if err != nil {
		return cid.Undef, err
	}

	nd, err := balanced.Layout(db)
	if err != nil {
		return cid.Undef, err
	}

	return nd.Cid(), bufferedDS.Commit()
}
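
// ClientListImports lists the files that have been imported into the client
// filestore, deduplicated by file path. It requires a filestore-backed
// blockstore.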
func (a *API) ClientListImports(ctx context.Context) ([]api.Import, error) {
	if a.Filestore == nil {
		return nil, errors.New("listing imports is not supported with in-memory dag yet")
	}

	next, err := filestore.ListAll(a.Filestore, false)
	if err != nil {
		return nil, err
	}

	// TODO: make this less very bad by tracking root cids instead of using ListAll

	out := make([]api.Import, 0)
	lowest := make([]uint64, 0)
	for {
		r := next()
		if r == nil {
			return out, nil
		}

		matched := false
		for i := range out {
			if out[i].FilePath == r.FilePath {
				matched = true
				if lowest[i] > r.Offset {
					lowest[i] = r.Offset
					out[i] = api.Import{
						Status:   r.Status,
						Key:      r.Key,
						FilePath: r.FilePath,
						Size:     r.Size,
					}
				}
				break
			}
		}
		if !matched {
			out = append(out, api.Import{
				Status:   r.Status,
				Key:      r.Key,
				FilePath: r.FilePath,
				Size:     r.Size,
			})
			lowest = append(lowest, r.Offset)
		}
	}
}
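
// ClientRetrieve retrieves the DAG rooted at order.Root through the retrieval
// market and writes it to ref.Path, either as a CAR file or as the
// reconstructed UnixFS file. It blocks until the retrieval deal completes,
// fails, or ctx is cancelled.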
func (a *API) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref api.FileRef) error {
	if order.MinerPeerID == "" {
		pid, err := a.StateMinerPeerID(ctx, order.Miner, types.EmptyTSK)
		if err != nil {
			return err
		}

		order.MinerPeerID = pid
	}

	if order.Size == 0 {
		return xerrors.Errorf("cannot make retrieval deal for zero bytes")
	}

	retrievalResult := make(chan error, 1)

	unsubscribe := a.Retrieval.SubscribeToEvents(func(event retrievalmarket.ClientEvent, state retrievalmarket.ClientDealState) {
		if state.PayloadCID.Equals(order.Root) {
			switch state.Status {
			case retrievalmarket.DealStatusFailed, retrievalmarket.DealStatusErrored:
				retrievalResult <- xerrors.Errorf("Retrieval Error: %s", state.Message)
			case retrievalmarket.DealStatusCompleted:
				retrievalResult <- nil
			}
		}
	})

	ppb := types.BigDiv(order.Total, types.NewInt(order.Size))

	a.Retrieval.Retrieve(
		ctx,
		order.Root,
		retrievalmarket.NewParamsV0(ppb, order.PaymentInterval, order.PaymentIntervalIncrease),
		order.Total,
		order.MinerPeerID,
		order.Client,
		order.Miner)

	select {
	case <-ctx.Done():
		return xerrors.New("Retrieval Timed Out")
	case err := <-retrievalResult:
		if err != nil {
			return xerrors.Errorf("RetrieveUnixfs: %w", err)
		}
	}

	unsubscribe()

	if ref.IsCAR {
		f, err := os.OpenFile(ref.Path, os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			return err
		}

		err = car.WriteCar(ctx, a.LocalDAG, []cid.Cid{order.Root}, f)
		if err != nil {
			return err
		}

		return f.Close()
	}

	nd, err := a.LocalDAG.Get(ctx, order.Root)
	if err != nil {
		return xerrors.Errorf("ClientRetrieve: %w", err)
	}

	file, err := unixfile.NewUnixfsFile(ctx, a.LocalDAG, nd)
	if err != nil {
		return xerrors.Errorf("ClientRetrieve: %w", err)
	}

	return files.WriteTo(file, ref.Path)
}
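
// ClientQueryAsk fetches the signed storage ask currently advertised by the
// given miner.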
func (a *API) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) {
	info := utils.NewStorageProviderInfo(miner, address.Undef, 0, p)

	signedAsk, err := a.SMDealClient.GetAsk(ctx, info)
	if err != nil {
		return nil, err
	}

	return signedAsk, nil
}