lotus/node/impl/client/client.go

package client

import (
	"context"
	"fmt"
	"io"
	"os"

	"golang.org/x/xerrors"

	"github.com/ipfs/go-blockservice"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-cidutil"
	chunker "github.com/ipfs/go-ipfs-chunker"
	offline "github.com/ipfs/go-ipfs-exchange-offline"
	files "github.com/ipfs/go-ipfs-files"
	ipld "github.com/ipfs/go-ipld-format"
	"github.com/ipfs/go-merkledag"
	unixfile "github.com/ipfs/go-unixfs/file"
	"github.com/ipfs/go-unixfs/importer/balanced"
	ihelper "github.com/ipfs/go-unixfs/importer/helpers"
	"github.com/ipld/go-car"
	basicnode "github.com/ipld/go-ipld-prime/node/basic"
	"github.com/ipld/go-ipld-prime/traversal/selector"
	"github.com/ipld/go-ipld-prime/traversal/selector/builder"
	"github.com/libp2p/go-libp2p-core/peer"
	mh "github.com/multiformats/go-multihash"
	"go.uber.org/fx"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-fil-markets/pieceio"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	rm "github.com/filecoin-project/go-fil-markets/retrievalmarket"
	"github.com/filecoin-project/go-fil-markets/shared"
	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/filecoin-project/go-multistore"
	"github.com/filecoin-project/go-padreader"
	"github.com/filecoin-project/sector-storage/ffiwrapper"
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-actors/actors/abi/big"
	"github.com/filecoin-project/specs-actors/actors/builtin/miner"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
	marketevents "github.com/filecoin-project/lotus/markets/loggers"
	"github.com/filecoin-project/lotus/markets/utils"
	"github.com/filecoin-project/lotus/node/impl/full"
	"github.com/filecoin-project/lotus/node/impl/paych"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
	"github.com/filecoin-project/lotus/node/repo/importmgr"
)

var DefaultHashFunction = uint64(mh.BLAKE2B_MIN + 31)

const dealStartBufferHours uint64 = 24
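
// API groups the chain, state, wallet and payment channel APIs together with the
// storage/retrieval market clients and import stores used by the Client* methods.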
type API struct {
	fx.In

	full.ChainAPI
	full.StateAPI
	full.WalletAPI
	paych.PaychAPI

	SMDealClient storagemarket.StorageClient
	RetDiscovery rm.PeerResolver
	Retrieval    rm.RetrievalClient
	Chain        *store.ChainStore

	Imports dtypes.ClientImportMgr

	CombinedBstore    dtypes.ClientBlockstore // TODO: try to remove
	RetrievalStoreMgr dtypes.ClientRetrievalStoreManager
}
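
// calcDealExpiration returns the deal end epoch: at least minDuration epochs past
// startEpoch, aligned to the miner's proving period boundary.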
func calcDealExpiration(minDuration uint64, md *miner.DeadlineInfo, startEpoch abi.ChainEpoch) abi.ChainEpoch {
	// Make sure we give some time for the miner to seal
	minExp := startEpoch + abi.ChainEpoch(minDuration)

	// Align on the miner's proving period boundary
	return minExp + miner.WPoStProvingPeriod - (minExp % miner.WPoStProvingPeriod) + (md.PeriodStart % miner.WPoStProvingPeriod) - 1
}

func (a *API) imgr() *importmgr.Mgr {
	return a.Imports
}
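
// ClientStartDeal proposes a storage deal to the given miner. For graphsync
// transfers it first looks for a local import store whose root matches the deal
// data, so the transfer can be served from that store.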
func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) {
	var storeID *multistore.StoreID
	if params.Data.TransferType == storagemarket.TTGraphsync {
		importIDs := a.imgr().List()
		for _, importID := range importIDs {
			info, err := a.imgr().Info(importID)
			if err != nil {
				continue
			}
			if info.Labels[importmgr.LRootCid] == "" {
				continue
			}
			c, err := cid.Parse(info.Labels[importmgr.LRootCid])
			if err != nil {
				continue
			}
			if c.Equals(params.Data.Root) {
				storeID = &importID
				break
			}
		}
	}

	exist, err := a.WalletHas(ctx, params.Wallet)
	if err != nil {
		return nil, xerrors.Errorf("failed getting addr from wallet %s: %w", params.Wallet, err)
	}
	if !exist {
		return nil, xerrors.Errorf("provided address doesn't exist in wallet")
	}

	mi, err := a.StateMinerInfo(ctx, params.Miner, types.EmptyTSK)
	if err != nil {
		return nil, xerrors.Errorf("failed getting peer ID: %w", err)
	}

	md, err := a.StateMinerProvingDeadline(ctx, params.Miner, types.EmptyTSK)
	if err != nil {
		return nil, xerrors.Errorf("failed getting miner's deadline info: %w", err)
	}

	rt, err := ffiwrapper.SealProofTypeFromSectorSize(mi.SectorSize)
	if err != nil {
		return nil, xerrors.Errorf("bad sector size: %w", err)
	}

	if uint64(params.Data.PieceSize.Padded()) > uint64(mi.SectorSize) {
		return nil, xerrors.New("data doesn't fit in a sector")
	}

	providerInfo := utils.NewStorageProviderInfo(params.Miner, mi.Worker, mi.SectorSize, mi.PeerId, mi.Multiaddrs)

	dealStart := params.DealStartEpoch
	if dealStart <= 0 { // unset, or explicitly 'epoch undefined'
		ts, err := a.ChainHead(ctx)
		if err != nil {
			return nil, xerrors.Errorf("failed getting chain height: %w", err)
		}

		blocksPerHour := 60 * 60 / build.BlockDelaySecs
		dealStart = ts.Height() + abi.ChainEpoch(dealStartBufferHours*blocksPerHour)
	}

	result, err := a.SMDealClient.ProposeStorageDeal(ctx, storagemarket.ProposeStorageDealParams{
		Addr:          params.Wallet,
		Info:          &providerInfo,
		Data:          params.Data,
		StartEpoch:    dealStart,
		EndEpoch:      calcDealExpiration(params.MinBlocksDuration, md, dealStart),
		Price:         params.EpochPrice,
		Collateral:    big.Zero(),
		Rt:            rt,
		FastRetrieval: params.FastRetrieval,
		VerifiedDeal:  params.VerifiedDeal,
		StoreID:       storeID,
	})
	if err != nil {
		return nil, xerrors.Errorf("failed to start deal: %w", err)
	}

	return &result.ProposalCid, nil
}
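
// ClientListDeals returns all storage deals known to the local deal client.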
func (a *API) ClientListDeals(ctx context.Context) ([]api.DealInfo, error) {
	deals, err := a.SMDealClient.ListLocalDeals(ctx)
	if err != nil {
		return nil, err
	}

	out := make([]api.DealInfo, len(deals))
	for k, v := range deals {
		out[k] = api.DealInfo{
			ProposalCid:   v.ProposalCid,
			DataRef:       v.DataRef,
			State:         v.State,
			Message:       v.Message,
			Provider:      v.Proposal.Provider,
			PieceCID:      v.Proposal.PieceCID,
			Size:          uint64(v.Proposal.PieceSize.Unpadded()),
			PricePerEpoch: v.Proposal.StoragePricePerEpoch,
			Duration:      uint64(v.Proposal.Duration()),
			DealID:        v.DealID,
		}
	}

	return out, nil
}
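
// ClientGetDealInfo returns the current state of the storage deal with the given proposal CID.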
func (a *API) ClientGetDealInfo(ctx context.Context, d cid.Cid) (*api.DealInfo, error) {
	v, err := a.SMDealClient.GetLocalDeal(ctx, d)
	if err != nil {
		return nil, err
	}

	return &api.DealInfo{
		ProposalCid:   v.ProposalCid,
		State:         v.State,
		Message:       v.Message,
		Provider:      v.Proposal.Provider,
		PieceCID:      v.Proposal.PieceCID,
		Size:          uint64(v.Proposal.PieceSize.Unpadded()),
		PricePerEpoch: v.Proposal.StoragePricePerEpoch,
		Duration:      uint64(v.Proposal.Duration()),
		DealID:        v.DealID,
	}, nil
}
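
// ClientHasLocal reports whether the DAG root is present in the local import blockstore.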
func (a *API) ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) {
	// TODO: check if we have the ENTIRE dag
	offExch := merkledag.NewDAGService(blockservice.New(a.Imports.Blockstore, offline.Exchange(a.Imports.Blockstore)))
	_, err := offExch.Get(ctx, root)
	if err == ipld.ErrNotFound {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}
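
// ClientFindData queries known retrieval peers for offers on the given payload root,
// optionally restricted to a specific piece CID.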
func (a *API) ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) {
	peers, err := a.RetDiscovery.GetPeers(root)
	if err != nil {
		return nil, err
	}

	out := make([]api.QueryOffer, 0, len(peers))
	for _, p := range peers {
		if piece != nil && !piece.Equals(*p.PieceCID) {
			continue
		}
		out = append(out, a.makeRetrievalQuery(ctx, p, root, piece, rm.QueryParams{}))
	}

	return out, nil
}
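
// ClientMinerQueryOffer asks a single miner for a retrieval offer on the given root.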
func (a *API) ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) {
	mi, err := a.StateMinerInfo(ctx, miner, types.EmptyTSK)
	if err != nil {
		return api.QueryOffer{}, err
	}
	rp := rm.RetrievalPeer{
		Address: miner,
		ID:      mi.PeerId,
	}
	return a.makeRetrievalQuery(ctx, rp, root, piece, rm.QueryParams{}), nil
}

func (a *API) makeRetrievalQuery(ctx context.Context, rp rm.RetrievalPeer, payload cid.Cid, piece *cid.Cid, qp rm.QueryParams) api.QueryOffer {
	queryResponse, err := a.Retrieval.Query(ctx, rp, payload, qp)
	if err != nil {
		return api.QueryOffer{Err: err.Error(), Miner: rp.Address, MinerPeer: rp}
	}

	var errStr string
	switch queryResponse.Status {
	case rm.QueryResponseAvailable:
		errStr = ""
	case rm.QueryResponseUnavailable:
		errStr = fmt.Sprintf("retrieval query offer was unavailable: %s", queryResponse.Message)
	case rm.QueryResponseError:
		errStr = fmt.Sprintf("retrieval query offer errored: %s", queryResponse.Message)
	}

	return api.QueryOffer{
		Root:                    payload,
		Piece:                   piece,
		Size:                    queryResponse.Size,
		MinPrice:                queryResponse.PieceRetrievalPrice(),
		UnsealPrice:             queryResponse.UnsealPrice,
		PaymentInterval:         queryResponse.MaxPaymentInterval,
		PaymentIntervalIncrease: queryResponse.MaxPaymentIntervalIncrease,
		Miner:                   queryResponse.PaymentAddress, // TODO: check
		MinerPeer:               rp,
		Err:                     errStr,
	}
}
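
// ClientImport imports the file (or CAR) at ref.Path into a new import store and
// returns the resulting root CID together with the store ID.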
func (a *API) ClientImport(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) {
	id, st, err := a.imgr().NewStore()
	if err != nil {
		return nil, err
	}
	if err := a.imgr().AddLabel(id, importmgr.LSource, "import"); err != nil {
		return nil, err
	}

	if err := a.imgr().AddLabel(id, importmgr.LFileName, ref.Path); err != nil {
		return nil, err
	}

	nd, err := a.clientImport(ctx, ref, st)
	if err != nil {
		return nil, err
	}

	if err := a.imgr().AddLabel(id, importmgr.LRootCid, nd.String()); err != nil {
		return nil, err
	}

	return &api.ImportRes{
		Root:     nd,
		ImportID: id,
	}, nil
}
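
// ClientRemoveImport removes the import store with the given ID.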
func (a *API) ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error {
	return a.imgr().Remove(importID)
}
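
// ClientImportLocal chunks the given reader into a UnixFS DAG inside a new import
// store and returns the root CID.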
func (a *API) ClientImportLocal(ctx context.Context, f io.Reader) (cid.Cid, error) {
	file := files.NewReaderFile(f)

	id, st, err := a.imgr().NewStore()
	if err != nil {
		return cid.Undef, err
	}
	if err := a.imgr().AddLabel(id, "source", "import-local"); err != nil {
		return cid.Cid{}, err
	}

	bufferedDS := ipld.NewBufferedDAG(ctx, st.DAG)

	prefix, err := merkledag.PrefixForCidVersion(1)
	if err != nil {
		return cid.Undef, err
	}
	prefix.MhType = DefaultHashFunction

	params := ihelper.DagBuilderParams{
		Maxlinks:  build.UnixfsLinksPerLevel,
		RawLeaves: true,
		CidBuilder: cidutil.InlineBuilder{
			Builder: prefix,
			Limit:   126,
		},
		Dagserv: bufferedDS,
	}

	db, err := params.New(chunker.NewSizeSplitter(file, int64(build.UnixfsChunkSize)))
	if err != nil {
		return cid.Undef, err
	}
	nd, err := balanced.Layout(db)
	if err != nil {
		return cid.Undef, err
	}

	if err := a.imgr().AddLabel(id, "root", nd.Cid().String()); err != nil {
		return cid.Cid{}, err
	}

	return nd.Cid(), bufferedDS.Commit()
}
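
// ClientListImports returns metadata (source, file path, root CID) for every local import.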
func (a *API) ClientListImports(ctx context.Context) ([]api.Import, error) {
	importIDs := a.imgr().List()

	out := make([]api.Import, len(importIDs))
	for i, id := range importIDs {
		info, err := a.imgr().Info(id)
		if err != nil {
			out[i] = api.Import{
				Key: id,
				Err: xerrors.Errorf("getting info: %w", err).Error(),
			}
			continue
		}

		ai := api.Import{
			Key:      id,
			Source:   info.Labels[importmgr.LSource],
			FilePath: info.Labels[importmgr.LFileName],
		}

		if info.Labels[importmgr.LRootCid] != "" {
			c, err := cid.Parse(info.Labels[importmgr.LRootCid])
			if err != nil {
				ai.Err = err.Error()
			} else {
				ai.Root = &c
			}
		}

		out[i] = ai
	}

	return out, nil
}
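
// ClientRetrieve starts a retrieval in the background and returns a channel of
// progress events; the final event carries an empty Err on success.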
func (a *API) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
	events := make(chan marketevents.RetrievalEvent)
	go a.clientRetrieve(ctx, order, ref, events)
	return events, nil
}
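
// clientRetrieve drives a single retrieval deal, streaming market events into the
// events channel, and optionally writes the retrieved data to ref.Path.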
func (a *API) clientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef, events chan marketevents.RetrievalEvent) {
	defer close(events)

	finish := func(e error) {
		errStr := ""
		if e != nil {
			errStr = e.Error()
		}
		events <- marketevents.RetrievalEvent{Err: errStr}
	}

	if order.MinerPeer.ID == "" {
		mi, err := a.StateMinerInfo(ctx, order.Miner, types.EmptyTSK)
		if err != nil {
			finish(err)
			return
		}

		order.MinerPeer = retrievalmarket.RetrievalPeer{
			ID:      mi.PeerId,
			Address: order.Miner,
		}
	}

	if order.Size == 0 {
		finish(xerrors.Errorf("cannot make retrieval deal for zero bytes"))
		return
	}

	/*id, st, err := a.imgr().NewStore()
	if err != nil {
		return err
	}
	if err := a.imgr().AddLabel(id, "source", "retrieval"); err != nil {
		return err
	}*/

	retrievalResult := make(chan error, 1)

	unsubscribe := a.Retrieval.SubscribeToEvents(func(event rm.ClientEvent, state rm.ClientDealState) {
		if state.PayloadCID.Equals(order.Root) {
			events <- marketevents.RetrievalEvent{
				Event:         event,
				Status:        state.Status,
				BytesReceived: state.TotalReceived,
				FundsSpent:    state.FundsSpent,
			}

			switch state.Status {
			case rm.DealStatusCompleted:
				retrievalResult <- nil
			case rm.DealStatusRejected:
				retrievalResult <- xerrors.Errorf("Retrieval Proposal Rejected: %s", state.Message)
			case rm.DealStatusDealNotFound, rm.DealStatusErrored:
				retrievalResult <- xerrors.Errorf("Retrieval Error: %s", state.Message)
			}
		}
	})

	ppb := types.BigDiv(order.Total, types.NewInt(order.Size))

	params, err := rm.NewParamsV1(ppb, order.PaymentInterval, order.PaymentIntervalIncrease, shared.AllSelector(), order.Piece, order.UnsealPrice)
	if err != nil {
		finish(xerrors.Errorf("Error in retrieval params: %s", err))
		return
	}

	store, err := a.RetrievalStoreMgr.NewStore()
	if err != nil {
		finish(xerrors.Errorf("Error setting up new store: %w", err))
		return
	}

	defer func() {
		_ = a.RetrievalStoreMgr.ReleaseStore(store)
	}()

	_, err = a.Retrieval.Retrieve(
		ctx,
		order.Root,
		params,
		order.Total,
		order.MinerPeer,
		order.Client,
		order.Miner,
		store.StoreID())
	if err != nil {
		finish(xerrors.Errorf("Retrieve failed: %w", err))
		return
	}

	select {
	case <-ctx.Done():
		finish(xerrors.New("Retrieval Timed Out"))
		return
	case err := <-retrievalResult:
		if err != nil {
			finish(xerrors.Errorf("Retrieve: %w", err))
			return
		}
	}

	unsubscribe()

	// If ref is nil, it only fetches the data into the configured blockstore.
	if ref == nil {
		finish(nil)
		return
	}

	rdag := store.DAGService()

	if ref.IsCAR {
		f, err := os.OpenFile(ref.Path, os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			finish(err)
			return
		}
		err = car.WriteCar(ctx, rdag, []cid.Cid{order.Root}, f)
		if err != nil {
			finish(err)
			return
		}
		finish(f.Close())
		return
	}

	nd, err := rdag.Get(ctx, order.Root)
	if err != nil {
		finish(xerrors.Errorf("ClientRetrieve: %w", err))
		return
	}
	file, err := unixfile.NewUnixfsFile(ctx, rdag, nd)
	if err != nil {
		finish(xerrors.Errorf("ClientRetrieve: %w", err))
		return
	}

	finish(files.WriteTo(file, ref.Path))
}
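
// ClientQueryAsk fetches the signed storage ask of the given miner via the given peer.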
func (a *API) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) {
	mi, err := a.StateMinerInfo(ctx, miner, types.EmptyTSK)
	if err != nil {
		return nil, xerrors.Errorf("failed getting miner info: %w", err)
	}

	info := utils.NewStorageProviderInfo(miner, mi.Worker, mi.SectorSize, p, mi.Multiaddrs)
	signedAsk, err := a.SMDealClient.GetAsk(ctx, info)
	if err != nil {
		return nil, err
	}
	return signedAsk, nil
}
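
// ClientCalcCommP computes the piece commitment (commP) and piece size for the file at inpath.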
func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) {
	// Hard-code the sector size to 32GiB, because:
	// - pieceio.GeneratePieceCommitment requires a RegisteredSealProof
	// - commP itself is sector-size independent, with rather low probability of that changing
	//   ( note how the final rust call is identical for every RegSP type )
	//   https://github.com/filecoin-project/rust-filecoin-proofs-api/blob/v5.0.0/src/seal.rs#L1040-L1050
	//
	// IF/WHEN this changes in the future we will have to be able to calculate
	// "old style" commP, and thus will need to introduce a version switch or similar
	arbitrarySectorSize := abi.SectorSize(32 << 30)

	rt, err := ffiwrapper.SealProofTypeFromSectorSize(arbitrarySectorSize)
	if err != nil {
		return nil, xerrors.Errorf("bad sector size: %w", err)
	}

	rdr, err := os.Open(inpath)
	if err != nil {
		return nil, err
	}
	defer rdr.Close()

	stat, err := rdr.Stat()
	if err != nil {
		return nil, err
	}

	commP, pieceSize, err := pieceio.GeneratePieceCommitment(rt, rdr, uint64(stat.Size()))
	if err != nil {
		return nil, xerrors.Errorf("computing commP failed: %w", err)
	}

	return &api.CommPRet{
		Root: commP,
		Size: pieceSize,
	}, nil
}
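
// lenWriter counts the number of bytes written to it.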
type lenWriter int64

func (w *lenWriter) Write(p []byte) (n int, err error) {
	*w += lenWriter(len(p))
	return len(p), nil
}
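
// ClientDealSize serializes the DAG under root as a CAR to measure its payload size,
// and returns it together with the padded piece size it would occupy in a deal.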
func (a *API) ClientDealSize(ctx context.Context, root cid.Cid) (api.DataSize, error) {
	dag := merkledag.NewDAGService(blockservice.New(a.CombinedBstore, offline.Exchange(a.CombinedBstore)))

	w := lenWriter(0)

	err := car.WriteCar(ctx, dag, []cid.Cid{root}, &w)
	if err != nil {
		return api.DataSize{}, err
	}

	up := padreader.PaddedSize(uint64(w))

	return api.DataSize{
		PayloadSize: int64(w),
		PieceSize:   up.Padded(),
	}, nil
}
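
// ClientGenCar imports the file at ref and writes its entire DAG to outputPath as a CAR file.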
func (a *API) ClientGenCar(ctx context.Context, ref api.FileRef, outputPath string) error {
	id, st, err := a.imgr().NewStore()
	if err != nil {
		return err
	}
	if err := a.imgr().AddLabel(id, "source", "gen-car"); err != nil {
		return err
	}

	bufferedDS := ipld.NewBufferedDAG(ctx, st.DAG)
	c, err := a.clientImport(ctx, ref, st)
	if err != nil {
		return err
	}

	// TODO: does that defer mean to remove the whole blockstore?
	defer bufferedDS.Remove(ctx, c) //nolint:errcheck

	ssb := builder.NewSelectorSpecBuilder(basicnode.Style.Any)

	// entire DAG selector
	allSelector := ssb.ExploreRecursive(selector.RecursionLimitNone(),
		ssb.ExploreAll(ssb.ExploreRecursiveEdge())).Node()

	f, err := os.Create(outputPath)
	if err != nil {
		return err
	}

	sc := car.NewSelectiveCar(ctx, st.Bstore, []car.Dag{{Root: c, Selector: allSelector}})
	if err = sc.Write(f); err != nil {
		return err
	}

	return f.Close()
}
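
// clientImport adds the data at ref.Path to store: CAR files are loaded directly,
// anything else is chunked into a UnixFS DAG. It returns the root CID.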
func (a *API) clientImport(ctx context.Context, ref api.FileRef, store *multistore.Store) (cid.Cid, error) {
	f, err := os.Open(ref.Path)
	if err != nil {
		return cid.Undef, err
	}
	defer f.Close()

	stat, err := f.Stat()
	if err != nil {
		return cid.Undef, err
	}

	file, err := files.NewReaderPathFile(ref.Path, f, stat)
	if err != nil {
		return cid.Undef, err
	}

	if ref.IsCAR {
		var st car.Store
		if store.Fstore == nil {
			st = store.Bstore
		} else {
			st = store.Fstore
		}
		result, err := car.LoadCar(st, file)
		if err != nil {
			return cid.Undef, err
		}

		if len(result.Roots) != 1 {
			return cid.Undef, xerrors.New("cannot import car with more than one root")
		}

		return result.Roots[0], nil
	}

	bufDs := ipld.NewBufferedDAG(ctx, store.DAG)

	prefix, err := merkledag.PrefixForCidVersion(1)
	if err != nil {
		return cid.Undef, err
	}
	prefix.MhType = DefaultHashFunction

	params := ihelper.DagBuilderParams{
		Maxlinks:  build.UnixfsLinksPerLevel,
		RawLeaves: true,
		CidBuilder: cidutil.InlineBuilder{
			Builder: prefix,
			Limit:   126,
		},
		Dagserv: bufDs,
		NoCopy:  true,
	}

	db, err := params.New(chunker.NewSizeSplitter(file, int64(build.UnixfsChunkSize)))
	if err != nil {
		return cid.Undef, err
	}
	nd, err := balanced.Layout(db)
	if err != nil {
		return cid.Undef, err
	}

	if err := bufDs.Commit(); err != nil {
		return cid.Undef, err
	}

	return nd.Cid(), nil
}