package client

import (
	"bytes"
	"context"
	"errors"
	"io"
	"math"
	"os"

	"golang.org/x/xerrors"

	"github.com/ipfs/go-blockservice"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-filestore"
	chunker "github.com/ipfs/go-ipfs-chunker"
	offline "github.com/ipfs/go-ipfs-exchange-offline"
	files "github.com/ipfs/go-ipfs-files"
	ipld "github.com/ipfs/go-ipld-format"
	"github.com/ipfs/go-merkledag"
	unixfile "github.com/ipfs/go-unixfs/file"
	"github.com/ipfs/go-unixfs/importer/balanced"
	ihelper "github.com/ipfs/go-unixfs/importer/helpers"
	"github.com/libp2p/go-libp2p-core/peer"
	"go.uber.org/fx"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/node/impl/full"
	"github.com/filecoin-project/lotus/node/impl/paych"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
	retrievalmarket "github.com/filecoin-project/lotus/retrieval"
	"github.com/filecoin-project/lotus/storagemarket"
)

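// API implements the client-side node API methods. Its dependencies (chain,
// state, wallet and payment-channel APIs, the storage- and retrieval-market
// clients, and the local block/DAG stores) are injected via fx.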
type API struct {
	fx.In

	full.ChainAPI
	full.StateAPI
	full.WalletAPI
	paych.PaychAPI

	SMDealClient storagemarket.StorageClient
	RetDiscovery retrievalmarket.PeerResolver
	Retrieval    retrievalmarket.RetrievalClient
	Chain        *store.ChainStore

	LocalDAG   dtypes.ClientDAG
	Blockstore dtypes.ClientBlockstore
	Filestore  dtypes.ClientFilestore `optional:"true"`
}

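// ClientStartDeal proposes a storage deal for the given data with the given
// miner, paying from addr, and returns the CID of the deal proposal.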
func (a *API) ClientStartDeal(ctx context.Context, data cid.Cid, addr address.Address, miner address.Address, epochPrice types.BigInt, blocksDuration uint64) (*cid.Cid, error) {
	exist, err := a.WalletHas(ctx, addr)
	if err != nil {
		return nil, xerrors.Errorf("failed getting addr from wallet: %w", err)
	}
	if !exist {
		return nil, xerrors.Errorf("provided address doesn't exist in wallet")
	}

	pid, err := a.StateMinerPeerID(ctx, miner, nil)
	if err != nil {
		return nil, xerrors.Errorf("failed getting peer ID: %w", err)
	}

	mw, err := a.StateMinerWorker(ctx, miner, nil)
	if err != nil {
		return nil, xerrors.Errorf("failed getting miner worker: %w", err)
	}
	providerInfo := storagemarket.StorageProviderInfo{
		Address: miner,
		Worker:  mw,
		PeerID:  pid,
	}
	result, err := a.SMDealClient.ProposeStorageDeal(
		ctx,
		addr,
		&providerInfo,
		data,
		storagemarket.Epoch(math.MaxUint64),
		storagemarket.Epoch(blocksDuration),
		storagemarket.TokenAmount(epochPrice),
		storagemarket.TokenAmount(storagemarket.EmptyInt))

	if err != nil {
		return nil, xerrors.Errorf("failed to start deal: %w", err)
	}

	return &result.ProposalCid, nil
}

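// ClientListDeals returns summary information for every in-progress storage
// deal known to the storage market client.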
func (a *API) ClientListDeals(ctx context.Context) ([]api.DealInfo, error) {
	deals, err := a.SMDealClient.ListInProgressDeals(ctx)
	if err != nil {
		return nil, err
	}

	out := make([]api.DealInfo, len(deals))
	for k, v := range deals {
		out[k] = api.DealInfo{
			ProposalCid: v.ProposalCid,
			State:       v.State,
			Provider:    v.Proposal.Provider,

			PieceRef: v.Proposal.PieceRef,
			Size:     v.Proposal.PieceSize,

			PricePerEpoch: v.Proposal.StoragePricePerEpoch,
			Duration:      v.Proposal.Duration,
		}
	}

	return out, nil
}

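// ClientGetDealInfo looks up a single in-progress deal by its proposal CID.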
func (a *API) ClientGetDealInfo(ctx context.Context, d cid.Cid) (*api.DealInfo, error) {
	v, err := a.SMDealClient.GetInProgressDeal(ctx, d)
	if err != nil {
		return nil, err
	}

	return &api.DealInfo{
		ProposalCid:   v.ProposalCid,
		State:         v.State,
		Provider:      v.Proposal.Provider,
		PieceRef:      v.Proposal.PieceRef,
		Size:          v.Proposal.PieceSize,
		PricePerEpoch: v.Proposal.StoragePricePerEpoch,
		Duration:      v.Proposal.Duration,
	}, nil
}

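// ClientHasLocal reports whether the root block is present in the local
// blockstore; it does not yet verify that the entire DAG is available.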
func (a *API) ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) {
	// TODO: check if we have the ENTIRE dag

	offExch := merkledag.NewDAGService(blockservice.New(a.Blockstore, offline.Exchange(a.Blockstore)))
	_, err := offExch.Get(ctx, root)
	if err == ipld.ErrNotFound {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}

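// ClientFindData queries the known retrieval providers for the given root
// and returns one offer (or per-provider error) for each of them.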
func (a *API) ClientFindData(ctx context.Context, root cid.Cid) ([]api.QueryOffer, error) {
	peers, err := a.RetDiscovery.GetPeers(root)
	if err != nil {
		return nil, err
	}

	out := make([]api.QueryOffer, len(peers))
	for k, p := range peers {
		queryResponse, err := a.Retrieval.Query(ctx, p, root.Bytes(), retrievalmarket.QueryParams{})
		if err != nil {
			out[k] = api.QueryOffer{Err: err.Error(), Miner: p.Address, MinerPeerID: p.ID}
		} else {
			out[k] = api.QueryOffer{
				Root:        root,
				Size:        queryResponse.Size,
				MinPrice:    queryResponse.PieceRetrievalPrice(),
				Miner:       p.Address, // TODO: check
				MinerPeerID: p.ID,
			}
		}
	}

	return out, nil
}

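// ClientImport chunks the file at path into a unixfs DAG backed by the
// filestore (NoCopy) and returns the root CID.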
func (a *API) ClientImport(ctx context.Context, path string) (cid.Cid, error) {
	f, err := os.Open(path)
	if err != nil {
		return cid.Undef, err
	}
	// the DAG is fully built before this function returns, so the file can be
	// closed on exit; later reads go through the filestore by path
	defer f.Close()

	stat, err := f.Stat()
	if err != nil {
		return cid.Undef, err
	}

	file, err := files.NewReaderPathFile(path, f, stat)
	if err != nil {
		return cid.Undef, err
	}

	bufferedDS := ipld.NewBufferedDAG(ctx, a.LocalDAG)

	params := ihelper.DagBuilderParams{
		Maxlinks:   build.UnixfsLinksPerLevel,
		RawLeaves:  true,
		CidBuilder: nil,
		Dagserv:    bufferedDS,
		NoCopy:     true,
	}

	db, err := params.New(chunker.NewSizeSplitter(file, int64(build.UnixfsChunkSize)))
	if err != nil {
		return cid.Undef, err
	}
	nd, err := balanced.Layout(db)
	if err != nil {
		return cid.Undef, err
	}

	if err := bufferedDS.Commit(); err != nil {
		return cid.Undef, err
	}

	return nd.Cid(), nil
}

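// ClientImportLocal imports data from a reader into the local DAG, storing
// the resulting blocks in the client blockstore, and returns the root CID.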
func (a *API) ClientImportLocal(ctx context.Context, f io.Reader) (cid.Cid, error) {
	file := files.NewReaderFile(f)

	bufferedDS := ipld.NewBufferedDAG(ctx, a.LocalDAG)

	params := ihelper.DagBuilderParams{
		Maxlinks:   build.UnixfsLinksPerLevel,
		RawLeaves:  true,
		CidBuilder: nil,
		Dagserv:    bufferedDS,
	}

	db, err := params.New(chunker.NewSizeSplitter(file, int64(build.UnixfsChunkSize)))
	if err != nil {
		return cid.Undef, err
	}
	nd, err := balanced.Layout(db)
	if err != nil {
		return cid.Undef, err
	}

	return nd.Cid(), bufferedDS.Commit()
}

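// ClientListImports walks the filestore and returns one entry per imported
// file; it requires the on-disk filestore to be configured.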
func (a *API) ClientListImports(ctx context.Context) ([]api.Import, error) {
	if a.Filestore == nil {
		return nil, errors.New("listing imports is not supported with in-memory dag yet")
	}
	next, err := filestore.ListAll(a.Filestore, false)
	if err != nil {
		return nil, err
	}

	// TODO: make this less very bad by tracking root cids instead of using ListAll

	out := make([]api.Import, 0)
	for {
		r := next()
		if r == nil {
			return out, nil
		}
		if r.Offset != 0 {
			// report each imported file only once, via its block at offset 0
			continue
		}
		out = append(out, api.Import{
			Status:   r.Status,
			Key:      r.Key,
			FilePath: r.FilePath,
			Size:     r.Size,
		})
	}
}

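// ClientRetrieve retrieves the data behind order.Root from the given miner
// and writes the resulting unixfs file to path.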
func (a *API) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, path string) error {
	if order.MinerPeerID == "" {
		pid, err := a.StateMinerPeerID(ctx, order.Miner, nil)
		if err != nil {
			return err
		}

		order.MinerPeerID = pid
	}

	retrievalResult := make(chan error, 1)

	unsubscribe := a.Retrieval.SubscribeToEvents(func(event retrievalmarket.ClientEvent, state retrievalmarket.ClientDealState) {
		if bytes.Equal(state.PieceCID, order.Root.Bytes()) {
			switch event {
			case retrievalmarket.ClientEventError:
				retrievalResult <- xerrors.New("Retrieval Error")
			case retrievalmarket.ClientEventComplete:
				retrievalResult <- nil
			}
		}
	})
	// release the event subscription on every return path, including errors
	defer unsubscribe()

	a.Retrieval.Retrieve(
		ctx, order.Root.Bytes(), retrievalmarket.Params{
			PricePerByte: types.BigDiv(order.Total, types.NewInt(order.Size)),
		}, order.Total, order.MinerPeerID, order.Client, order.Miner)
	select {
	case <-ctx.Done():
		return xerrors.New("Retrieval Timed Out")
	case err := <-retrievalResult:
		if err != nil {
			return xerrors.Errorf("RetrieveUnixfs: %w", err)
		}
	}

	nd, err := a.LocalDAG.Get(ctx, order.Root)
	if err != nil {
		return xerrors.Errorf("ClientRetrieve: %w", err)
	}
	file, err := unixfile.NewUnixfsFile(ctx, a.LocalDAG, nd)
	if err != nil {
		return xerrors.Errorf("ClientRetrieve: %w", err)
	}
	return files.WriteTo(file, path)
}

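// ClientQueryAsk fetches the signed storage ask of the given miner over the
// network via the storage market client.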
func (a *API) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*types.SignedStorageAsk, error) {
	info := storagemarket.StorageProviderInfo{Address: miner, PeerID: p}
	return a.SMDealClient.GetAsk(ctx, info)
}