lotus/node/modules/client.go

package modules

import (
	"context"
	"path/filepath"

	graphsyncimpl "github.com/filecoin-project/go-data-transfer/impl/graphsync"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery"
	retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl"
	rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network"
	"github.com/filecoin-project/go-fil-markets/storagemarket"
	storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl"
	"github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation"
	smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network"
	"github.com/filecoin-project/go-statestore"
	"github.com/filecoin-project/go-storedcounter"
	"github.com/ipfs/go-bitswap"
	"github.com/ipfs/go-bitswap/network"
	"github.com/ipfs/go-blockservice"
	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"
	"github.com/ipfs/go-filestore"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	"github.com/ipfs/go-merkledag"
	"github.com/libp2p/go-libp2p-core/host"
	"github.com/libp2p/go-libp2p-core/routing"
	"go.uber.org/fx"

	"github.com/filecoin-project/lotus/markets/retrievaladapter"
	"github.com/filecoin-project/lotus/node/impl/full"
	payapi "github.com/filecoin-project/lotus/node/impl/paych"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
	"github.com/filecoin-project/lotus/node/modules/helpers"
	"github.com/filecoin-project/lotus/node/repo"
	"github.com/filecoin-project/lotus/paychmgr"
)

// ClientFstore opens the client datastore in the repo and wraps it in a
// Filestore whose FileManager is rooted at the parent of the repo path, so
// client imports can reference files in place on disk (AllowFiles) instead of
// copying their blocks into the datastore.
func ClientFstore(r repo.LockedRepo) (dtypes.ClientFilestore, error) {
	clientds, err := r.Datastore("/client")
	if err != nil {
		return nil, err
	}
	blocks := namespace.Wrap(clientds, datastore.NewKey("blocks"))

	absPath, err := filepath.Abs(r.Path())
	if err != nil {
		return nil, err
	}
	fm := filestore.NewFileManager(clientds, filepath.Dir(absPath))
	fm.AllowFiles = true
	// TODO: fm.AllowUrls (needs more code in client import)
	bs := blockstore.NewBlockstore(blocks)
	return filestore.NewFilestore(bs, fm), nil
}

// ClientBlockstore returns the client blockstore, wrapping the client
// Filestore so that identity CIDs are resolved inline rather than looked up
// in the underlying store.
func ClientBlockstore(fstore dtypes.ClientFilestore) dtypes.ClientBlockstore {
	return blockstore.NewIdStore((*filestore.Filestore)(fstore))
}

// RegisterClientValidator is an initialization hook that registers the client
// request validator with the data transfer module as the validator for
// StorageDataTransferVoucher types
func RegisterClientValidator(crv dtypes.ClientRequestValidator, dtm dtypes.ClientDataTransfer) {
	if err := dtm.RegisterVoucherType(&requestvalidation.StorageDataTransferVoucher{}, (*requestvalidation.UnifiedRequestValidator)(crv)); err != nil {
		panic(err)
	}
}
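
// RegisterClientValidator has no return value and exists only for its side
// effect, so the node builder is expected to run it as an invoke-style hook
// once the validator and data transfer manager have been constructed, rather
// than as a provider like the other constructors in this file. A minimal
// sketch of that wiring, assuming plain go.uber.org/fx:
//
//	fx.Invoke(RegisterClientValidator)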

// NewClientGraphsyncDataTransfer returns a data transfer manager that moves
// data over the client's graphsync instance, using a persistent stored
// counter to allocate transfer IDs
func NewClientGraphsyncDataTransfer(h host.Host, gs dtypes.Graphsync, ds dtypes.MetadataDS) dtypes.ClientDataTransfer {
	sc := storedcounter.New(ds, datastore.NewKey("/datatransfer/client/counter"))
	return graphsyncimpl.NewGraphSyncDataTransfer(h, gs, sc)
}
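
// The stored counter persists its last value under
// /datatransfer/client/counter in the metadata datastore, so the transfer IDs
// the manager allocates keep increasing across node restarts. A minimal
// sketch of the counter on its own, assuming ds is some datastore.Batching
// instance (hypothetical name, for illustration only):
//
//	sc := storedcounter.New(ds, datastore.NewKey("/datatransfer/client/counter"))
//	next, err := sc.Next() // monotonically increasing, survives restarts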

// NewClientDealStore creates a statestore for the client to store its deals
func NewClientDealStore(ds dtypes.ClientDatastore) dtypes.ClientDealStore {
	return statestore.New(ds)
}

// NewClientDatastore creates a datastore for the client to store its deals
func NewClientDatastore(ds dtypes.MetadataDS) dtypes.ClientDatastore {
	return namespace.Wrap(ds, datastore.NewKey("/deals/client"))
}
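
// namespace.Wrap prefixes every key with the given namespace, so the two
// providers above compose: a record stored under "deal-1" in the
// ClientDatastore lands at /deals/client/deal-1 in the metadata datastore. A
// minimal sketch, where metaDS stands in for an injected dtypes.MetadataDS
// and val for an encoded deal record (both hypothetical):
//
//	deals := NewClientDatastore(metaDS)
//	err := deals.Put(datastore.NewKey("deal-1"), val) // stored at /deals/client/deal-1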

// ClientDAG is a DAGService for the ClientBlockstore
func ClientDAG(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.ClientBlockstore, rt routing.Routing, h host.Host) dtypes.ClientDAG {
	bitswapNetwork := network.NewFromIpfsHost(h, rt)
	bitswapOptions := []bitswap.Option{bitswap.ProvideEnabled(false)}
	exch := bitswap.New(helpers.LifecycleCtx(mctx, lc), bitswapNetwork, ibs, bitswapOptions...)

	bsvc := blockservice.New(ibs, exch)
	dag := merkledag.NewDAGService(bsvc)

	lc.Append(fx.Hook{
		OnStop: func(_ context.Context) error {
			return bsvc.Close()
		},
	})

	return dag
}
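
// The returned DAG service reads through the client blockstore and falls back
// to bitswap for blocks that are not held locally; providing is disabled, so
// the node fetches but does not advertise. A minimal usage sketch, assuming
// cdag is the injected dtypes.ClientDAG and c is a cid.Cid obtained elsewhere
// (both hypothetical):
//
//	nd, err := cdag.Get(ctx, c) // local blockstore first, then bitswap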

// NewClientRequestValidator returns a validator for storage deal data
// transfer vouchers, backed by the client's deal store
func NewClientRequestValidator(deals dtypes.ClientDealStore) dtypes.ClientRequestValidator {
	return requestvalidation.NewUnifiedRequestValidator(nil, deals)
}

// StorageClient creates the storage market client and ties its start and stop
// to the fx application lifecycle
func StorageClient(lc fx.Lifecycle, h host.Host, ibs dtypes.ClientBlockstore, r repo.LockedRepo, dataTransfer dtypes.ClientDataTransfer, discovery *discovery.Local, deals dtypes.ClientDatastore, scn storagemarket.StorageClientNode) (storagemarket.StorageClient, error) {
	net := smnet.NewFromLibp2pHost(h)
	c, err := storageimpl.NewClient(net, ibs, dataTransfer, discovery, deals, scn)
	if err != nil {
		return nil, err
	}

	lc.Append(fx.Hook{
		OnStart: func(ctx context.Context) error {
			c.Run(ctx)
			return nil
		},
		OnStop: func(context.Context) error {
			c.Stop()
			return nil
		},
	})

	return c, nil
}

// RetrievalClient creates a new retrieval client attached to the client blockstore
func RetrievalClient(h host.Host, bs dtypes.ClientBlockstore, pmgr *paychmgr.Manager, payapi payapi.PaychAPI, resolver retrievalmarket.PeerResolver, ds dtypes.MetadataDS, chainapi full.ChainAPI) (retrievalmarket.RetrievalClient, error) {
	adapter := retrievaladapter.NewRetrievalClientNode(pmgr, payapi, chainapi)
	network := rmnet.NewFromLibp2pHost(h)
	sc := storedcounter.New(ds, datastore.NewKey("/retr"))
	return retrievalimpl.NewClient(network, bs, adapter, resolver, namespace.Wrap(ds, datastore.NewKey("/retrievals/client")), sc)
}
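
// Taken together, the providers in this file assemble the client side of the
// markets stack. A minimal sketch of how they could be composed with plain
// go.uber.org/fx; the actual lotus node builder wires them through its own
// option/override machinery, so this is illustrative only:
//
//	fx.Options(
//		fx.Provide(
//			ClientFstore,
//			ClientBlockstore,
//			NewClientGraphsyncDataTransfer,
//			NewClientDatastore,
//			NewClientDealStore,
//			NewClientRequestValidator,
//			ClientDAG,
//			StorageClient,
//			RetrievalClient,
//		),
//		fx.Invoke(RegisterClientValidator),
//	)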