// lotus/node/modules/storageminer.go

package modules

import (
"context"
"math"
"reflect"
2019-08-08 04:24:49 +00:00
"github.com/filecoin-project/go-address"
dtgraphsync "github.com/filecoin-project/go-data-transfer/impl/graphsync"
piecefilestore "github.com/filecoin-project/go-fil-markets/filestore"
"github.com/filecoin-project/go-fil-markets/piecestore"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl"
rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network"
"github.com/filecoin-project/go-fil-markets/storagemarket"
deals "github.com/filecoin-project/go-fil-markets/storagemarket/impl"
storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl"
smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network"
paramfetch "github.com/filecoin-project/go-paramfetch"
"github.com/filecoin-project/go-sectorbuilder"
"github.com/filecoin-project/go-sectorbuilder/fs"
"github.com/filecoin-project/go-statestore"
"github.com/ipfs/go-bitswap"
"github.com/ipfs/go-bitswap/network"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
graphsync "github.com/ipfs/go-graphsync/impl"
"github.com/ipfs/go-graphsync/ipldbridge"
gsnet "github.com/ipfs/go-graphsync/network"
"github.com/ipfs/go-graphsync/storeutil"
blockstore "github.com/ipfs/go-ipfs-blockstore"
"github.com/ipfs/go-merkledag"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/routing"
"github.com/mitchellh/go-homedir"
"go.uber.org/fx"
"golang.org/x/xerrors"
2019-08-01 14:19:53 +00:00
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/markets/retrievaladapter"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/helpers"
"github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/lotus/storage"
"github.com/filecoin-project/lotus/storage/sealing"
)
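
// minerAddrFromDS loads the miner actor address persisted under the
// "miner-address" key in the metadata datastore.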
func minerAddrFromDS(ds dtypes.MetadataDS) (address.Address, error) {
maddrb, err := ds.Get(datastore.NewKey("miner-address"))
if err != nil {
return address.Undef, err
}
return address.NewFromBytes(maddrb)
}
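
// GetParams ensures the proof parameters for the configured sector size
// are available locally, fetching them if needed.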
func GetParams(sbc *sectorbuilder.Config) error {
if err := paramfetch.GetParams(build.ParametersJson(), uint64(sbc.SectorSize)); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}
return nil
}
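
// SectorBuilderConfig constructs the sectorbuilder configuration, resolving
// the miner address and sector size from the chain and expanding the
// configured storage paths.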
func SectorBuilderConfig(storage []fs.PathConfig, threads uint, noprecommit, nocommit bool) func(dtypes.MetadataDS, api.FullNode) (*sectorbuilder.Config, error) {
return func(ds dtypes.MetadataDS, api api.FullNode) (*sectorbuilder.Config, error) {
minerAddr, err := minerAddrFromDS(ds)
if err != nil {
return nil, err
}
ssize, err := api.StateMinerSectorSize(context.TODO(), minerAddr, nil)
if err != nil {
return nil, err
}
for i := range storage {
storage[i].Path, err = homedir.Expand(storage[i].Path)
if err != nil {
return nil, err
}
}
if threads > math.MaxUint8 {
return nil, xerrors.Errorf("too many sectorbuilder threads specified: %d, max allowed: %d", threads, math.MaxUint8)
}
sb := &sectorbuilder.Config{
Miner: minerAddr,
SectorSize: ssize,
WorkerThreads: uint8(threads),
NoPreCommit: noprecommit,
NoCommit: nocommit,
Paths: storage,
}
return sb, nil
}
}
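
// StorageMiner creates the storage miner instance and hooks it, together
// with the fallback PoSt scheduler, into the node lifecycle.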
func StorageMiner(mctx helpers.MetricsCtx, lc fx.Lifecycle, api api.FullNode, h host.Host, ds dtypes.MetadataDS, sb sectorbuilder.Interface, tktFn sealing.TicketFn) (*storage.Miner, error) {
maddr, err := minerAddrFromDS(ds)
if err != nil {
return nil, err
}
ctx := helpers.LifecycleCtx(mctx, lc)
worker, err := api.StateMinerWorker(ctx, maddr, nil)
if err != nil {
return nil, err
}
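
// the fallback PoSt scheduler watches the chain and submits the miner's
// fallback PoSt proofs as proving periods come due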
fps := storage.NewFPoStScheduler(api, sb, maddr, worker)
sm, err := storage.NewMiner(api, maddr, worker, h, ds, sb, tktFn)
if err != nil {
return nil, err
}
lc.Append(fx.Hook{
OnStart: func(context.Context) error {
go fps.Run(ctx)
return sm.Run(ctx)
},
OnStop: sm.Stop,
})
return sm, nil
}
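
// HandleRetrieval starts the retrieval provider on node startup and stops
// it on shutdown.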
func HandleRetrieval(host host.Host, lc fx.Lifecycle, m retrievalmarket.RetrievalProvider) {
lc.Append(fx.Hook{
OnStart: func(context.Context) error {
m.Start()
return nil
},
OnStop: func(context.Context) error {
m.Stop()
return nil
},
})
}
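
// HandleDeals starts the storage provider on node startup and stops it on
// shutdown.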
func HandleDeals(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, h storagemarket.StorageProvider) {
ctx := helpers.LifecycleCtx(mctx, lc)
lc.Append(fx.Hook{
OnStart: func(context.Context) error {
h.Start(ctx)
return nil
},
OnStop: func(context.Context) error {
h.Stop()
return nil
},
})
}

// RegisterProviderValidator is an initialization hook that registers the provider
// request validator with the data transfer module as the validator for
// StorageDataTransferVoucher types
func RegisterProviderValidator(mrv *deals.ProviderRequestValidator, dtm dtypes.ProviderDataTransfer) {
if err := dtm.RegisterVoucherType(reflect.TypeOf(&deals.StorageDataTransferVoucher{}), mrv); err != nil {
panic(err)
}
}

// NewProviderDAGServiceDataTransfer returns a data transfer manager that
// moves data for the provider over its staging graphsync instance
func NewProviderDAGServiceDataTransfer(h host.Host, gs dtypes.StagingGraphsync) dtypes.ProviderDataTransfer {
return dtgraphsync.NewGraphSyncDataTransfer(h, gs)
}

// NewProviderDealStore creates a statestore for the provider to store its deals
func NewProviderDealStore(ds dtypes.MetadataDS) dtypes.ProviderDealStore {
return statestore.New(namespace.Wrap(ds, datastore.NewKey("/deals/client")))
}

// NewProviderPieceStore creates a statestore for storing metadata about pieces
// shared by the storage and retrieval providers
func NewProviderPieceStore(ds dtypes.MetadataDS) dtypes.ProviderPieceStore {
return piecestore.NewPieceStore(ds)
}

// StagingBlockstore creates a blockstore for staging blocks for a miner
// in a storage deal, prior to sealing
func StagingBlockstore(r repo.LockedRepo) (dtypes.StagingBlockstore, error) {
stagingds, err := r.Datastore("/staging")
if err != nil {
return nil, err
}
bs := blockstore.NewBlockstore(stagingds)
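// wrap the blockstore in an id-store so identity CIDs are resolved from the
// CID itself rather than written to disk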
ibs := blockstore.NewIdStore(bs)
return ibs, nil
}

// StagingDAG is a DAGService for the StagingBlockstore
func StagingDAG(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, rt routing.Routing, h host.Host) (dtypes.StagingDAG, error) {
bitswapNetwork := network.NewFromIpfsHost(h, rt)
exch := bitswap.New(helpers.LifecycleCtx(mctx, lc), bitswapNetwork, ibs)
bsvc := blockservice.New(ibs, exch)
dag := merkledag.NewDAGService(bsvc)
lc.Append(fx.Hook{
OnStop: func(_ context.Context) error {
return bsvc.Close()
},
})
return dag, nil
}

// StagingGraphsync creates a graphsync instance which reads and writes blocks
// to the StagingBlockstore
func StagingGraphsync(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync {
graphsyncNetwork := gsnet.NewFromLibp2pHost(h)
ipldBridge := ipldbridge.NewIPLDBridge()
loader := storeutil.LoaderForBlockstore(ibs)
storer := storeutil.StorerForBlockstore(ibs)
gs := graphsync.New(helpers.LifecycleCtx(mctx, lc), graphsyncNetwork, ipldBridge, loader, storer)
return gs
}
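
// SetupBlockProducer registers the miner address with the block producer on
// start and unregisters it on stop.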
func SetupBlockProducer(lc fx.Lifecycle, ds dtypes.MetadataDS, api api.FullNode, epp gen.ElectionPoStProver) (*miner.Miner, error) {
minerAddr, err := minerAddrFromDS(ds)
if err != nil {
return nil, err
}
m := miner.NewMiner(api, epp)
lc.Append(fx.Hook{
OnStart: func(ctx context.Context) error {
if err := m.Register(minerAddr); err != nil {
return err
}
return nil
},
OnStop: func(ctx context.Context) error {
return m.Unregister(ctx, minerAddr)
},
})
return m, nil
}
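
// SectorBuilder constructs a SectorBuilder backed by a namespaced region of
// the metadata datastore.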
func SectorBuilder(cfg *sectorbuilder.Config, ds dtypes.MetadataDS) (*sectorbuilder.SectorBuilder, error) {
sb, err := sectorbuilder.New(cfg, namespace.Wrap(ds, datastore.NewKey("/sectorbuilder")))
if err != nil {
return nil, err
}
return sb, nil
}
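
// SealTicketGen returns a TicketFn which derives a seal ticket from the
// chain randomness SealRandomnessLookback epochs behind the current head.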
func SealTicketGen(api api.FullNode) sealing.TicketFn {
return func(ctx context.Context) (*sectorbuilder.SealTicket, error) {
ts, err := api.ChainHead(ctx)
if err != nil {
return nil, xerrors.Errorf("getting head ts for SealTicket failed: %w", err)
}
r, err := api.ChainGetRandomness(ctx, ts.Key(), int64(ts.Height())-build.SealRandomnessLookback)
if err != nil {
return nil, xerrors.Errorf("getting randomness for SealTicket failed: %w", err)
}
var tkt [sectorbuilder.CommLen]byte
if n := copy(tkt[:], r); n != sectorbuilder.CommLen {
return nil, xerrors.Errorf("unexpected randomness len: %d (expected %d)", n, sectorbuilder.CommLen)
}
return &sectorbuilder.SealTicket{
BlockHeight: uint64(ts.Height()),
TicketBytes: tkt,
}, nil
}
}
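
// NewProviderRequestValidator creates a validator for storage deal data
// transfer vouchers, backed by the provider deal store.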
func NewProviderRequestValidator(deals dtypes.ProviderDealStore) *storageimpl.ProviderRequestValidator {
return storageimpl.NewProviderRequestValidator(deals)
}
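
// StorageProvider creates the storage market provider, wiring together the
// libp2p network, the local piece filestore, the piece store, and the data
// transfer manager for the miner address stored in the datastore.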
func StorageProvider(h host.Host, ds dtypes.MetadataDS, ibs dtypes.StagingBlockstore, r repo.LockedRepo, pieceStore dtypes.ProviderPieceStore, dataTransfer dtypes.ProviderDataTransfer, spn storagemarket.StorageProviderNode) (storagemarket.StorageProvider, error) {
store, err := piecefilestore.NewLocalFileStore(piecefilestore.OsPath(r.Path()))
if err != nil {
return nil, err
}
net := smnet.NewFromLibp2pHost(h)
addr, err := ds.Get(datastore.NewKey("miner-address"))
if err != nil {
return nil, err
}
minerAddress, err := address.NewFromBytes(addr)
if err != nil {
return nil, err
}
return storageimpl.NewProvider(net, ds, ibs, store, pieceStore, dataTransfer, spn, minerAddress)
}

// RetrievalProvider creates a new retrieval provider attached to the provider blockstore
func RetrievalProvider(h host.Host, miner *storage.Miner, sb sectorbuilder.Interface, full api.FullNode, ds dtypes.MetadataDS, pieceStore dtypes.ProviderPieceStore, ibs dtypes.StagingBlockstore) (retrievalmarket.RetrievalProvider, error) {
adapter := retrievaladapter.NewRetrievalProviderNode(miner, sb, full)
address, err := minerAddrFromDS(ds)
if err != nil {
return nil, err
}
network := rmnet.NewFromLibp2pHost(h)
return retrievalimpl.NewProvider(address, adapter, network, pieceStore, ibs), nil
}