package modules

import (
	"context"
	"net/http"
	"reflect"

	"github.com/ipfs/go-bitswap"
	"github.com/ipfs/go-bitswap/network"
	"github.com/ipfs/go-blockservice"
	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"
	graphsync "github.com/ipfs/go-graphsync/impl"
	gsnet "github.com/ipfs/go-graphsync/network"
	"github.com/ipfs/go-graphsync/storeutil"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	"github.com/ipfs/go-merkledag"
	"github.com/libp2p/go-libp2p-core/host"
	"github.com/libp2p/go-libp2p-core/routing"
	"go.uber.org/fx"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	dtgraphsync "github.com/filecoin-project/go-data-transfer/impl/graphsync"
	piecefilestore "github.com/filecoin-project/go-fil-markets/filestore"
	"github.com/filecoin-project/go-fil-markets/piecestore"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl"
	rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network"
	"github.com/filecoin-project/go-fil-markets/storagemarket"
	storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl"
	"github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation"
	smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network"
	"github.com/filecoin-project/go-fil-markets/storedcounter"
	paramfetch "github.com/filecoin-project/go-paramfetch"
	"github.com/filecoin-project/go-statestore"
	sectorstorage "github.com/filecoin-project/sector-storage"
	"github.com/filecoin-project/sector-storage/ffiwrapper"
	"github.com/filecoin-project/sector-storage/stores"
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-actors/actors/crypto"

	lapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/beacon"
	"github.com/filecoin-project/lotus/chain/gen"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/markets/retrievaladapter"
	"github.com/filecoin-project/lotus/miner"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
	"github.com/filecoin-project/lotus/node/modules/helpers"
	"github.com/filecoin-project/lotus/node/repo"
	"github.com/filecoin-project/lotus/storage"

	sealing "github.com/filecoin-project/storage-fsm"
)

func minerAddrFromDS(ds dtypes.MetadataDS) (address.Address, error) {
	maddrb, err := ds.Get(datastore.NewKey("miner-address"))
	if err != nil {
		return address.Undef, err
	}

	return address.NewFromBytes(maddrb)
}
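
// The "miner-address" key is written once during miner initialization; the
// following is a hedged sketch of that write side (the actual init code lives
// elsewhere in the repo and may differ):
//
//	if err := mds.Put(datastore.NewKey("miner-address"), maddr.Bytes()); err != nil {
//		return err
//	}
//
// minerAddrFromDS is the read-side counterpart used by the providers below.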

// GetParams fetches the proof parameters matching the configured seal proof's
// sector size.
func GetParams(sbc *ffiwrapper.Config) error {
	ssize, err := sbc.SealProofType.SectorSize()
	if err != nil {
		return err
	}

	if err := paramfetch.GetParams(build.ParametersJson(), uint64(ssize)); err != nil {
		return xerrors.Errorf("fetching proof parameters: %w", err)
	}

	return nil
}
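
// A minimal usage sketch, assuming the config comes from ProofsConfig below:
// the proof parameter files must be fetched before any sealing or
// verification is attempted.
//
//	cfg, err := ProofsConfig(maddr, fnapi)
//	if err != nil {
//		return err
//	}
//	if err := GetParams(cfg); err != nil {
//		return err
//	}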

func MinerAddress(ds dtypes.MetadataDS) (dtypes.MinerAddress, error) {
	ma, err := minerAddrFromDS(ds)
	return dtypes.MinerAddress(ma), err
}

func MinerID(ma dtypes.MinerAddress) (dtypes.MinerID, error) {
	id, err := address.IDFromAddress(address.Address(ma))
	return dtypes.MinerID(id), err
}

func StorageNetworkName(ctx helpers.MetricsCtx, a lapi.FullNode) (dtypes.NetworkName, error) {
	return a.StateNetworkName(ctx)
}

// ProofsConfig derives the ffiwrapper configuration (the registered seal
// proof type) from the miner's on-chain sector size.
func ProofsConfig(maddr dtypes.MinerAddress, fnapi lapi.FullNode) (*ffiwrapper.Config, error) {
	ssize, err := fnapi.StateMinerSectorSize(context.TODO(), address.Address(maddr), types.EmptyTSK)
	if err != nil {
		return nil, err
	}

	spt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize)
	if err != nil {
		return nil, xerrors.Errorf("bad sector size: %w", err)
	}

	sb := &ffiwrapper.Config{
		SealProofType: spt,
	}

	return sb, nil
}
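
// SealProofTypeFromSectorSize maps the miner's on-chain sector size to a
// registered seal proof type, so the same binary can serve networks with
// different sector sizes (e.g. small test sectors vs. production sectors).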

type sidsc struct {
	sc *storedcounter.StoredCounter
}

func (s *sidsc) Next() (abi.SectorNumber, error) {
	i, err := s.sc.Next()
	return abi.SectorNumber(i), err
}

// SectorIDCounter returns a datastore-backed counter used to allocate unique
// sector numbers.
func SectorIDCounter(ds dtypes.MetadataDS) sealing.SectorIDCounter {
	sc := storedcounter.New(ds, datastore.NewKey("/storage/nextid"))
	return &sidsc{sc}
}
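
// StoredCounter persists a monotonically increasing value under the given
// datastore key, so sector numbers stay unique across daemon restarts:
//
//	sc := SectorIDCounter(ds)
//	num, err := sc.Next() // resumes from the last persisted value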

// StorageMiner creates the miner instance: it resolves the miner and worker
// addresses, constructs the sealing miner and the windowed PoSt scheduler,
// and runs both via the fx lifecycle.
func StorageMiner(mctx helpers.MetricsCtx, lc fx.Lifecycle, api lapi.FullNode, h host.Host, ds dtypes.MetadataDS, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, verif ffiwrapper.Verifier, tktFn sealing.TicketFn) (*storage.Miner, error) {
	maddr, err := minerAddrFromDS(ds)
	if err != nil {
		return nil, err
	}

	ctx := helpers.LifecycleCtx(mctx, lc)

	worker, err := api.StateMinerWorker(ctx, maddr, types.EmptyTSK)
	if err != nil {
		return nil, err
	}

	fps, err := storage.NewWindowedPoStScheduler(api, sealer, maddr, worker)
	if err != nil {
		return nil, err
	}

	sm, err := storage.NewMiner(api, maddr, worker, h, ds, sealer, sc, verif, tktFn)
	if err != nil {
		return nil, err
	}

	lc.Append(fx.Hook{
		OnStart: func(context.Context) error {
			go fps.Run(ctx)
			return sm.Run(ctx)
		},
		OnStop: sm.Stop,
	})

	return sm, nil
}
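
// A hedged sketch of how these constructors are typically wired into the
// node's fx dependency-injection graph; the authoritative wiring lives in the
// node builder and may differ in detail:
//
//	node.Override(new(*storage.Miner), modules.StorageMiner),
//	node.Override(new(dtypes.MinerAddress), modules.MinerAddress),
//	node.Override(new(sealing.SectorIDCounter), modules.SectorIDCounter),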

// HandleRetrieval starts the retrieval provider with the node lifecycle and
// stops it on shutdown.
func HandleRetrieval(host host.Host, lc fx.Lifecycle, m retrievalmarket.RetrievalProvider) {
	lc.Append(fx.Hook{
		OnStart: func(context.Context) error {
			m.Start()
			return nil
		},
		OnStop: func(context.Context) error {
			m.Stop()
			return nil
		},
	})
}

// HandleDeals starts the storage provider with the node lifecycle and stops
// it on shutdown.
func HandleDeals(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, h storagemarket.StorageProvider) {
	ctx := helpers.LifecycleCtx(mctx, lc)

	lc.Append(fx.Hook{
		OnStart: func(context.Context) error {
			h.Start(ctx)
			return nil
		},
		OnStop: func(context.Context) error {
			h.Stop()
			return nil
		},
	})
}

// RegisterProviderValidator is an initialization hook that registers the provider
// request validator with the data transfer module as the validator for
// StorageDataTransferVoucher types
func RegisterProviderValidator(mrv *requestvalidation.ProviderRequestValidator, dtm dtypes.ProviderDataTransfer) {
	if err := dtm.RegisterVoucherType(reflect.TypeOf(&requestvalidation.StorageDataTransferVoucher{}), mrv); err != nil {
		panic(err)
	}
}
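
// Validators are keyed by the voucher's reflect.Type, so each concrete
// voucher struct can carry at most one validator; panicking is tolerable here
// because registration runs exactly once, at node startup.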

// NewProviderDAGServiceDataTransfer returns a data transfer manager that
// moves provider data over the staging graphsync instance
func NewProviderDAGServiceDataTransfer(h host.Host, gs dtypes.StagingGraphsync) dtypes.ProviderDataTransfer {
	return dtgraphsync.NewGraphSyncDataTransfer(h, gs)
}

// NewProviderDealStore creates a statestore for the provider to track its deals
func NewProviderDealStore(ds dtypes.MetadataDS) dtypes.ProviderDealStore {
	return statestore.New(namespace.Wrap(ds, datastore.NewKey("/deals/client")))
}

// NewProviderPieceStore creates a statestore for storing metadata about pieces
// shared by the storage and retrieval providers
func NewProviderPieceStore(ds dtypes.MetadataDS) dtypes.ProviderPieceStore {
	return piecestore.NewPieceStore(ds)
}

// StagingBlockstore creates a blockstore for staging blocks for a miner
// in a storage deal, prior to sealing
func StagingBlockstore(r repo.LockedRepo) (dtypes.StagingBlockstore, error) {
	stagingds, err := r.Datastore("/staging")
	if err != nil {
		return nil, err
	}

	bs := blockstore.NewBlockstore(stagingds)
	ibs := blockstore.NewIdStore(bs)

	return ibs, nil
}

// StagingDAG is a DAGService for the StagingBlockstore
func StagingDAG(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, rt routing.Routing, h host.Host) (dtypes.StagingDAG, error) {
	bitswapNetwork := network.NewFromIpfsHost(h, rt)
	exch := bitswap.New(helpers.LifecycleCtx(mctx, lc), bitswapNetwork, ibs)

	bsvc := blockservice.New(ibs, exch)
	dag := merkledag.NewDAGService(bsvc)

	lc.Append(fx.Hook{
		OnStop: func(_ context.Context) error {
			return bsvc.Close()
		},
	})

	return dag, nil
}
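
// The staging DAG exposes the staging blockstore over bitswap, so blocks for
// in-flight deals can be exchanged with peers before the data is sealed.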

// StagingGraphsync creates a graphsync instance which reads and writes blocks
// to the StagingBlockstore
func StagingGraphsync(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync {
	graphsyncNetwork := gsnet.NewFromLibp2pHost(h)
	loader := storeutil.LoaderForBlockstore(ibs)
	storer := storeutil.StorerForBlockstore(ibs)
	gs := graphsync.New(helpers.LifecycleCtx(mctx, lc), graphsyncNetwork, loader, storer, graphsync.RejectAllRequestsByDefault())

	return gs
}
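
// With RejectAllRequestsByDefault, this graphsync instance serves no incoming
// requests on its own; the data transfer manager layered on top (see
// NewProviderDAGServiceDataTransfer) registers hooks that accept only
// requests carrying validated data transfer vouchers.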

// SetupBlockProducer registers the miner address with the block-producing
// miner on start and unregisters it on shutdown.
func SetupBlockProducer(lc fx.Lifecycle, ds dtypes.MetadataDS, api lapi.FullNode, epp gen.WinningPoStProver, beacon beacon.RandomBeacon) (*miner.Miner, error) {
	minerAddr, err := minerAddrFromDS(ds)
	if err != nil {
		return nil, err
	}

	m := miner.NewMiner(api, epp, beacon)

	lc.Append(fx.Hook{
		OnStart: func(ctx context.Context) error {
			return m.Register(minerAddr)
		},
		OnStop: func(ctx context.Context) error {
			return m.Unregister(ctx, minerAddr)
		},
	})

	return m, nil
}

// SealTicketGen returns a sealing.TicketFn that draws seal randomness from
// the chain at a fixed lookback behind the supplied tipset.
func SealTicketGen(fapi lapi.FullNode) sealing.TicketFn {
	return func(ctx context.Context, tok sealing.TipSetToken) (abi.SealRandomness, abi.ChainEpoch, error) {
		tsk, err := types.TipSetKeyFromBytes(tok)
		if err != nil {
			return nil, 0, xerrors.Errorf("could not unmarshal TipSetToken to TipSetKey: %w", err)
		}

		ts, err := fapi.ChainGetTipSet(ctx, tsk)
		if err != nil {
			return nil, 0, xerrors.Errorf("getting TipSet for key failed: %w", err)
		}

		r, err := fapi.ChainGetRandomness(ctx, ts.Key(), crypto.DomainSeparationTag_SealRandomness, ts.Height()-build.SealRandomnessLookback, nil)
		if err != nil {
			return nil, 0, xerrors.Errorf("getting randomness for SealTicket failed: %w", err)
		}

		return abi.SealRandomness(r), ts.Height() - build.SealRandomnessLookback, nil
	}
}
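
// The ticket is drawn build.SealRandomnessLookback epochs behind the supplied
// tipset rather than at its head: for head height H and lookback L the
// returned epoch is H-L, so a shallow reorg near the head does not invalidate
// randomness already committed to an in-flight seal.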

// NewProviderRequestValidator creates a request validator backed by the
// provider deal store.
func NewProviderRequestValidator(deals dtypes.ProviderDealStore) *requestvalidation.ProviderRequestValidator {
	return requestvalidation.NewProviderRequestValidator(deals)
}

// StorageProvider creates the storage market provider, backed by the staging
// blockstore and a piece filestore under the repo path.
func StorageProvider(ctx helpers.MetricsCtx, fapi lapi.FullNode, h host.Host, ds dtypes.MetadataDS, ibs dtypes.StagingBlockstore, r repo.LockedRepo, pieceStore dtypes.ProviderPieceStore, dataTransfer dtypes.ProviderDataTransfer, spn storagemarket.StorageProviderNode) (storagemarket.StorageProvider, error) {
	store, err := piecefilestore.NewLocalFileStore(piecefilestore.OsPath(r.Path()))
	if err != nil {
		return nil, err
	}

	net := smnet.NewFromLibp2pHost(h)

	minerAddress, err := minerAddrFromDS(ds)
	if err != nil {
		return nil, err
	}

	ssize, err := fapi.StateMinerSectorSize(ctx, minerAddress, types.EmptyTSK)
	if err != nil {
		return nil, err
	}

	rt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize)
	if err != nil {
		return nil, err
	}

	return storageimpl.NewProvider(net, ds, ibs, store, pieceStore, dataTransfer, spn, minerAddress, rt)
}

// RetrievalProvider creates a new retrieval provider attached to the provider blockstore
func RetrievalProvider(h host.Host, miner *storage.Miner, sealer sectorstorage.SectorManager, full lapi.FullNode, ds dtypes.MetadataDS, pieceStore dtypes.ProviderPieceStore, ibs dtypes.StagingBlockstore) (retrievalmarket.RetrievalProvider, error) {
	adapter := retrievaladapter.NewRetrievalProviderNode(miner, sealer, full)

	maddr, err := minerAddrFromDS(ds)
	if err != nil {
		return nil, err
	}

	network := rmnet.NewFromLibp2pHost(h)
	return retrievalimpl.NewProvider(maddr, adapter, network, pieceStore, ibs, ds)
}

// SectorStorage constructs the sector storage manager and closes it with the
// fx lifecycle.
func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc sectorstorage.SealerConfig, urls sectorstorage.URLs, sa sectorstorage.StorageAuth) (*sectorstorage.Manager, error) {
	ctx := helpers.LifecycleCtx(mctx, lc)

	sst, err := sectorstorage.New(ctx, ls, si, cfg, sc, urls, sa)
	if err != nil {
		return nil, err
	}

	lc.Append(fx.Hook{
		OnStop: func(_ context.Context) error {
			if err := sst.Close(); err != nil {
				log.Errorf("%+v", err)
			}

			return nil
		},
	})

	return sst, nil
}

// StorageAuth creates an admin API token and wraps it as the Authorization
// header used for requests between sector storage components.
func StorageAuth(ctx helpers.MetricsCtx, ca lapi.Common) (sectorstorage.StorageAuth, error) {
	token, err := ca.AuthNew(ctx, []lapi.Permission{"admin"})
	if err != nil {
		return nil, xerrors.Errorf("creating storage auth header: %w", err)
	}

	headers := http.Header{}
	headers.Add("Authorization", "Bearer "+string(token))
	return sectorstorage.StorageAuth(headers), nil
}
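
// A hedged sketch of how the returned header may be attached to an HTTP
// request when sector data moves between storage components (sectorURL is a
// hypothetical remote endpoint):
//
//	req, err := http.NewRequest("GET", sectorURL, nil)
//	if err != nil {
//		return err
//	}
//	req.Header = http.Header(sa)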