package deals

import (
	"context"
	"errors"
	"sync"

	cid "github.com/ipfs/go-cid"
	datastore "github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"
	inet "github.com/libp2p/go-libp2p-core/network"
	"github.com/libp2p/go-libp2p-core/peer"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-cbor-util"
	"github.com/filecoin-project/go-fil-components/datatransfer"
	"github.com/filecoin-project/go-statestore"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
	"github.com/filecoin-project/lotus/storage"
	"github.com/filecoin-project/lotus/storage/sectorblocks"
)
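
// ProviderDsPrefix is the datastore namespace under which provider deal state is stored.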
var ProviderDsPrefix = "/deals/provider"
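
// MinerDeal is the state the provider (miner) tracks for a single storage deal,
// from the initial proposal through sealing.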
type MinerDeal struct {
	Client      peer.ID
	Proposal    actors.StorageDealProposal
	ProposalCid cid.Cid
	State       api.DealState

	Ref cid.Cid

	DealID   uint64
	SectorID uint64 // Set when State >= DealStaged

	s inet.Stream
}
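
// Provider implements the provider (miner) side of the storage deal flow: it
// accepts deal proposals over libp2p streams, receives deal data via the data
// transfer module, and moves each deal through the api.DealState lifecycle.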
type Provider struct {
	pricePerByteBlock types.BigInt // how much we want for storing one byte for one block
	minPieceSize      uint64

	ask   *types.SignedStorageAsk
	askLk sync.Mutex

	secb   *sectorblocks.SectorBlocks
	sminer *storage.Miner
	full   api.FullNode

	// TODO: This will go away once storage market module + CAR
	// is implemented
	dag dtypes.StagingDAG

	// dataTransfer is the manager of data transfers used by this storage provider
	dataTransfer dtypes.ProviderDataTransfer

	deals *statestore.StateStore
	ds    dtypes.MetadataDS

	conns map[cid.Cid]inet.Stream

	actor address.Address

	incoming chan MinerDeal
	updated  chan minerDealUpdate
	stop     chan struct{}
	stopped  chan struct{}
}
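
// minerDealUpdate is an internal message requesting a deal state transition;
// a non-nil err fails the deal, and mut, when set, is applied to the stored
// deal as part of the update.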
type minerDealUpdate struct {
	newState api.DealState
	id       cid.Cid
	err      error
	mut      func(*MinerDeal)
}

var (
	// ErrDataTransferFailed means a data transfer for a deal failed
	ErrDataTransferFailed = errors.New("deal data transfer failed")
)
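
// NewProvider creates a deal Provider for the miner address recorded in the
// metadata datastore, loads (or initializes) its storage ask, and registers
// for data transfer events.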
func NewProvider(ds dtypes.MetadataDS, sminer *storage.Miner, secb *sectorblocks.SectorBlocks, dag dtypes.StagingDAG, dataTransfer dtypes.ProviderDataTransfer, fullNode api.FullNode) (*Provider, error) {
	addr, err := ds.Get(datastore.NewKey("miner-address"))
	if err != nil {
		return nil, err
	}
	minerAddress, err := address.NewFromBytes(addr)
	if err != nil {
		return nil, err
	}

	h := &Provider{
		sminer:       sminer,
		dag:          dag,
		dataTransfer: dataTransfer,
		full:         fullNode,
		secb:         secb,

		pricePerByteBlock: types.NewInt(3), // TODO: allow setting
		minPieceSize:      256,             // TODO: allow setting (BUT KEEP MIN 256! (because of how we fill sectors up))

		conns: map[cid.Cid]inet.Stream{},

		incoming: make(chan MinerDeal),
		updated:  make(chan minerDealUpdate),
		stop:     make(chan struct{}),
		stopped:  make(chan struct{}),

		actor: minerAddress,

		deals: statestore.New(namespace.Wrap(ds, datastore.NewKey(ProviderDsPrefix))),
		ds:    ds,
	}

	if err := h.tryLoadAsk(); err != nil {
		return nil, err
	}

	if h.ask == nil {
		// TODO: we should be fine with this state, and just say it means 'not actively accepting deals'
		// for now... lets just set a price
		if err := h.SetPrice(types.NewInt(500_000_000), 1000000); err != nil {
			return nil, xerrors.Errorf("failed setting a default price: %w", err)
		}
	}

	// register a data transfer event handler -- this will move deals from
	// accepted to staged
	h.dataTransfer.SubscribeToEvents(h.onDataTransferEvent)

	return h, nil
}
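
// Run starts the deal processing loop, which handles incoming deals and deal
// state updates until Stop is called.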
func (p *Provider) Run(ctx context.Context) {
	// TODO: restore state

	go func() {
		defer log.Warn("quitting deal provider loop")
		defer close(p.stopped)

		for {
			select {
			case deal := <-p.incoming: // DealAccepted
				p.onIncoming(deal)
			case update := <-p.updated: // DealStaged
				p.onUpdated(ctx, update)
			case <-p.stop:
				return
			}
		}
	}()
}
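
// onIncoming records the stream for a newly received deal, begins tracking the
// deal in the statestore, and queues a transition to DealAccepted.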
func (p *Provider) onIncoming(deal MinerDeal) {
	log.Info("incoming deal")

	p.conns[deal.ProposalCid] = deal.s

	if err := p.deals.Begin(deal.ProposalCid, &deal); err != nil {
		// This can happen when client re-sends proposal
		p.failDeal(deal.ProposalCid, err)
		log.Errorf("deal tracking failed: %s", err)
		return
	}

	go func() {
		p.updated <- minerDealUpdate{
			newState: api.DealAccepted,
			id:       deal.ProposalCid,
			err:      nil,
		}
	}()
}
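
// onUpdated persists a deal state change (applying any mutation carried with
// the update) and then dispatches the handler for the deal's new state.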
func (p *Provider) onUpdated(ctx context.Context, update minerDealUpdate) {
	log.Infof("Deal %s updated state to %s", update.id, api.DealStates[update.newState])
	if update.err != nil {
		log.Errorf("deal %s (newSt: %d) failed: %+v", update.id, update.newState, update.err)
		p.failDeal(update.id, update.err)
		return
	}
	var deal MinerDeal
	err := p.deals.Mutate(update.id, func(d *MinerDeal) error {
		d.State = update.newState
		if update.mut != nil {
			update.mut(d)
		}
		deal = *d
		return nil
	})
	if err != nil {
		p.failDeal(update.id, err)
		return
	}

	switch update.newState {
	case api.DealAccepted:
		p.handle(ctx, deal, p.accept, api.DealNoUpdate)
	case api.DealStaged:
		p.handle(ctx, deal, p.staged, api.DealSealing)
	case api.DealSealing:
		p.handle(ctx, deal, p.sealing, api.DealComplete)
	case api.DealComplete:
		p.handle(ctx, deal, p.complete, api.DealNoUpdate)
	}
}

// onDataTransferEvent is the function called when an event occurs in a data
// transfer -- it reads the voucher to verify this event occurred in a storage
// market deal, then, based on the data transfer event that occurred, it generates
// an update message for the deal -- either moving it to staged on a completion
// event or failing it if a data transfer error occurs
func (p *Provider) onDataTransferEvent(event datatransfer.Event, channelState datatransfer.ChannelState) {
	voucher, ok := channelState.Voucher().(*StorageDataTransferVoucher)
	// if this event is for a transfer not related to storage, ignore
	if !ok {
		return
	}

	// data transfer events for opening and progress do not affect deal state
	var next api.DealState
	var err error
	var mut func(*MinerDeal)
	switch event.Code {
	case datatransfer.Complete:
		next = api.DealStaged
		mut = func(deal *MinerDeal) {
			deal.DealID = voucher.DealID
		}
	case datatransfer.Error:
		next = api.DealFailed
		err = ErrDataTransferFailed
	default:
		// the only events we care about are complete and error
		return
	}

	select {
	case p.updated <- minerDealUpdate{
		newState: next,
		id:       voucher.Proposal,
		err:      err,
		mut:      mut,
	}:
	case <-p.stop:
	}
}
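
// newDeal constructs the initial MinerDeal record for a proposal received on
// the given stream, identified by the CID of its proposal.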
func (p *Provider) newDeal(s inet.Stream, proposal Proposal) (MinerDeal, error) {
	proposalNd, err := cborutil.AsIpld(proposal.DealProposal)
	if err != nil {
		return MinerDeal{}, err
	}

	return MinerDeal{
		Client:      s.Conn().RemotePeer(),
		Proposal:    *proposal.DealProposal,
		ProposalCid: proposalNd.Cid(),
		State:       api.DealUnknown,

		Ref: proposal.Piece,

		s: s,
	}, nil
}
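
// HandleStream is the libp2p stream handler for storage deal proposals: it
// reads a proposal from the stream, builds a MinerDeal, and hands it to the
// processing loop.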
func (p *Provider) HandleStream(s inet.Stream) {
	log.Info("Handling storage deal proposal!")

	proposal, err := p.readProposal(s)
	if err != nil {
		log.Error(err)
		s.Close()
		return
	}

	deal, err := p.newDeal(s, proposal)
	if err != nil {
		log.Errorf("%+v", err)
		s.Close()
		return
	}

	p.incoming <- deal
}
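
// Stop signals the deal processing loop to exit and waits for it to finish.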
func (p *Provider) Stop() {
	close(p.stop)
	<-p.stopped
}