package deals

import (
	"context"
	"runtime"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-cbor-util"
	datatransfer "github.com/filecoin-project/go-data-transfer"
	"github.com/filecoin-project/go-statestore"
	"github.com/ipfs/go-cid"
	"github.com/ipld/go-ipld-prime"
	inet "github.com/libp2p/go-libp2p-core/network"
	"github.com/libp2p/go-libp2p-core/peer"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
)
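
// failDeal marks the deal identified by id as failed: it ends deal tracking
// in the state store, notifies the client with a signed DealFailed response,
// and resets and forgets any open stream for the deal.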
func (p *Provider) failDeal(id cid.Cid, cerr error) {
	if err := p.deals.End(id); err != nil {
		log.Warnf("deals.End: %s", err)
	}

	if cerr == nil {
		_, f, l, _ := runtime.Caller(1)
		cerr = xerrors.Errorf("unknown error (fail called at %s:%d)", f, l)
	}

	log.Warnf("deal %s failed: %s", id, cerr)

	err := p.sendSignedResponse(&Response{
		State:    api.DealFailed,
		Message:  cerr.Error(),
		Proposal: id,
	})

	if s, ok := p.conns[id]; ok {
		_ = s.Reset()
		delete(p.conns, id)
	}

	if err != nil {
		log.Warnf("notifying client about deal failure: %s", err)
	}
}
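
// readProposal reads a Proposal from the stream and rejects it unless it
// carries a proposer signature, the signature verifies, and the proposal is
// addressed to this provider.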
func (p *Provider) readProposal(s inet.Stream) (proposal Proposal, err error) {
	if err := cborutil.ReadCborRPC(s, &proposal); err != nil {
		log.Errorw("failed to read proposal message", "error", err)
		return proposal, err
	}

	if proposal.DealProposal.ProposerSignature == nil {
		return proposal, xerrors.Errorf("incoming deal proposal has no signature")
	}

	if err := proposal.DealProposal.Verify(address.Undef); err != nil {
		return proposal, xerrors.Errorf("verifying StorageDealProposal: %w", err)
	}

	if proposal.DealProposal.Provider != p.actor {
		log.Errorf("proposal with wrong ProviderAddress: %s", proposal.DealProposal.Provider)
		return proposal, xerrors.Errorf("proposal with wrong ProviderAddress: %s", proposal.DealProposal.Provider)
	}

	return proposal, nil
}
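
// sendSignedResponse serializes resp, signs the bytes with the miner's
// worker key, and writes the signed response to the stream tracked for the
// deal. A failed write is treated as a client disconnect: the stream is
// closed and removed from the connection map.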
func (p *Provider) sendSignedResponse(resp *Response) error {
	s, ok := p.conns[resp.Proposal]
	if !ok {
		return xerrors.New("couldn't send response: not connected")
	}

	msg, err := cborutil.Dump(resp)
	if err != nil {
		return xerrors.Errorf("serializing response: %w", err)
	}

	worker, err := p.getWorker(p.actor)
	if err != nil {
		return err
	}

	sig, err := p.full.WalletSign(context.TODO(), worker, msg)
	if err != nil {
		return xerrors.Errorf("failed to sign response message: %w", err)
	}

	signedResponse := &SignedResponse{
		Response:  *resp,
		Signature: sig,
	}

	if err := cborutil.WriteCborRPC(s, signedResponse); err != nil {
		// Assume the client disconnected; drop the stream.
		_ = s.Close()
		delete(p.conns, resp.Proposal)
		return err
	}

	return nil
}
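
// disconnect closes and forgets the stream tracked for the given deal.
// It is a no-op when no stream is open.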
func (p *Provider) disconnect(deal MinerDeal) error {
	s, ok := p.conns[deal.ProposalCid]
	if !ok {
		return nil
	}

	err := s.Close()
	delete(p.conns, deal.ProposalCid)
	return err
}
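
// getWorker resolves the worker address of the given miner actor by invoking
// its GetWorkerAddr method via a read-only StateCall; no message lands on
// chain.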
func (p *Provider) getWorker(miner address.Address) (address.Address, error) {
	getworker := &types.Message{
		To:     miner,
		From:   miner,
		Method: actors.MAMethods.GetWorkerAddr,
	}

	r, err := p.full.StateCall(context.TODO(), getworker, nil)
	if err != nil {
		return address.Undef, xerrors.Errorf("getting worker address: %w", err)
	}
	if r.ExitCode != 0 {
		return address.Undef, xerrors.Errorf("getWorker call failed: %d", r.ExitCode)
	}

	return address.NewFromBytes(r.Return)
}

var _ datatransfer.RequestValidator = &ProviderRequestValidator{}

// ProviderRequestValidator validates data transfer requests for the provider
// side of a storage market deal.
type ProviderRequestValidator struct {
	deals *statestore.StateStore
}

// NewProviderRequestValidator returns a new provider request validator backed
// by the given deal store.
func NewProviderRequestValidator(deals dtypes.ProviderDealStore) *ProviderRequestValidator {
	return &ProviderRequestValidator{
		deals: deals,
	}
}
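
// A minimal wiring sketch (illustrative only): the validator has no effect
// until it is registered with a data transfer manager for the voucher type
// it checks. The reflect.Type-based RegisterVoucherType call below follows
// the early go-data-transfer registration pattern; the manager variable `dt`
// and the exact signature are assumptions, not part of this file.
//
//	validator := NewProviderRequestValidator(deals)
//	if err := dt.RegisterVoucherType(reflect.TypeOf(&StorageDataTransferVoucher{}), validator); err != nil {
//		return err
//	}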

// ValidatePush validates a push request received from the peer that will
// send data. It succeeds only if:
// - the voucher has the correct type
// - the voucher references an active deal
// - the referenced deal matches the sending client
// - the referenced deal matches the given base CID
// - the referenced deal is in an acceptable state
func (m *ProviderRequestValidator) ValidatePush(
	sender peer.ID,
	voucher datatransfer.Voucher,
	baseCid cid.Cid,
	selector ipld.Node) error {
	dealVoucher, ok := voucher.(*StorageDataTransferVoucher)
	if !ok {
		return xerrors.Errorf("voucher type %s: %w", voucher.Type(), ErrWrongVoucherType)
	}

	var deal MinerDeal
	err := m.deals.Get(dealVoucher.Proposal, &deal)
	if err != nil {
		return xerrors.Errorf("Proposal CID %s: %w", dealVoucher.Proposal.String(), ErrNoDeal)
	}

	if deal.Client != sender {
		return xerrors.Errorf("Deal Peer %s, Data Transfer Peer %s: %w", deal.Client.String(), sender.String(), ErrWrongPeer)
	}

	if !deal.Ref.Equals(baseCid) {
		return xerrors.Errorf("Deal Payload CID %s, Data Transfer CID %s: %w", string(deal.Proposal.PieceRef), baseCid.String(), ErrWrongPiece)
	}

	for _, state := range DataTransferStates {
		if deal.State == state {
			return nil
		}
	}

	return xerrors.Errorf("Deal State %s: %w", deal.State, ErrInacceptableDealState)
}

// ValidatePull validates a pull request received from the peer that would
// receive data. It always fails: a provider should never send deal data back
// to a client as part of a storage deal, so pull requests are not accepted.
func (m *ProviderRequestValidator) ValidatePull(
	receiver peer.ID,
	voucher datatransfer.Voucher,
	baseCid cid.Cid,
	selector ipld.Node) error {
	return ErrNoPullAccepted
}