lotus/chain/deals/provider_utils.go

package deals

import (
	"context"
	"runtime"

	"github.com/filecoin-project/go-address"
	cborutil "github.com/filecoin-project/go-cbor-util"
	datatransfer "github.com/filecoin-project/go-data-transfer"
	"github.com/filecoin-project/go-statestore"
	"github.com/ipfs/go-cid"
	"github.com/ipld/go-ipld-prime"
	inet "github.com/libp2p/go-libp2p-core/network"
	"github.com/libp2p/go-libp2p-core/peer"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
)
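
// failDeal ends state tracking for the deal, notifies the client with a
// signed DealFailed response, and tears down any open stream for the proposal.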
func (p *Provider) failDeal(ctx context.Context, id cid.Cid, cerr error) {
	if err := p.deals.End(id); err != nil {
		log.Warnf("deals.End: %s", err)
	}

	if cerr == nil {
		_, f, l, _ := runtime.Caller(1)
		cerr = xerrors.Errorf("unknown error (fail called at %s:%d)", f, l)
	}

	log.Warnf("deal %s failed: %s", id, cerr)

	err := p.sendSignedResponse(ctx, &Response{
		State:    api.DealFailed,
		Message:  cerr.Error(),
		Proposal: id,
	})

	s, ok := p.conns[id]
	if ok {
		_ = s.Reset()
		delete(p.conns, id)
	}

	if err != nil {
		log.Warnf("notifying client about deal failure: %s", err)
	}
}
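
// readProposal reads a single cbor-encoded deal proposal from the stream and
// checks that it is signed and addressed to this provider.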
func (p *Provider) readProposal(s inet.Stream) (proposal Proposal, err error) {
	if err := cborutil.ReadCborRPC(s, &proposal); err != nil {
		log.Errorw("failed to read proposal message", "error", err)
		return proposal, err
	}

	if proposal.DealProposal.ProposerSignature == nil {
		return proposal, xerrors.Errorf("incoming deal proposal has no signature")
	}

	if err := proposal.DealProposal.Verify(address.Undef); err != nil {
		return proposal, xerrors.Errorf("verifying StorageDealProposal: %w", err)
	}

	if proposal.DealProposal.Provider != p.actor {
		log.Errorf("proposal with wrong ProviderAddress: %s", proposal.DealProposal.Provider)
		return proposal, xerrors.Errorf("incoming deal proposal has wrong ProviderAddress: %s", proposal.DealProposal.Provider)
	}

	return proposal, nil
}
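
// sendSignedResponse serializes the response, signs it with the miner worker
// key, and writes it back on the stream associated with the proposal.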
func (p *Provider) sendSignedResponse(ctx context.Context, resp *Response) error {
	s, ok := p.conns[resp.Proposal]
	if !ok {
		return xerrors.New("couldn't send response: not connected")
	}

	msg, err := cborutil.Dump(resp)
	if err != nil {
		return xerrors.Errorf("serializing response: %w", err)
	}

	// Sign with the worker key so the client can verify the response came
	// from this miner.
	worker, err := p.spn.GetMinerWorker(ctx, p.actor)
	if err != nil {
		return err
	}

	sig, err := p.spn.SignBytes(ctx, worker, msg)
	if err != nil {
		return xerrors.Errorf("failed to sign response message: %w", err)
	}

	signedResponse := &SignedResponse{
		Response:  *resp,
		Signature: sig,
	}

	err = cborutil.WriteCborRPC(s, signedResponse)
	if err != nil {
		// Assume the client disconnected; drop the stream.
		s.Close()
		delete(p.conns, resp.Proposal)
	}
	return err
}
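
// disconnect closes and forgets the stream for the given deal, if one is open.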
func (p *Provider) disconnect(deal MinerDeal) error {
	s, ok := p.conns[deal.ProposalCid]
	if !ok {
		return nil
	}

	err := s.Close()
	delete(p.conns, deal.ProposalCid)
	return err
}
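
// Compile-time assertion that ProviderRequestValidator implements
// datatransfer.RequestValidator.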
var _ datatransfer.RequestValidator = &ProviderRequestValidator{}

// ProviderRequestValidator validates data transfer requests for the provider
// in a storage market.
type ProviderRequestValidator struct {
	deals *statestore.StateStore
}

// NewProviderRequestValidator returns a new provider request validator for
// the given deal store.
func NewProviderRequestValidator(deals dtypes.ProviderDealStore) *ProviderRequestValidator {
	return &ProviderRequestValidator{
		deals: deals,
	}
}
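
// Wiring the validator into a data transfer manager might look like the
// following sketch (dtManager and dealStore are hypothetical names, and
// RegisterVoucherType's exact signature depends on the go-data-transfer
// version in use):
//
//	validator := NewProviderRequestValidator(dealStore)
//	if err := dtManager.RegisterVoucherType(reflect.TypeOf(&StorageDataTransferVoucher{}), validator); err != nil {
//		return err
//	}
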
// ValidatePush validates a push request received from the peer that will send data
// Will succeed only if:
// - voucher has correct type
// - voucher references an active deal
// - referenced deal matches the client
// - referenced deal matches the given base CID
// - referenced deal is in an acceptable state
func (m *ProviderRequestValidator) ValidatePush(
sender peer.ID,
voucher datatransfer.Voucher,
baseCid cid.Cid,
Selector ipld.Node) error {
dealVoucher, ok := voucher.(*StorageDataTransferVoucher)
if !ok {
feat(datatransfer): implement and extract feat(datatransfer): setup implementation path Sets up a path to implementation, offering both the dagservice implementation and a future graphsync implement, establishes message interfaces and network layer, and isolates the datatransfer module from the app WIP using CBOR encoding for dataxfermsg * Bring cbor-gen stuff into datatransfer package * make transferRequest private struct * add transferResponse + funcs * Rename VoucherID to VoucherType * more tests passing WIP trying out some stuff * Embed request/response in message so all the interfaces work AND the CBOR unmarshaling works: this is more like the spec anyway * get rid of pb stuff all message tests passing, some others in datatransfer Some cleanup for PR Cleanup for PR, clarifying and additional comments mod tidy Respond to PR comments: * Make DataTransferRequest/Response be returned in from Net * Regenerate cbor_gen and fix the generator caller so it works better * Please the linters Fix tests Initiate push and pull requests (#536) * add issue link for data TransferID generation * comment out failing but not relevant tests * finish voucher rename from Identifier --> Type tests passing cleanup for PR remove unused fmt import in graphsync_test a better reflection send data transfer response other tests passing feat(datatransfer): milestone 2 infrastructure Setup test path for all tickets for milestone 2 responses alert subscribers when request is not accepted (#607) Graphsync response is scheduled when a valid push request is received (#625) fix(datatransfer): fix tests fix an error with read buffers in tests fix(deps): fix go.sum Feat/dt graphsync pullreqs (#627) * graphsync responses to pull requests Feat/dt initiator cleanup (#645) * ChannelID.To --> ChannelID.Initiator * We now store our peer ID (from host.ID()) so it can be used when creating ChannelIDs. * InProgressChannels returns all of impl.channels, currently just for testing * Implements go-data-transfer issue * Some assertions were changed based on the above. * Renamed some variables and added some assertions based on the new understanding * Updated SHA for graphsync module * Updated fakeGraphSync test structs to use new interfaces from new SHA above Techdebt/dt split graphsync impl receiver (#651) * Split up graphsyncImpl and graphsyncReceiver * rename graphsync to utils DTM sends data over graphsync for validated push requests (#665) * create channels when a request is received. register push request hook with graphsync. fix tests. * better NewReaders * use mutex lock around impl.channels access * fix(datatransfer): fix test uncertainty * fix a data race and also don't use random bytes in basic block which can fail * privatize 3 funcs with @hannahhoward Feat/dt gs pullrequests (#693) * Implements DTM Sends Data Over Graphsync For Validated Pull Requests * rename a field in a test struct * refactor a couple of private functions (one was refactored out of existence) Feat/dt subscribe, file Xfer round trip (#720) Implements the rest of Subscriber Is Notified When Request Completed #24: * send a graphsync message within a go func and consume responses until error or transfer is complete. * notify subscribers of results. * Rename datatransfer.Event to EventCode. 
* datatransfer.Event is now a struct that includes a message and a timestamp as well as the Event.Code int, formerly Event, update all uses * Add extension data to graphsync request hook, gsReq * rename sendRequest to sendDtRequest, to distinguish it from sendGsRequest, where Dt = datatransfer, Gs = graphsync * use a mutex lock for last transfer ID * obey the linter Don't respond with error in gsReqRcdHook when we can't find the datatransfer extension. (#754) * update to correct graphsync version, update tests & code to call the new graphsync hooks * getExtensionData returns an empty struct + nil if we can't find our extension * Don't respond with error when we can't find the extension. * Test for same * mod tidy minor fix to go.sum feat(datatransfer): switch to graphsync implementation Move over to real graphsync implementation of data transfer, add constructors for graphsync instances on client and miner side fix(datatransfer): Fix validators Validators were checking payload cid against commP -- which are not the same any more. Added a payloadCid to client deal to maintain the record, fixed validator logic Feat/dt extraction use go-fil-components/datatransfer (#770) * Initial commit after changing to go-fil-components/datatransfer * blow away the datatransfer dir * use go-fil-components master after its PR #1 was merged * go mod tidy use a package updates after rebase with master
2019-10-30 02:42:16 +00:00
return xerrors.Errorf("voucher type %s: %w", voucher.Type(), ErrWrongVoucherType)
}
	var deal MinerDeal
	err := m.deals.Get(dealVoucher.Proposal, &deal)
	if err != nil {
		return xerrors.Errorf("Proposal CID %s: %w", dealVoucher.Proposal.String(), ErrNoDeal)
	}
	if deal.Client != sender {
		return xerrors.Errorf("Deal Peer %s, Data Transfer Peer %s: %w", deal.Client.String(), sender.String(), ErrWrongPeer)
	}
	if !deal.Ref.Equals(baseCid) {
		return xerrors.Errorf("Deal Payload CID %s, Data Transfer CID %s: %w", deal.Ref.String(), baseCid.String(), ErrWrongPiece)
	}
	for _, state := range DataTransferStates {
		if deal.State == state {
			return nil
		}
	}
	return xerrors.Errorf("Deal State %d: %w", deal.State, ErrInacceptableDealState)
}
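
// For context, the client-side push that this method validates might be opened
// roughly as follows (a sketch; dtClient, allSelector, and the exact
// OpenPushDataChannel signature are assumptions about the go-data-transfer
// client API):
//
//	voucher := &StorageDataTransferVoucher{Proposal: proposalCid}
//	_, err := dtClient.OpenPushDataChannel(ctx, providerID, voucher, payloadCid, allSelector)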

// ValidatePull validates a pull request received from the peer that will receive data.
// Will always error because providers should not accept pull requests from a client
// in a storage deal (i.e. send data to the client).
func (m *ProviderRequestValidator) ValidatePull(
	receiver peer.ID,
	voucher datatransfer.Voucher,
	baseCid cid.Cid,
	selector ipld.Node) error {
	return ErrNoPullAccepted
}