cleanup: Lotus client: remove markets and deal-making from Lotus Client (#11999)
* remove client CLI
* remove markets CLI from miner
* remove markets from all CLI
* remove client API
* update go mod
* changes as per review
parent c9c070727a
commit 469960ce0e
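Most of the diff below is mechanical: every removed FullNode method appears once in the interface definition (api/api_full.go and its v0 counterpart), once as a generated FullNodeStruct/FullNodeStub pair in proxy_gen.go, and once as a gomock-generated MockFullNode method. The following is a minimal illustrative sketch of that generated shape, using the removed ClientHasLocal method as the example; it is not part of the commit, and the trimmed-down struct layout is an assumption for illustration only.

// Illustrative sketch (not part of the commit): the shape of the generated
// proxy code that each removed Client* method followed.
package api

import (
	"context"

	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"
)

var ErrNotSupported = xerrors.New("method not supported")

type FullNodeStruct struct {
	Internal struct {
		// One function pointer per API method; the JSON-RPC client fills these in.
		ClientHasLocal func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"write"`
	}
}

// The generated wrapper dispatches through the Internal pointer.
func (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) {
	if s.Internal.ClientHasLocal == nil {
		return false, ErrNotSupported
	}
	return s.Internal.ClientHasLocal(p0, p1)
}

// The stub always reports the method as unsupported.
type FullNodeStub struct{}

func (s *FullNodeStub) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) {
	return false, ErrNotSupported
}

Removing a method from the interface therefore means deleting all three generated copies, which is why the same names recur throughout the hunks below.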
@@ -9,7 +9,6 @@ import (
"github.com/google/uuid"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p/core/peer"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
@@ -348,74 +347,6 @@ type FullNode interface {

// Other

// MethodGroup: Client
// The Client methods all have to do with interacting with the storage and
// retrieval markets as a client

// ClientImport imports file under the specified path into filestore.
ClientImport(ctx context.Context, ref FileRef) (*ImportRes, error) //perm:admin
// ClientRemoveImport removes file import
ClientRemoveImport(ctx context.Context, importID imports.ID) error //perm:admin
// ClientStartDeal proposes a deal with a miner.
ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:admin
// ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking.
ClientStatelessDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:write
// ClientGetDealInfo returns the latest information about a given deal.
ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error) //perm:read
// ClientListDeals returns information about the deals made by the local client.
ClientListDeals(ctx context.Context) ([]DealInfo, error) //perm:write
// ClientGetDealUpdates returns the status of updated deals
ClientGetDealUpdates(ctx context.Context) (<-chan DealInfo, error) //perm:write
// ClientGetDealStatus returns status given a code
ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) //perm:read
// ClientHasLocal indicates whether a certain CID is locally stored.
ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) //perm:write
// ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]QueryOffer, error) //perm:read
// ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error) //perm:read
// ClientRetrieve initiates the retrieval of a file, as specified in the order.
ClientRetrieve(ctx context.Context, params RetrievalOrder) (*RestrievalRes, error) //perm:admin
// ClientRetrieveWait waits for retrieval to be complete
ClientRetrieveWait(ctx context.Context, deal retrievalmarket.DealID) error //perm:admin
// ClientExport exports a file stored in the local filestore to a system file
ClientExport(ctx context.Context, exportRef ExportRef, fileRef FileRef) error //perm:admin
// ClientListRetrievals returns information about retrievals made by the local client
ClientListRetrievals(ctx context.Context) ([]RetrievalInfo, error) //perm:write
// ClientGetRetrievalUpdates returns status of updated retrieval deals
ClientGetRetrievalUpdates(ctx context.Context) (<-chan RetrievalInfo, error) //perm:write
// ClientQueryAsk returns a signed StorageAsk from the specified miner.
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*StorageAsk, error) //perm:read
// ClientCalcCommP calculates the CommP and data size of the specified CID
ClientDealPieceCID(ctx context.Context, root cid.Cid) (DataCIDSize, error) //perm:read
// ClientCalcCommP calculates the CommP for a specified file
ClientCalcCommP(ctx context.Context, inpath string) (*CommPRet, error) //perm:write
// ClientGenCar generates a CAR file for the specified file.
ClientGenCar(ctx context.Context, ref FileRef, outpath string) error //perm:write
// ClientDealSize calculates real deal data size
ClientDealSize(ctx context.Context, root cid.Cid) (DataSize, error) //perm:read
// ClientListTransfers returns the status of all ongoing transfers of data
ClientListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write
ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write
// ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
// ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
// ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
// which are stuck due to insufficient funds
ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error //perm:write

// ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID
ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write

// ClientUnimport removes references to the specified file from filestore
// ClientUnimport(path string)

// ClientListImports lists imported files and their root CIDs
ClientListImports(ctx context.Context) ([]Import, error) //perm:write

// ClientListAsks() []Ask

// MethodGroup: State
// The State methods are used to query, inspect, and interact with chain state.
// Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset.
@@ -21,8 +21,6 @@ import (

address "github.com/filecoin-project/go-address"
bitfield "github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket"
jsonrpc "github.com/filecoin-project/go-jsonrpc"
auth "github.com/filecoin-project/go-jsonrpc/auth"
abi "github.com/filecoin-project/go-state-types/abi"
@@ -42,7 +40,6 @@ import (
ethtypes "github.com/filecoin-project/lotus/chain/types/ethtypes"
alerting "github.com/filecoin-project/lotus/journal/alerting"
dtypes "github.com/filecoin-project/lotus/node/modules/dtypes"
imports "github.com/filecoin-project/lotus/node/repo/imports"
)

// MockFullNode is a mock of FullNode interface.
@@ -511,418 +508,6 @@ func (mr *MockFullNodeMockRecorder) ChainTipSetWeight(arg0, arg1 interface{}) *g
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainTipSetWeight", reflect.TypeOf((*MockFullNode)(nil).ChainTipSetWeight), arg0, arg1)
}

// ClientCalcCommP mocks base method.
func (m *MockFullNode) ClientCalcCommP(arg0 context.Context, arg1 string) (*api.CommPRet, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientCalcCommP", arg0, arg1)
ret0, _ := ret[0].(*api.CommPRet)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientCalcCommP indicates an expected call of ClientCalcCommP.
func (mr *MockFullNodeMockRecorder) ClientCalcCommP(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCalcCommP", reflect.TypeOf((*MockFullNode)(nil).ClientCalcCommP), arg0, arg1)
}

// ClientCancelDataTransfer mocks base method.
func (m *MockFullNode) ClientCancelDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientCancelDataTransfer", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}

// ClientCancelDataTransfer indicates an expected call of ClientCancelDataTransfer.
func (mr *MockFullNodeMockRecorder) ClientCancelDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientCancelDataTransfer), arg0, arg1, arg2, arg3)
}

// ClientCancelRetrievalDeal mocks base method.
func (m *MockFullNode) ClientCancelRetrievalDeal(arg0 context.Context, arg1 retrievalmarket.DealID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientCancelRetrievalDeal", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}

// ClientCancelRetrievalDeal indicates an expected call of ClientCancelRetrievalDeal.
func (mr *MockFullNodeMockRecorder) ClientCancelRetrievalDeal(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelRetrievalDeal", reflect.TypeOf((*MockFullNode)(nil).ClientCancelRetrievalDeal), arg0, arg1)
}

// ClientDataTransferUpdates mocks base method.
func (m *MockFullNode) ClientDataTransferUpdates(arg0 context.Context) (<-chan api.DataTransferChannel, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientDataTransferUpdates", arg0)
ret0, _ := ret[0].(<-chan api.DataTransferChannel)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientDataTransferUpdates indicates an expected call of ClientDataTransferUpdates.
func (mr *MockFullNodeMockRecorder) ClientDataTransferUpdates(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDataTransferUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientDataTransferUpdates), arg0)
}

// ClientDealPieceCID mocks base method.
func (m *MockFullNode) ClientDealPieceCID(arg0 context.Context, arg1 cid.Cid) (api.DataCIDSize, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientDealPieceCID", arg0, arg1)
ret0, _ := ret[0].(api.DataCIDSize)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientDealPieceCID indicates an expected call of ClientDealPieceCID.
func (mr *MockFullNodeMockRecorder) ClientDealPieceCID(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealPieceCID", reflect.TypeOf((*MockFullNode)(nil).ClientDealPieceCID), arg0, arg1)
}

// ClientDealSize mocks base method.
func (m *MockFullNode) ClientDealSize(arg0 context.Context, arg1 cid.Cid) (api.DataSize, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientDealSize", arg0, arg1)
ret0, _ := ret[0].(api.DataSize)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientDealSize indicates an expected call of ClientDealSize.
func (mr *MockFullNodeMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockFullNode)(nil).ClientDealSize), arg0, arg1)
}

// ClientExport mocks base method.
func (m *MockFullNode) ClientExport(arg0 context.Context, arg1 api.ExportRef, arg2 api.FileRef) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientExport", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}

// ClientExport indicates an expected call of ClientExport.
func (mr *MockFullNodeMockRecorder) ClientExport(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientExport", reflect.TypeOf((*MockFullNode)(nil).ClientExport), arg0, arg1, arg2)
}

// ClientFindData mocks base method.
func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]api.QueryOffer, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientFindData", arg0, arg1, arg2)
ret0, _ := ret[0].([]api.QueryOffer)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientFindData indicates an expected call of ClientFindData.
func (mr *MockFullNodeMockRecorder) ClientFindData(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientFindData", reflect.TypeOf((*MockFullNode)(nil).ClientFindData), arg0, arg1, arg2)
}

// ClientGenCar mocks base method.
func (m *MockFullNode) ClientGenCar(arg0 context.Context, arg1 api.FileRef, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientGenCar", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}

// ClientGenCar indicates an expected call of ClientGenCar.
func (mr *MockFullNodeMockRecorder) ClientGenCar(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGenCar", reflect.TypeOf((*MockFullNode)(nil).ClientGenCar), arg0, arg1, arg2)
}

// ClientGetDealInfo mocks base method.
func (m *MockFullNode) ClientGetDealInfo(arg0 context.Context, arg1 cid.Cid) (*api.DealInfo, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientGetDealInfo", arg0, arg1)
ret0, _ := ret[0].(*api.DealInfo)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientGetDealInfo indicates an expected call of ClientGetDealInfo.
func (mr *MockFullNodeMockRecorder) ClientGetDealInfo(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealInfo", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealInfo), arg0, arg1)
}

// ClientGetDealStatus mocks base method.
func (m *MockFullNode) ClientGetDealStatus(arg0 context.Context, arg1 uint64) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientGetDealStatus", arg0, arg1)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientGetDealStatus indicates an expected call of ClientGetDealStatus.
func (mr *MockFullNodeMockRecorder) ClientGetDealStatus(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealStatus", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealStatus), arg0, arg1)
}

// ClientGetDealUpdates mocks base method.
func (m *MockFullNode) ClientGetDealUpdates(arg0 context.Context) (<-chan api.DealInfo, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientGetDealUpdates", arg0)
ret0, _ := ret[0].(<-chan api.DealInfo)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientGetDealUpdates indicates an expected call of ClientGetDealUpdates.
func (mr *MockFullNodeMockRecorder) ClientGetDealUpdates(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealUpdates), arg0)
}

// ClientGetRetrievalUpdates mocks base method.
func (m *MockFullNode) ClientGetRetrievalUpdates(arg0 context.Context) (<-chan api.RetrievalInfo, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientGetRetrievalUpdates", arg0)
ret0, _ := ret[0].(<-chan api.RetrievalInfo)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientGetRetrievalUpdates indicates an expected call of ClientGetRetrievalUpdates.
func (mr *MockFullNodeMockRecorder) ClientGetRetrievalUpdates(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetRetrievalUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetRetrievalUpdates), arg0)
}

// ClientHasLocal mocks base method.
func (m *MockFullNode) ClientHasLocal(arg0 context.Context, arg1 cid.Cid) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientHasLocal", arg0, arg1)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientHasLocal indicates an expected call of ClientHasLocal.
func (mr *MockFullNodeMockRecorder) ClientHasLocal(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientHasLocal", reflect.TypeOf((*MockFullNode)(nil).ClientHasLocal), arg0, arg1)
}

// ClientImport mocks base method.
func (m *MockFullNode) ClientImport(arg0 context.Context, arg1 api.FileRef) (*api.ImportRes, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientImport", arg0, arg1)
ret0, _ := ret[0].(*api.ImportRes)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientImport indicates an expected call of ClientImport.
func (mr *MockFullNodeMockRecorder) ClientImport(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientImport", reflect.TypeOf((*MockFullNode)(nil).ClientImport), arg0, arg1)
}

// ClientListDataTransfers mocks base method.
func (m *MockFullNode) ClientListDataTransfers(arg0 context.Context) ([]api.DataTransferChannel, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientListDataTransfers", arg0)
ret0, _ := ret[0].([]api.DataTransferChannel)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientListDataTransfers indicates an expected call of ClientListDataTransfers.
func (mr *MockFullNodeMockRecorder) ClientListDataTransfers(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDataTransfers", reflect.TypeOf((*MockFullNode)(nil).ClientListDataTransfers), arg0)
}

// ClientListDeals mocks base method.
func (m *MockFullNode) ClientListDeals(arg0 context.Context) ([]api.DealInfo, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientListDeals", arg0)
ret0, _ := ret[0].([]api.DealInfo)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientListDeals indicates an expected call of ClientListDeals.
func (mr *MockFullNodeMockRecorder) ClientListDeals(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDeals", reflect.TypeOf((*MockFullNode)(nil).ClientListDeals), arg0)
}

// ClientListImports mocks base method.
func (m *MockFullNode) ClientListImports(arg0 context.Context) ([]api.Import, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientListImports", arg0)
ret0, _ := ret[0].([]api.Import)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientListImports indicates an expected call of ClientListImports.
func (mr *MockFullNodeMockRecorder) ClientListImports(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListImports", reflect.TypeOf((*MockFullNode)(nil).ClientListImports), arg0)
}

// ClientListRetrievals mocks base method.
func (m *MockFullNode) ClientListRetrievals(arg0 context.Context) ([]api.RetrievalInfo, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientListRetrievals", arg0)
ret0, _ := ret[0].([]api.RetrievalInfo)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientListRetrievals indicates an expected call of ClientListRetrievals.
func (mr *MockFullNodeMockRecorder) ClientListRetrievals(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListRetrievals", reflect.TypeOf((*MockFullNode)(nil).ClientListRetrievals), arg0)
}

// ClientMinerQueryOffer mocks base method.
func (m *MockFullNode) ClientMinerQueryOffer(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 *cid.Cid) (api.QueryOffer, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientMinerQueryOffer", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(api.QueryOffer)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientMinerQueryOffer indicates an expected call of ClientMinerQueryOffer.
func (mr *MockFullNodeMockRecorder) ClientMinerQueryOffer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientMinerQueryOffer", reflect.TypeOf((*MockFullNode)(nil).ClientMinerQueryOffer), arg0, arg1, arg2, arg3)
}

// ClientQueryAsk mocks base method.
func (m *MockFullNode) ClientQueryAsk(arg0 context.Context, arg1 peer.ID, arg2 address.Address) (*api.StorageAsk, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientQueryAsk", arg0, arg1, arg2)
ret0, _ := ret[0].(*api.StorageAsk)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientQueryAsk indicates an expected call of ClientQueryAsk.
func (mr *MockFullNodeMockRecorder) ClientQueryAsk(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientQueryAsk", reflect.TypeOf((*MockFullNode)(nil).ClientQueryAsk), arg0, arg1, arg2)
}

// ClientRemoveImport mocks base method.
func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 imports.ID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientRemoveImport", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}

// ClientRemoveImport indicates an expected call of ClientRemoveImport.
func (mr *MockFullNodeMockRecorder) ClientRemoveImport(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRemoveImport", reflect.TypeOf((*MockFullNode)(nil).ClientRemoveImport), arg0, arg1)
}

// ClientRestartDataTransfer mocks base method.
func (m *MockFullNode) ClientRestartDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientRestartDataTransfer", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}

// ClientRestartDataTransfer indicates an expected call of ClientRestartDataTransfer.
func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRestartDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientRestartDataTransfer), arg0, arg1, arg2, arg3)
}

// ClientRetrieve mocks base method.
func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder) (*api.RestrievalRes, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1)
ret0, _ := ret[0].(*api.RestrievalRes)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientRetrieve indicates an expected call of ClientRetrieve.
func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1)
}

// ClientRetrieveTryRestartInsufficientFunds mocks base method.
func (m *MockFullNode) ClientRetrieveTryRestartInsufficientFunds(arg0 context.Context, arg1 address.Address) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientRetrieveTryRestartInsufficientFunds", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}

// ClientRetrieveTryRestartInsufficientFunds indicates an expected call of ClientRetrieveTryRestartInsufficientFunds.
func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1)
}

// ClientRetrieveWait mocks base method.
func (m *MockFullNode) ClientRetrieveWait(arg0 context.Context, arg1 retrievalmarket.DealID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientRetrieveWait", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}

// ClientRetrieveWait indicates an expected call of ClientRetrieveWait.
func (mr *MockFullNodeMockRecorder) ClientRetrieveWait(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWait", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWait), arg0, arg1)
}

// ClientStartDeal mocks base method.
func (m *MockFullNode) ClientStartDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientStartDeal", arg0, arg1)
ret0, _ := ret[0].(*cid.Cid)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientStartDeal indicates an expected call of ClientStartDeal.
func (mr *MockFullNodeMockRecorder) ClientStartDeal(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStartDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStartDeal), arg0, arg1)
}

// ClientStatelessDeal mocks base method.
func (m *MockFullNode) ClientStatelessDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientStatelessDeal", arg0, arg1)
ret0, _ := ret[0].(*cid.Cid)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientStatelessDeal indicates an expected call of ClientStatelessDeal.
func (mr *MockFullNodeMockRecorder) ClientStatelessDeal(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStatelessDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStatelessDeal), arg0, arg1)
}

// Closing mocks base method.
func (m *MockFullNode) Closing(arg0 context.Context) (<-chan struct{}, error) {
m.ctrl.T.Helper()
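The MockFullNode methods removed above are produced by mockgen, so they disappear simply by regenerating the mock against the slimmed-down interface. For context, here is a hypothetical sketch of how tests typically stubbed one of these deal-making calls with gomock; the package path github.com/filecoin-project/lotus/api/mocks and the test itself are illustrative assumptions, not code from this commit.

// Hypothetical gomock usage sketch; after this commit the Client* expectations
// below no longer exist on MockFullNode.
package example

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api/mocks"
)

func TestClientHasLocalStub(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	node := mocks.NewMockFullNode(ctrl)
	// Expect one ClientHasLocal call and report the CID as locally stored.
	node.EXPECT().ClientHasLocal(gomock.Any(), gomock.Any()).Return(true, nil)

	has, err := node.ClientHasLocal(context.Background(), cid.Undef)
	if err != nil || !has {
		t.Fatalf("unexpected result: %v, %v", has, err)
	}
}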
api/proxy_gen.go (365 lines changed)
@@ -40,7 +40,6 @@ import (
"github.com/filecoin-project/lotus/chain/types/ethtypes"
"github.com/filecoin-project/lotus/journal/alerting"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/repo/imports"
"github.com/filecoin-project/lotus/storage/pipeline/piece"
"github.com/filecoin-project/lotus/storage/pipeline/sealiface"
"github.com/filecoin-project/lotus/storage/sealer/fsutil"
@@ -189,62 +188,6 @@ type FullNodeMethods struct {

ChainTipSetWeight func(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) `perm:"read"`

ClientCalcCommP func(p0 context.Context, p1 string) (*CommPRet, error) `perm:"write"`

ClientCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`

ClientCancelRetrievalDeal func(p0 context.Context, p1 retrievalmarket.DealID) error `perm:"write"`

ClientDataTransferUpdates func(p0 context.Context) (<-chan DataTransferChannel, error) `perm:"write"`

ClientDealPieceCID func(p0 context.Context, p1 cid.Cid) (DataCIDSize, error) `perm:"read"`

ClientDealSize func(p0 context.Context, p1 cid.Cid) (DataSize, error) `perm:"read"`

ClientExport func(p0 context.Context, p1 ExportRef, p2 FileRef) error `perm:"admin"`

ClientFindData func(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) `perm:"read"`

ClientGenCar func(p0 context.Context, p1 FileRef, p2 string) error `perm:"write"`

ClientGetDealInfo func(p0 context.Context, p1 cid.Cid) (*DealInfo, error) `perm:"read"`

ClientGetDealStatus func(p0 context.Context, p1 uint64) (string, error) `perm:"read"`

ClientGetDealUpdates func(p0 context.Context) (<-chan DealInfo, error) `perm:"write"`

ClientGetRetrievalUpdates func(p0 context.Context) (<-chan RetrievalInfo, error) `perm:"write"`

ClientHasLocal func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"write"`

ClientImport func(p0 context.Context, p1 FileRef) (*ImportRes, error) `perm:"admin"`

ClientListDataTransfers func(p0 context.Context) ([]DataTransferChannel, error) `perm:"write"`

ClientListDeals func(p0 context.Context) ([]DealInfo, error) `perm:"write"`

ClientListImports func(p0 context.Context) ([]Import, error) `perm:"write"`

ClientListRetrievals func(p0 context.Context) ([]RetrievalInfo, error) `perm:"write"`

ClientMinerQueryOffer func(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) `perm:"read"`

ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*StorageAsk, error) `perm:"read"`

ClientRemoveImport func(p0 context.Context, p1 imports.ID) error `perm:"admin"`

ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`

ClientRetrieve func(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) `perm:"admin"`

ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"`

ClientRetrieveWait func(p0 context.Context, p1 retrievalmarket.DealID) error `perm:"admin"`

ClientStartDeal func(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) `perm:"admin"`

ClientStatelessDeal func(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) `perm:"write"`

CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"`

EthAccounts func(p0 context.Context) ([]ethtypes.EthAddress, error) `perm:"read"`
@@ -1780,314 +1723,6 @@ func (s *FullNodeStub) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey)
return *new(types.BigInt), ErrNotSupported
}

func (s *FullNodeStruct) ClientCalcCommP(p0 context.Context, p1 string) (*CommPRet, error) {
if s.Internal.ClientCalcCommP == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientCalcCommP(p0, p1)
}

func (s *FullNodeStub) ClientCalcCommP(p0 context.Context, p1 string) (*CommPRet, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
if s.Internal.ClientCancelDataTransfer == nil {
return ErrNotSupported
}
return s.Internal.ClientCancelDataTransfer(p0, p1, p2, p3)
}

func (s *FullNodeStub) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
return ErrNotSupported
}

func (s *FullNodeStruct) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error {
if s.Internal.ClientCancelRetrievalDeal == nil {
return ErrNotSupported
}
return s.Internal.ClientCancelRetrievalDeal(p0, p1)
}

func (s *FullNodeStub) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error {
return ErrNotSupported
}

func (s *FullNodeStruct) ClientDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) {
if s.Internal.ClientDataTransferUpdates == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientDataTransferUpdates(p0)
}

func (s *FullNodeStub) ClientDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (DataCIDSize, error) {
if s.Internal.ClientDealPieceCID == nil {
return *new(DataCIDSize), ErrNotSupported
}
return s.Internal.ClientDealPieceCID(p0, p1)
}

func (s *FullNodeStub) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (DataCIDSize, error) {
return *new(DataCIDSize), ErrNotSupported
}

func (s *FullNodeStruct) ClientDealSize(p0 context.Context, p1 cid.Cid) (DataSize, error) {
if s.Internal.ClientDealSize == nil {
return *new(DataSize), ErrNotSupported
}
return s.Internal.ClientDealSize(p0, p1)
}

func (s *FullNodeStub) ClientDealSize(p0 context.Context, p1 cid.Cid) (DataSize, error) {
return *new(DataSize), ErrNotSupported
}

func (s *FullNodeStruct) ClientExport(p0 context.Context, p1 ExportRef, p2 FileRef) error {
if s.Internal.ClientExport == nil {
return ErrNotSupported
}
return s.Internal.ClientExport(p0, p1, p2)
}

func (s *FullNodeStub) ClientExport(p0 context.Context, p1 ExportRef, p2 FileRef) error {
return ErrNotSupported
}

func (s *FullNodeStruct) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) {
if s.Internal.ClientFindData == nil {
return *new([]QueryOffer), ErrNotSupported
}
return s.Internal.ClientFindData(p0, p1, p2)
}

func (s *FullNodeStub) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) {
return *new([]QueryOffer), ErrNotSupported
}

func (s *FullNodeStruct) ClientGenCar(p0 context.Context, p1 FileRef, p2 string) error {
if s.Internal.ClientGenCar == nil {
return ErrNotSupported
}
return s.Internal.ClientGenCar(p0, p1, p2)
}

func (s *FullNodeStub) ClientGenCar(p0 context.Context, p1 FileRef, p2 string) error {
return ErrNotSupported
}

func (s *FullNodeStruct) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*DealInfo, error) {
if s.Internal.ClientGetDealInfo == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientGetDealInfo(p0, p1)
}

func (s *FullNodeStub) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*DealInfo, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) {
if s.Internal.ClientGetDealStatus == nil {
return "", ErrNotSupported
}
return s.Internal.ClientGetDealStatus(p0, p1)
}

func (s *FullNodeStub) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) {
return "", ErrNotSupported
}

func (s *FullNodeStruct) ClientGetDealUpdates(p0 context.Context) (<-chan DealInfo, error) {
if s.Internal.ClientGetDealUpdates == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientGetDealUpdates(p0)
}

func (s *FullNodeStub) ClientGetDealUpdates(p0 context.Context) (<-chan DealInfo, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientGetRetrievalUpdates(p0 context.Context) (<-chan RetrievalInfo, error) {
if s.Internal.ClientGetRetrievalUpdates == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientGetRetrievalUpdates(p0)
}

func (s *FullNodeStub) ClientGetRetrievalUpdates(p0 context.Context) (<-chan RetrievalInfo, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) {
if s.Internal.ClientHasLocal == nil {
return false, ErrNotSupported
}
return s.Internal.ClientHasLocal(p0, p1)
}

func (s *FullNodeStub) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) {
return false, ErrNotSupported
}

func (s *FullNodeStruct) ClientImport(p0 context.Context, p1 FileRef) (*ImportRes, error) {
if s.Internal.ClientImport == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientImport(p0, p1)
}

func (s *FullNodeStub) ClientImport(p0 context.Context, p1 FileRef) (*ImportRes, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) {
if s.Internal.ClientListDataTransfers == nil {
return *new([]DataTransferChannel), ErrNotSupported
}
return s.Internal.ClientListDataTransfers(p0)
}

func (s *FullNodeStub) ClientListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) {
return *new([]DataTransferChannel), ErrNotSupported
}

func (s *FullNodeStruct) ClientListDeals(p0 context.Context) ([]DealInfo, error) {
if s.Internal.ClientListDeals == nil {
return *new([]DealInfo), ErrNotSupported
}
return s.Internal.ClientListDeals(p0)
}

func (s *FullNodeStub) ClientListDeals(p0 context.Context) ([]DealInfo, error) {
return *new([]DealInfo), ErrNotSupported
}

func (s *FullNodeStruct) ClientListImports(p0 context.Context) ([]Import, error) {
if s.Internal.ClientListImports == nil {
return *new([]Import), ErrNotSupported
}
return s.Internal.ClientListImports(p0)
}

func (s *FullNodeStub) ClientListImports(p0 context.Context) ([]Import, error) {
return *new([]Import), ErrNotSupported
}

func (s *FullNodeStruct) ClientListRetrievals(p0 context.Context) ([]RetrievalInfo, error) {
if s.Internal.ClientListRetrievals == nil {
return *new([]RetrievalInfo), ErrNotSupported
}
return s.Internal.ClientListRetrievals(p0)
}

func (s *FullNodeStub) ClientListRetrievals(p0 context.Context) ([]RetrievalInfo, error) {
return *new([]RetrievalInfo), ErrNotSupported
}

func (s *FullNodeStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) {
if s.Internal.ClientMinerQueryOffer == nil {
return *new(QueryOffer), ErrNotSupported
}
return s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3)
}

func (s *FullNodeStub) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) {
return *new(QueryOffer), ErrNotSupported
}

func (s *FullNodeStruct) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*StorageAsk, error) {
if s.Internal.ClientQueryAsk == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientQueryAsk(p0, p1, p2)
}

func (s *FullNodeStub) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*StorageAsk, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientRemoveImport(p0 context.Context, p1 imports.ID) error {
if s.Internal.ClientRemoveImport == nil {
return ErrNotSupported
}
return s.Internal.ClientRemoveImport(p0, p1)
}

func (s *FullNodeStub) ClientRemoveImport(p0 context.Context, p1 imports.ID) error {
return ErrNotSupported
}

func (s *FullNodeStruct) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
if s.Internal.ClientRestartDataTransfer == nil {
return ErrNotSupported
}
return s.Internal.ClientRestartDataTransfer(p0, p1, p2, p3)
}

func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
return ErrNotSupported
}

func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) {
if s.Internal.ClientRetrieve == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientRetrieve(p0, p1)
}

func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error {
if s.Internal.ClientRetrieveTryRestartInsufficientFunds == nil {
return ErrNotSupported
}
return s.Internal.ClientRetrieveTryRestartInsufficientFunds(p0, p1)
}

func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error {
return ErrNotSupported
}

func (s *FullNodeStruct) ClientRetrieveWait(p0 context.Context, p1 retrievalmarket.DealID) error {
if s.Internal.ClientRetrieveWait == nil {
return ErrNotSupported
}
return s.Internal.ClientRetrieveWait(p0, p1)
}

func (s *FullNodeStub) ClientRetrieveWait(p0 context.Context, p1 retrievalmarket.DealID) error {
return ErrNotSupported
}

func (s *FullNodeStruct) ClientStartDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) {
if s.Internal.ClientStartDeal == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientStartDeal(p0, p1)
}

func (s *FullNodeStub) ClientStartDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientStatelessDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) {
if s.Internal.ClientStatelessDeal == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientStatelessDeal(p0, p1)
}

func (s *FullNodeStub) ClientStatelessDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error {
if s.Internal.CreateBackup == nil {
return ErrNotSupported
@@ -6,13 +6,10 @@ import (
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
textselector "github.com/ipld/go-ipld-selector-text-lite"
"github.com/libp2p/go-libp2p/core/peer"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/builtin/v8/paych"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
@@ -24,9 +21,7 @@ import (
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/repo/imports"
)

//go:generate go run github.com/golang/mock/mockgen -destination=v0mocks/mock_full.go -package=v0mocks . FullNode
@@ -305,74 +300,6 @@ type FullNode interface {
WalletValidateAddress(context.Context, string) (address.Address, error) //perm:read

// Other

// MethodGroup: Client
// The Client methods all have to do with interacting with the storage and
// retrieval markets as a client

// ClientImport imports file under the specified path into filestore.
ClientImport(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) //perm:admin
// ClientRemoveImport removes file import
ClientRemoveImport(ctx context.Context, importID imports.ID) error //perm:admin
// ClientStartDeal proposes a deal with a miner.
ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:admin
// ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking.
ClientStatelessDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:write
// ClientGetDealInfo returns the latest information about a given deal.
ClientGetDealInfo(context.Context, cid.Cid) (*api.DealInfo, error) //perm:read
// ClientListDeals returns information about the deals made by the local client.
ClientListDeals(ctx context.Context) ([]api.DealInfo, error) //perm:write
// ClientGetDealUpdates returns the status of updated deals
ClientGetDealUpdates(ctx context.Context) (<-chan api.DealInfo, error) //perm:write
// ClientGetDealStatus returns status given a code
ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) //perm:read
// ClientHasLocal indicates whether a certain CID is locally stored.
ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) //perm:write
// ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) //perm:read
// ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) //perm:read
// ClientRetrieve initiates the retrieval of a file, as specified in the order.
ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef) error //perm:admin
// ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
// of status updates.
ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin
// ClientQueryAsk returns a signed StorageAsk from the specified miner.
// ClientListRetrievals returns information about retrievals made by the local client
ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) //perm:write
// ClientGetRetrievalUpdates returns status of updated retrieval deals
ClientGetRetrievalUpdates(ctx context.Context) (<-chan api.RetrievalInfo, error) //perm:write
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read
// ClientCalcCommP calculates the CommP and data size of the specified CID
ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) //perm:read
// ClientCalcCommP calculates the CommP for a specified file
ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) //perm:write
// ClientGenCar generates a CAR file for the specified file.
ClientGenCar(ctx context.Context, ref api.FileRef, outpath string) error //perm:write
// ClientDealSize calculates real deal data size
ClientDealSize(ctx context.Context, root cid.Cid) (api.DataSize, error) //perm:read
// ClientListTransfers returns the status of all ongoing transfers of data
ClientListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) //perm:write
ClientDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) //perm:write
// ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
// ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
// ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
// which are stuck due to insufficient funds
ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error //perm:write

// ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID
ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write

// ClientUnimport removes references to the specified file from filestore
// ClientUnimport(path string)

// ClientListImports lists imported files and their root CIDs
ClientListImports(ctx context.Context) ([]api.Import, error) //perm:write

// ClientListAsks() []Ask

// MethodGroup: State
// The State methods are used to query, inspect, and interact with chain state.
// Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset.
@@ -7,14 +7,10 @@ import (

blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p/core/peer"
"golang.org/x/xerrors"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/builtin/v8/paych"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
@@ -26,9 +22,7 @@ import (
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/repo/imports"
)

var ErrNotSupported = xerrors.New("method not supported")
@@ -90,60 +84,6 @@ type FullNodeMethods struct {

ChainTipSetWeight func(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) `perm:"read"`

ClientCalcCommP func(p0 context.Context, p1 string) (*api.CommPRet, error) `perm:"write"`

ClientCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`

ClientCancelRetrievalDeal func(p0 context.Context, p1 retrievalmarket.DealID) error `perm:"write"`

ClientDataTransferUpdates func(p0 context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"`

ClientDealPieceCID func(p0 context.Context, p1 cid.Cid) (api.DataCIDSize, error) `perm:"read"`

ClientDealSize func(p0 context.Context, p1 cid.Cid) (api.DataSize, error) `perm:"read"`

ClientFindData func(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) `perm:"read"`

ClientGenCar func(p0 context.Context, p1 api.FileRef, p2 string) error `perm:"write"`

ClientGetDealInfo func(p0 context.Context, p1 cid.Cid) (*api.DealInfo, error) `perm:"read"`

ClientGetDealStatus func(p0 context.Context, p1 uint64) (string, error) `perm:"read"`

ClientGetDealUpdates func(p0 context.Context) (<-chan api.DealInfo, error) `perm:"write"`

ClientGetRetrievalUpdates func(p0 context.Context) (<-chan api.RetrievalInfo, error) `perm:"write"`

ClientHasLocal func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"write"`

ClientImport func(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) `perm:"admin"`

ClientListDataTransfers func(p0 context.Context) ([]api.DataTransferChannel, error) `perm:"write"`

ClientListDeals func(p0 context.Context) ([]api.DealInfo, error) `perm:"write"`

ClientListImports func(p0 context.Context) ([]api.Import, error) `perm:"write"`

ClientListRetrievals func(p0 context.Context) ([]api.RetrievalInfo, error) `perm:"write"`

ClientMinerQueryOffer func(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) `perm:"read"`

ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) `perm:"read"`

ClientRemoveImport func(p0 context.Context, p1 imports.ID) error `perm:"admin"`

ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`

ClientRetrieve func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error `perm:"admin"`

ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"`

ClientRetrieveWithEvents func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"`

ClientStartDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"admin"`

ClientStatelessDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"write"`

CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"`

GasEstimateFeeCap func(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
@@ -796,303 +736,6 @@ func (s *FullNodeStub) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey)
return *new(types.BigInt), ErrNotSupported
}

func (s *FullNodeStruct) ClientCalcCommP(p0 context.Context, p1 string) (*api.CommPRet, error) {
if s.Internal.ClientCalcCommP == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientCalcCommP(p0, p1)
}

func (s *FullNodeStub) ClientCalcCommP(p0 context.Context, p1 string) (*api.CommPRet, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
if s.Internal.ClientCancelDataTransfer == nil {
return ErrNotSupported
}
return s.Internal.ClientCancelDataTransfer(p0, p1, p2, p3)
}

func (s *FullNodeStub) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
return ErrNotSupported
}

func (s *FullNodeStruct) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error {
if s.Internal.ClientCancelRetrievalDeal == nil {
return ErrNotSupported
}
return s.Internal.ClientCancelRetrievalDeal(p0, p1)
}

func (s *FullNodeStub) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error {
return ErrNotSupported
}

func (s *FullNodeStruct) ClientDataTransferUpdates(p0 context.Context) (<-chan api.DataTransferChannel, error) {
if s.Internal.ClientDataTransferUpdates == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientDataTransferUpdates(p0)
}

func (s *FullNodeStub) ClientDataTransferUpdates(p0 context.Context) (<-chan api.DataTransferChannel, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (api.DataCIDSize, error) {
if s.Internal.ClientDealPieceCID == nil {
return *new(api.DataCIDSize), ErrNotSupported
}
return s.Internal.ClientDealPieceCID(p0, p1)
}

func (s *FullNodeStub) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (api.DataCIDSize, error) {
return *new(api.DataCIDSize), ErrNotSupported
}

func (s *FullNodeStruct) ClientDealSize(p0 context.Context, p1 cid.Cid) (api.DataSize, error) {
if s.Internal.ClientDealSize == nil {
return *new(api.DataSize), ErrNotSupported
}
return s.Internal.ClientDealSize(p0, p1)
}

func (s *FullNodeStub) ClientDealSize(p0 context.Context, p1 cid.Cid) (api.DataSize, error) {
return *new(api.DataSize), ErrNotSupported
}

func (s *FullNodeStruct) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) {
if s.Internal.ClientFindData == nil {
return *new([]api.QueryOffer), ErrNotSupported
}
return s.Internal.ClientFindData(p0, p1, p2)
}

func (s *FullNodeStub) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) {
return *new([]api.QueryOffer), ErrNotSupported
}

func (s *FullNodeStruct) ClientGenCar(p0 context.Context, p1 api.FileRef, p2 string) error {
if s.Internal.ClientGenCar == nil {
return ErrNotSupported
}
return s.Internal.ClientGenCar(p0, p1, p2)
}

func (s *FullNodeStub) ClientGenCar(p0 context.Context, p1 api.FileRef, p2 string) error {
return ErrNotSupported
}

func (s *FullNodeStruct) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*api.DealInfo, error) {
if s.Internal.ClientGetDealInfo == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientGetDealInfo(p0, p1)
}

func (s *FullNodeStub) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*api.DealInfo, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) {
if s.Internal.ClientGetDealStatus == nil {
return "", ErrNotSupported
}
return s.Internal.ClientGetDealStatus(p0, p1)
}

func (s *FullNodeStub) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) {
return "", ErrNotSupported
}

func (s *FullNodeStruct) ClientGetDealUpdates(p0 context.Context) (<-chan api.DealInfo, error) {
if s.Internal.ClientGetDealUpdates == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientGetDealUpdates(p0)
}

func (s *FullNodeStub) ClientGetDealUpdates(p0 context.Context) (<-chan api.DealInfo, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientGetRetrievalUpdates(p0 context.Context) (<-chan api.RetrievalInfo, error) {
if s.Internal.ClientGetRetrievalUpdates == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientGetRetrievalUpdates(p0)
}

func (s *FullNodeStub) ClientGetRetrievalUpdates(p0 context.Context) (<-chan api.RetrievalInfo, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) {
if s.Internal.ClientHasLocal == nil {
return false, ErrNotSupported
}
return s.Internal.ClientHasLocal(p0, p1)
}

func (s *FullNodeStub) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) {
return false, ErrNotSupported
}

func (s *FullNodeStruct) ClientImport(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) {
if s.Internal.ClientImport == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientImport(p0, p1)
}

func (s *FullNodeStub) ClientImport(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientListDataTransfers(p0 context.Context) ([]api.DataTransferChannel, error) {
if s.Internal.ClientListDataTransfers == nil {
return *new([]api.DataTransferChannel), ErrNotSupported
}
return s.Internal.ClientListDataTransfers(p0)
}

func (s *FullNodeStub) ClientListDataTransfers(p0 context.Context) ([]api.DataTransferChannel, error) {
return *new([]api.DataTransferChannel), ErrNotSupported
}

func (s *FullNodeStruct) ClientListDeals(p0 context.Context) ([]api.DealInfo, error) {
if s.Internal.ClientListDeals == nil {
return *new([]api.DealInfo), ErrNotSupported
}
return s.Internal.ClientListDeals(p0)
}

func (s *FullNodeStub) ClientListDeals(p0 context.Context) ([]api.DealInfo, error) {
return *new([]api.DealInfo), ErrNotSupported
}

func (s *FullNodeStruct) ClientListImports(p0 context.Context) ([]api.Import, error) {
if s.Internal.ClientListImports == nil {
return *new([]api.Import), ErrNotSupported
}
return s.Internal.ClientListImports(p0)
}

func (s *FullNodeStub) ClientListImports(p0 context.Context) ([]api.Import, error) {
return *new([]api.Import), ErrNotSupported
}

func (s *FullNodeStruct) ClientListRetrievals(p0 context.Context) ([]api.RetrievalInfo, error) {
if s.Internal.ClientListRetrievals == nil {
return *new([]api.RetrievalInfo), ErrNotSupported
}
return s.Internal.ClientListRetrievals(p0)
}

func (s *FullNodeStub) ClientListRetrievals(p0 context.Context) ([]api.RetrievalInfo, error) {
return *new([]api.RetrievalInfo), ErrNotSupported
}

func (s *FullNodeStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) {
if s.Internal.ClientMinerQueryOffer == nil {
return *new(api.QueryOffer), ErrNotSupported
}
return s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3)
}

func (s *FullNodeStub) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) {
return *new(api.QueryOffer), ErrNotSupported
}

func (s *FullNodeStruct) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) {
if s.Internal.ClientQueryAsk == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientQueryAsk(p0, p1, p2)
}

func (s *FullNodeStub) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientRemoveImport(p0 context.Context, p1 imports.ID) error {
if s.Internal.ClientRemoveImport == nil {
return ErrNotSupported
}
return s.Internal.ClientRemoveImport(p0, p1)
}

func (s *FullNodeStub) ClientRemoveImport(p0 context.Context, p1 imports.ID) error {
return ErrNotSupported
}

func (s *FullNodeStruct) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
if s.Internal.ClientRestartDataTransfer == nil {
return ErrNotSupported
}
return s.Internal.ClientRestartDataTransfer(p0, p1, p2, p3)
}

func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
return ErrNotSupported
}

func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error {
if s.Internal.ClientRetrieve == nil {
return ErrNotSupported
}
return s.Internal.ClientRetrieve(p0, p1, p2)
}

func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error {
return ErrNotSupported
}

func (s *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error {
if s.Internal.ClientRetrieveTryRestartInsufficientFunds == nil {
return ErrNotSupported
}
return s.Internal.ClientRetrieveTryRestartInsufficientFunds(p0, p1)
}

func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error {
return ErrNotSupported
}

func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
if s.Internal.ClientRetrieveWithEvents == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientRetrieveWithEvents(p0, p1, p2)
}

func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientStartDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) {
if s.Internal.ClientStartDeal == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientStartDeal(p0, p1)
}

func (s *FullNodeStub) ClientStartDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) ClientStatelessDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) {
if s.Internal.ClientStatelessDeal == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientStatelessDeal(p0, p1)
}

func (s *FullNodeStub) ClientStatelessDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) {
return nil, ErrNotSupported
}

func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error {
if s.Internal.CreateBackup == nil {
return ErrNotSupported
@@ -20,9 +20,6 @@ import (

address "github.com/filecoin-project/go-address"
bitfield "github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket"
storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket"
auth "github.com/filecoin-project/go-jsonrpc/auth"
abi "github.com/filecoin-project/go-state-types/abi"
big "github.com/filecoin-project/go-state-types/big"
@@ -36,13 +33,10 @@ import (

api "github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
v0api "github.com/filecoin-project/lotus/api/v0api"
miner1 "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
types "github.com/filecoin-project/lotus/chain/types"
alerting "github.com/filecoin-project/lotus/journal/alerting"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
dtypes "github.com/filecoin-project/lotus/node/modules/dtypes"
imports "github.com/filecoin-project/lotus/node/repo/imports"
)

// MockFullNode is a mock of FullNode interface.
@@ -455,404 +449,6 @@ func (mr *MockFullNodeMockRecorder) ChainTipSetWeight(arg0, arg1 interface{}) *g
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainTipSetWeight", reflect.TypeOf((*MockFullNode)(nil).ChainTipSetWeight), arg0, arg1)
}

// ClientCalcCommP mocks base method.
func (m *MockFullNode) ClientCalcCommP(arg0 context.Context, arg1 string) (*api.CommPRet, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientCalcCommP", arg0, arg1)
ret0, _ := ret[0].(*api.CommPRet)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientCalcCommP indicates an expected call of ClientCalcCommP.
func (mr *MockFullNodeMockRecorder) ClientCalcCommP(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCalcCommP", reflect.TypeOf((*MockFullNode)(nil).ClientCalcCommP), arg0, arg1)
}

// ClientCancelDataTransfer mocks base method.
func (m *MockFullNode) ClientCancelDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientCancelDataTransfer", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}

// ClientCancelDataTransfer indicates an expected call of ClientCancelDataTransfer.
func (mr *MockFullNodeMockRecorder) ClientCancelDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientCancelDataTransfer), arg0, arg1, arg2, arg3)
}

// ClientCancelRetrievalDeal mocks base method.
func (m *MockFullNode) ClientCancelRetrievalDeal(arg0 context.Context, arg1 retrievalmarket.DealID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientCancelRetrievalDeal", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}

// ClientCancelRetrievalDeal indicates an expected call of ClientCancelRetrievalDeal.
func (mr *MockFullNodeMockRecorder) ClientCancelRetrievalDeal(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelRetrievalDeal", reflect.TypeOf((*MockFullNode)(nil).ClientCancelRetrievalDeal), arg0, arg1)
}

// ClientDataTransferUpdates mocks base method.
func (m *MockFullNode) ClientDataTransferUpdates(arg0 context.Context) (<-chan api.DataTransferChannel, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientDataTransferUpdates", arg0)
ret0, _ := ret[0].(<-chan api.DataTransferChannel)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientDataTransferUpdates indicates an expected call of ClientDataTransferUpdates.
func (mr *MockFullNodeMockRecorder) ClientDataTransferUpdates(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDataTransferUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientDataTransferUpdates), arg0)
}

// ClientDealPieceCID mocks base method.
func (m *MockFullNode) ClientDealPieceCID(arg0 context.Context, arg1 cid.Cid) (api.DataCIDSize, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientDealPieceCID", arg0, arg1)
ret0, _ := ret[0].(api.DataCIDSize)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientDealPieceCID indicates an expected call of ClientDealPieceCID.
func (mr *MockFullNodeMockRecorder) ClientDealPieceCID(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealPieceCID", reflect.TypeOf((*MockFullNode)(nil).ClientDealPieceCID), arg0, arg1)
}

// ClientDealSize mocks base method.
func (m *MockFullNode) ClientDealSize(arg0 context.Context, arg1 cid.Cid) (api.DataSize, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientDealSize", arg0, arg1)
ret0, _ := ret[0].(api.DataSize)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientDealSize indicates an expected call of ClientDealSize.
func (mr *MockFullNodeMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockFullNode)(nil).ClientDealSize), arg0, arg1)
}

// ClientFindData mocks base method.
func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]api.QueryOffer, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientFindData", arg0, arg1, arg2)
ret0, _ := ret[0].([]api.QueryOffer)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientFindData indicates an expected call of ClientFindData.
func (mr *MockFullNodeMockRecorder) ClientFindData(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientFindData", reflect.TypeOf((*MockFullNode)(nil).ClientFindData), arg0, arg1, arg2)
}

// ClientGenCar mocks base method.
func (m *MockFullNode) ClientGenCar(arg0 context.Context, arg1 api.FileRef, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientGenCar", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}

// ClientGenCar indicates an expected call of ClientGenCar.
func (mr *MockFullNodeMockRecorder) ClientGenCar(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGenCar", reflect.TypeOf((*MockFullNode)(nil).ClientGenCar), arg0, arg1, arg2)
}

// ClientGetDealInfo mocks base method.
func (m *MockFullNode) ClientGetDealInfo(arg0 context.Context, arg1 cid.Cid) (*api.DealInfo, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientGetDealInfo", arg0, arg1)
ret0, _ := ret[0].(*api.DealInfo)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientGetDealInfo indicates an expected call of ClientGetDealInfo.
func (mr *MockFullNodeMockRecorder) ClientGetDealInfo(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealInfo", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealInfo), arg0, arg1)
}

// ClientGetDealStatus mocks base method.
func (m *MockFullNode) ClientGetDealStatus(arg0 context.Context, arg1 uint64) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientGetDealStatus", arg0, arg1)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientGetDealStatus indicates an expected call of ClientGetDealStatus.
func (mr *MockFullNodeMockRecorder) ClientGetDealStatus(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealStatus", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealStatus), arg0, arg1)
}

// ClientGetDealUpdates mocks base method.
func (m *MockFullNode) ClientGetDealUpdates(arg0 context.Context) (<-chan api.DealInfo, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientGetDealUpdates", arg0)
ret0, _ := ret[0].(<-chan api.DealInfo)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientGetDealUpdates indicates an expected call of ClientGetDealUpdates.
func (mr *MockFullNodeMockRecorder) ClientGetDealUpdates(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealUpdates), arg0)
}

// ClientGetRetrievalUpdates mocks base method.
func (m *MockFullNode) ClientGetRetrievalUpdates(arg0 context.Context) (<-chan api.RetrievalInfo, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientGetRetrievalUpdates", arg0)
ret0, _ := ret[0].(<-chan api.RetrievalInfo)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientGetRetrievalUpdates indicates an expected call of ClientGetRetrievalUpdates.
func (mr *MockFullNodeMockRecorder) ClientGetRetrievalUpdates(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetRetrievalUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetRetrievalUpdates), arg0)
}

// ClientHasLocal mocks base method.
func (m *MockFullNode) ClientHasLocal(arg0 context.Context, arg1 cid.Cid) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientHasLocal", arg0, arg1)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientHasLocal indicates an expected call of ClientHasLocal.
func (mr *MockFullNodeMockRecorder) ClientHasLocal(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientHasLocal", reflect.TypeOf((*MockFullNode)(nil).ClientHasLocal), arg0, arg1)
}

// ClientImport mocks base method.
func (m *MockFullNode) ClientImport(arg0 context.Context, arg1 api.FileRef) (*api.ImportRes, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientImport", arg0, arg1)
ret0, _ := ret[0].(*api.ImportRes)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientImport indicates an expected call of ClientImport.
func (mr *MockFullNodeMockRecorder) ClientImport(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientImport", reflect.TypeOf((*MockFullNode)(nil).ClientImport), arg0, arg1)
}

// ClientListDataTransfers mocks base method.
func (m *MockFullNode) ClientListDataTransfers(arg0 context.Context) ([]api.DataTransferChannel, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientListDataTransfers", arg0)
ret0, _ := ret[0].([]api.DataTransferChannel)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientListDataTransfers indicates an expected call of ClientListDataTransfers.
func (mr *MockFullNodeMockRecorder) ClientListDataTransfers(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDataTransfers", reflect.TypeOf((*MockFullNode)(nil).ClientListDataTransfers), arg0)
}

// ClientListDeals mocks base method.
func (m *MockFullNode) ClientListDeals(arg0 context.Context) ([]api.DealInfo, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientListDeals", arg0)
ret0, _ := ret[0].([]api.DealInfo)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientListDeals indicates an expected call of ClientListDeals.
func (mr *MockFullNodeMockRecorder) ClientListDeals(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDeals", reflect.TypeOf((*MockFullNode)(nil).ClientListDeals), arg0)
}

// ClientListImports mocks base method.
func (m *MockFullNode) ClientListImports(arg0 context.Context) ([]api.Import, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientListImports", arg0)
ret0, _ := ret[0].([]api.Import)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientListImports indicates an expected call of ClientListImports.
func (mr *MockFullNodeMockRecorder) ClientListImports(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListImports", reflect.TypeOf((*MockFullNode)(nil).ClientListImports), arg0)
}

// ClientListRetrievals mocks base method.
func (m *MockFullNode) ClientListRetrievals(arg0 context.Context) ([]api.RetrievalInfo, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientListRetrievals", arg0)
ret0, _ := ret[0].([]api.RetrievalInfo)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientListRetrievals indicates an expected call of ClientListRetrievals.
func (mr *MockFullNodeMockRecorder) ClientListRetrievals(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListRetrievals", reflect.TypeOf((*MockFullNode)(nil).ClientListRetrievals), arg0)
}

// ClientMinerQueryOffer mocks base method.
func (m *MockFullNode) ClientMinerQueryOffer(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 *cid.Cid) (api.QueryOffer, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientMinerQueryOffer", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(api.QueryOffer)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientMinerQueryOffer indicates an expected call of ClientMinerQueryOffer.
func (mr *MockFullNodeMockRecorder) ClientMinerQueryOffer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientMinerQueryOffer", reflect.TypeOf((*MockFullNode)(nil).ClientMinerQueryOffer), arg0, arg1, arg2, arg3)
}

// ClientQueryAsk mocks base method.
func (m *MockFullNode) ClientQueryAsk(arg0 context.Context, arg1 peer.ID, arg2 address.Address) (*storagemarket.StorageAsk, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientQueryAsk", arg0, arg1, arg2)
ret0, _ := ret[0].(*storagemarket.StorageAsk)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientQueryAsk indicates an expected call of ClientQueryAsk.
func (mr *MockFullNodeMockRecorder) ClientQueryAsk(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientQueryAsk", reflect.TypeOf((*MockFullNode)(nil).ClientQueryAsk), arg0, arg1, arg2)
}

// ClientRemoveImport mocks base method.
func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 imports.ID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientRemoveImport", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}

// ClientRemoveImport indicates an expected call of ClientRemoveImport.
func (mr *MockFullNodeMockRecorder) ClientRemoveImport(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRemoveImport", reflect.TypeOf((*MockFullNode)(nil).ClientRemoveImport), arg0, arg1)
}

// ClientRestartDataTransfer mocks base method.
func (m *MockFullNode) ClientRestartDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientRestartDataTransfer", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}

// ClientRestartDataTransfer indicates an expected call of ClientRestartDataTransfer.
func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRestartDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientRestartDataTransfer), arg0, arg1, arg2, arg3)
}

// ClientRetrieve mocks base method.
func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 v0api.RetrievalOrder, arg2 *api.FileRef) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}

// ClientRetrieve indicates an expected call of ClientRetrieve.
func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1, arg2)
}

// ClientRetrieveTryRestartInsufficientFunds mocks base method.
func (m *MockFullNode) ClientRetrieveTryRestartInsufficientFunds(arg0 context.Context, arg1 address.Address) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientRetrieveTryRestartInsufficientFunds", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}

// ClientRetrieveTryRestartInsufficientFunds indicates an expected call of ClientRetrieveTryRestartInsufficientFunds.
func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1)
}

// ClientRetrieveWithEvents mocks base method.
func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 v0api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2)
ret0, _ := ret[0].(<-chan marketevents.RetrievalEvent)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientRetrieveWithEvents indicates an expected call of ClientRetrieveWithEvents.
func (mr *MockFullNodeMockRecorder) ClientRetrieveWithEvents(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWithEvents", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWithEvents), arg0, arg1, arg2)
}

// ClientStartDeal mocks base method.
func (m *MockFullNode) ClientStartDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientStartDeal", arg0, arg1)
ret0, _ := ret[0].(*cid.Cid)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientStartDeal indicates an expected call of ClientStartDeal.
func (mr *MockFullNodeMockRecorder) ClientStartDeal(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStartDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStartDeal), arg0, arg1)
}

// ClientStatelessDeal mocks base method.
func (m *MockFullNode) ClientStatelessDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientStatelessDeal", arg0, arg1)
ret0, _ := ret[0].(*cid.Cid)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// ClientStatelessDeal indicates an expected call of ClientStatelessDeal.
func (mr *MockFullNodeMockRecorder) ClientStatelessDeal(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStatelessDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStatelessDeal), arg0, arg1)
}

// Closing mocks base method.
func (m *MockFullNode) Closing(arg0 context.Context) (<-chan struct{}, error) {
m.ctrl.T.Helper()
@@ -4,21 +4,16 @@ import (
"context"

"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p/core/peer"
"golang.org/x/xerrors"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"

"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
)

type WrapperV1Full struct {
@@ -210,158 +205,10 @@ func (w *WrapperV1Full) ChainGetRandomnessFromBeacon(ctx context.Context, tsk ty
return w.StateGetRandomnessFromBeacon(ctx, personalization, randEpoch, entropy, tsk)
}

func (w *WrapperV1Full) ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef) error {
events := make(chan marketevents.RetrievalEvent)
go w.clientRetrieve(ctx, order, ref, events)

for {
select {
case evt, ok := <-events:
if !ok { // done successfully
return nil
}

if evt.Err != "" {
return xerrors.Errorf("retrieval failed: %s", evt.Err)
}
case <-ctx.Done():
return xerrors.Errorf("retrieval timed out")
}
}
}

func (w *WrapperV1Full) ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
events := make(chan marketevents.RetrievalEvent)
go w.clientRetrieve(ctx, order, ref, events)
return events, nil
}

func readSubscribeEvents(ctx context.Context, dealID retrievalmarket.DealID, subscribeEvents <-chan api.RetrievalInfo, events chan marketevents.RetrievalEvent) error {
for {
var subscribeEvent api.RetrievalInfo
var evt retrievalmarket.ClientEvent
select {
case <-ctx.Done():
return xerrors.New("Retrieval Timed Out")
case subscribeEvent = <-subscribeEvents:
if subscribeEvent.ID != dealID {
// we can't check the deal ID ahead of time because:
// 1. We need to subscribe before retrieving.
// 2. We won't know the deal ID until after retrieving.
continue
}
if subscribeEvent.Event != nil {
evt = *subscribeEvent.Event
}
}

select {
case <-ctx.Done():
return xerrors.New("Retrieval Timed Out")
case events <- marketevents.RetrievalEvent{
Event: evt,
Status: subscribeEvent.Status,
BytesReceived: subscribeEvent.BytesReceived,
FundsSpent: subscribeEvent.TotalPaid,
}:
}

switch subscribeEvent.Status {
case retrievalmarket.DealStatusCompleted:
return nil
case retrievalmarket.DealStatusRejected:
return xerrors.Errorf("Retrieval Proposal Rejected: %s", subscribeEvent.Message)
case
retrievalmarket.DealStatusDealNotFound,
retrievalmarket.DealStatusErrored:
return xerrors.Errorf("Retrieval Error: %s", subscribeEvent.Message)
}
}
}

func (w *WrapperV1Full) clientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef, events chan marketevents.RetrievalEvent) {
defer close(events)

finish := func(e error) {
if e != nil {
events <- marketevents.RetrievalEvent{Err: e.Error(), FundsSpent: big.Zero()}
}
}

var dealID retrievalmarket.DealID
if order.FromLocalCAR == "" {
// Subscribe to events before retrieving to avoid losing events.
subscribeCtx, cancel := context.WithCancel(ctx)
defer cancel()
retrievalEvents, err := w.ClientGetRetrievalUpdates(subscribeCtx)

if err != nil {
finish(xerrors.Errorf("GetRetrievalUpdates failed: %w", err))
return
}

retrievalRes, err := w.FullNode.ClientRetrieve(ctx, api.RetrievalOrder{
Root: order.Root,
Piece: order.Piece,
Size: order.Size,
Total: order.Total,
UnsealPrice: order.UnsealPrice,
PaymentInterval: order.PaymentInterval,
PaymentIntervalIncrease: order.PaymentIntervalIncrease,
Client: order.Client,
Miner: order.Miner,
MinerPeer: order.MinerPeer,
})

if err != nil {
finish(xerrors.Errorf("Retrieve failed: %w", err))
return
}

dealID = retrievalRes.DealID

err = readSubscribeEvents(ctx, retrievalRes.DealID, retrievalEvents, events)
if err != nil {
finish(xerrors.Errorf("Retrieve: %w", err))
return
}
}

// If ref is nil, it only fetches the data into the configured blockstore.
if ref == nil {
finish(nil)
return
}

eref := api.ExportRef{
Root: order.Root,
FromLocalCAR: order.FromLocalCAR,
DealID: dealID,
}

if order.DatamodelPathSelector != nil {
s := api.Selector(*order.DatamodelPathSelector)
eref.DAGs = append(eref.DAGs, api.DagSpec{
DataSelector: &s,
ExportMerkleProof: true,
})
}

finish(w.ClientExport(ctx, eref, *ref))
}

func (w *WrapperV1Full) PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) {
return w.FullNode.PaychFund(ctx, from, to, amt)
}

func (w *WrapperV1Full) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) {
a, err := w.FullNode.ClientQueryAsk(ctx, p, miner)
if err != nil {
return nil, err
}
return a.Response, nil
}

func (w *WrapperV1Full) BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) {
return w.StateGetBeaconEntry(ctx, epoch)
}
File diff suppressed because it is too large
@@ -242,7 +242,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4236"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3871"
}
},
{
@@ -473,7 +473,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4247"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3882"
}
},
{
@@ -572,7 +572,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4258"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3893"
}
},
{
@@ -604,7 +604,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4269"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3904"
}
},
{
@@ -710,7 +710,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4280"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3915"
}
},
{
@@ -803,7 +803,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4291"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3926"
}
},
{
@@ -887,7 +887,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4302"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3937"
}
},
{
@@ -987,7 +987,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4313"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3948"
}
},
{
@@ -1043,7 +1043,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4324"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3959"
}
},
{
@@ -1116,7 +1116,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4335"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3970"
}
},
{
@@ -1189,7 +1189,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4346"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3981"
}
},
{
@@ -1236,7 +1236,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4357"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3992"
}
},
{
@@ -1268,7 +1268,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4368"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4003"
}
},
{
@@ -1305,7 +1305,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4390"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4025"
}
},
{
@@ -1352,7 +1352,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4401"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4036"
}
},
{
@@ -1392,7 +1392,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4412"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4047"
}
},
{
@@ -1439,7 +1439,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4423"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4058"
}
},
{
@@ -1494,7 +1494,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4434"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4069"
}
},
{
@@ -1523,7 +1523,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4445"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4080"
}
},
{
@@ -1660,7 +1660,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4456"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4091"
}
},
{
@@ -1689,7 +1689,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4467"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4102"
}
},
{
@@ -1743,7 +1743,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4478"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4113"
}
},
{
@@ -1834,7 +1834,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4489"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4124"
}
},
{
@@ -1862,7 +1862,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4500"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4135"
}
},
{
@@ -1952,7 +1952,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4511"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4146"
}
},
{
@@ -2208,7 +2208,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4522"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4157"
}
},
{
@@ -2453,7 +2453,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4533"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4168"
}
},
{
@@ -2509,7 +2509,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4544"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4179"
}
},
{
@@ -2556,7 +2556,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4555"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4190"
}
},
{
@@ -2654,7 +2654,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4566"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4201"
}
},
{
@@ -2720,7 +2720,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4577"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4212"
}
},
{
@@ -2786,7 +2786,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4588"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4223"
}
},
{
@@ -2895,7 +2895,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4599"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4234"
}
},
{
@@ -2953,7 +2953,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4610"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4245"
}
},
{
@@ -3075,7 +3075,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4621"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4256"
}
},
{
@@ -3267,7 +3267,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4632"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4267"
}
},
{
@@ -3476,7 +3476,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4643"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4278"
}
},
{
@@ -3567,7 +3567,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4654"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4289"
}
},
{
@@ -3625,7 +3625,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4665"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4300"
}
},
{
@@ -3883,7 +3883,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4676"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4311"
}
},
{
@@ -4158,7 +4158,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4687"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4322"
}
},
{
@@ -4186,7 +4186,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4698"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4333"
}
},
{
@@ -4224,7 +4224,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4709"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4344"
}
},
{
@@ -4332,7 +4332,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4720"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4355"
}
},
{
@@ -4370,7 +4370,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4731"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4366"
}
},
{
@@ -4399,7 +4399,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4742"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4377"
}
},
{
@@ -4462,7 +4462,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4753"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4388"
}
},
{
@@ -4525,7 +4525,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4764"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4399"
}
},
{
@@ -4570,7 +4570,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4775"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4410"
}
},
{
@@ -4692,7 +4692,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4786"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4421"
}
},
{
@@ -4847,7 +4847,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4797"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4432"
}
},
{
@@ -4901,7 +4901,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4808"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4443"
}
},
{
@@ -4955,7 +4955,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4819"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4454"
}
},
{
@@ -5010,7 +5010,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4830"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4465"
}
},
{
@@ -5112,7 +5112,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4841"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4476"
}
},
{
@@ -5335,7 +5335,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4852"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4487"
}
},
{
@@ -5518,7 +5518,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4863"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4498"
}
},
{
@@ -5712,7 +5712,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4874"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4509"
}
},
{
@@ -5758,7 +5758,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4885"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4520"
}
},
{
@@ -5908,7 +5908,7 @@
"deprecated": false,
"externalDocs": {
"description": "Github remote link",
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4896"
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4531"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -6045,7 +6045,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4907"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4542"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -6113,7 +6113,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4918"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4553"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -6230,7 +6230,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4929"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4564"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -6321,7 +6321,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4940"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4575"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -6407,7 +6407,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4951"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4586"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -6434,7 +6434,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4962"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4597"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -6461,7 +6461,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4973"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4608"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -6529,7 +6529,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4984"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4619"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -7035,7 +7035,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4995"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4630"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -7132,7 +7132,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5006"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4641"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -7232,7 +7232,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5017"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4652"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -7332,7 +7332,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5028"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4663"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -7457,7 +7457,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5039"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4674"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -7566,7 +7566,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5050"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4685"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -7669,7 +7669,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5061"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4696"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -7799,7 +7799,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5072"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4707"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -7906,7 +7906,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5083"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4718"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -7967,7 +7967,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5094"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4729"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -8035,7 +8035,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5105"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4740"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -8116,7 +8116,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5116"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4751"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -8280,7 +8280,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5127"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4762"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -8373,7 +8373,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5138"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4773"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -8574,7 +8574,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5149"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4784"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -8685,7 +8685,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5160"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4795"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -8816,7 +8816,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5171"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4806"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -8902,7 +8902,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5182"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4817"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -8929,7 +8929,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5193"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4828"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -8982,7 +8982,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5204"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4839"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -9070,7 +9070,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5215"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4850"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -9521,7 +9521,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5226"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4861"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -9688,7 +9688,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5237"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4872"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -9861,7 +9861,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5248"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4883"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -9929,7 +9929,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5259"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4894"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -9997,7 +9997,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5270"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4905"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -10158,7 +10158,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5281"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4916"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -10203,7 +10203,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5303"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4938"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -10248,7 +10248,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5314"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4949"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -10275,7 +10275,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5325"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4960"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
File diff suppressed because it is too large
@ -161,7 +161,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7151"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6786"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -252,7 +252,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7162"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6797"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -420,7 +420,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7173"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6808"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -447,7 +447,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7184"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6819"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -597,7 +597,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7195"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6830"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -700,7 +700,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7206"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6841"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -803,7 +803,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7217"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6852"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -925,7 +925,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7228"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6863"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -1135,7 +1135,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7239"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6874"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -1306,7 +1306,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7250"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6885"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -3350,7 +3350,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7261"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6896"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -3470,7 +3470,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7272"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6907"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -3531,7 +3531,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7283"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6918"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -3569,7 +3569,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7294"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6929"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -3729,7 +3729,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7305"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6940"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -3913,7 +3913,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7316"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6951"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -4054,7 +4054,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7327"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6962"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -4107,7 +4107,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7338"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6973"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -4250,7 +4250,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7349"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6984"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -4474,7 +4474,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7360"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6995"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -4601,7 +4601,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7371"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7006"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -4768,7 +4768,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7382"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7017"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -4895,7 +4895,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7393"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7028"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -4933,7 +4933,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7404"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7039"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -4972,7 +4972,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7415"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7050"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -4995,7 +4995,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7426"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7061"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -5034,7 +5034,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7437"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7072"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -5057,7 +5057,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7448"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7083"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -5096,7 +5096,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7459"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7094"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -5130,7 +5130,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7470"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7105"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -5184,7 +5184,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7481"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7116"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -5223,7 +5223,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7492"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7127"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -5262,7 +5262,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7503"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7138"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -5297,7 +5297,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7514"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7149"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -5477,7 +5477,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7525"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7160"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -5506,7 +5506,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7536"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7171"
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -5529,7 +5529,7 @@
|
||||
"deprecated": false,
|
||||
"externalDocs": {
|
||||
"description": "Github remote link",
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7547"
|
||||
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7182"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
@ -29,17 +29,18 @@ import (
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/sub/ratelimit"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/lib/unixfs"
	"github.com/filecoin-project/lotus/metrics"
	"github.com/filecoin-project/lotus/node/impl/client"
	"github.com/filecoin-project/lotus/node/impl/full"
)

var log = logging.Logger("sub")
var DefaultHashFunction = unixfs.DefaultHashFunction

var msgCidPrefix = cid.Prefix{
	Version: 1,
	Codec: cid.DagCBOR,
	MhType: client.DefaultHashFunction,
	MhType: DefaultHashFunction,
	MhLength: 32,
}

@ -10,7 +10,6 @@ var Commands = []*cli.Command{
	lcli.WithCategory("basic", lcli.SendCmd),
	lcli.WithCategory("basic", lcli.WalletCmd),
	lcli.WithCategory("basic", lcli.InfoCmd),
	lcli.WithCategory("basic", lcli.ClientCmd),
	lcli.WithCategory("basic", lcli.MultisigCmd),
	lcli.WithCategory("basic", lcli.FilplusCmd),
	lcli.WithCategory("basic", lcli.PaychCmd),
cli/client.go | 2468
File diff suppressed because it is too large
@ -1,550 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/boxo/blockservice"
|
||||
offline "github.com/ipfs/boxo/exchange/offline"
|
||||
"github.com/ipfs/boxo/ipld/merkledag"
|
||||
"github.com/ipfs/go-cid"
|
||||
carv2 "github.com/ipld/go-car/v2"
|
||||
"github.com/ipld/go-car/v2/blockstore"
|
||||
"github.com/ipld/go-ipld-prime"
|
||||
"github.com/ipld/go-ipld-prime/codec/dagjson"
|
||||
basicnode "github.com/ipld/go-ipld-prime/node/basic"
|
||||
"github.com/ipld/go-ipld-prime/traversal"
|
||||
"github.com/ipld/go-ipld-prime/traversal/selector"
|
||||
"github.com/ipld/go-ipld-prime/traversal/selector/builder"
|
||||
selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse"
|
||||
textselector "github.com/ipld/go-ipld-selector-text-lite"
|
||||
"github.com/urfave/cli/v2"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
|
||||
lapi "github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
cliutil "github.com/filecoin-project/lotus/cli/util"
|
||||
"github.com/filecoin-project/lotus/markets/utils"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
|
||||
const DefaultMaxRetrievePrice = "0"
|
||||
|
||||
func retrieve(ctx context.Context, cctx *cli.Context, fapi lapi.FullNode, sel *lapi.Selector, printf func(string, ...interface{})) (*lapi.ExportRef, error) {
|
||||
var payer address.Address
|
||||
var err error
|
||||
if cctx.String("from") != "" {
|
||||
payer, err = address.NewFromString(cctx.String("from"))
|
||||
} else {
|
||||
payer, err = fapi.WalletDefaultAddress(ctx)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
file, err := cid.Parse(cctx.Args().Get(0))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var pieceCid *cid.Cid
|
||||
if cctx.String("pieceCid") != "" {
|
||||
parsed, err := cid.Parse(cctx.String("pieceCid"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pieceCid = &parsed
|
||||
}
|
||||
|
||||
var eref *lapi.ExportRef
|
||||
if cctx.Bool("allow-local") {
|
||||
imports, err := fapi.ClientListImports(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, i := range imports {
|
||||
if i.Root != nil && i.Root.Equals(file) {
|
||||
eref = &lapi.ExportRef{
|
||||
Root: file,
|
||||
FromLocalCAR: i.CARPath,
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// no local found, so make a retrieval
|
||||
if eref == nil {
|
||||
var offer lapi.QueryOffer
|
||||
minerStrAddr := cctx.String("provider")
|
||||
if minerStrAddr == "" { // Local discovery
|
||||
offers, err := fapi.ClientFindData(ctx, file, pieceCid)
|
||||
|
||||
var cleaned []lapi.QueryOffer
|
||||
// filter out offers that errored
|
||||
for _, o := range offers {
|
||||
if o.Err == "" {
|
||||
cleaned = append(cleaned, o)
|
||||
}
|
||||
}
|
||||
|
||||
offers = cleaned
|
||||
|
||||
// sort by price low to high
|
||||
sort.Slice(offers, func(i, j int) bool {
|
||||
return offers[i].MinPrice.LessThan(offers[j].MinPrice)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO: parse offer strings from `client find`, make this smarter
|
||||
if len(offers) < 1 {
|
||||
fmt.Println("Failed to find file")
|
||||
return nil, nil
|
||||
}
|
||||
offer = offers[0]
|
||||
} else { // Directed retrieval
|
||||
minerAddr, err := address.NewFromString(minerStrAddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
offer, err = fapi.ClientMinerQueryOffer(ctx, minerAddr, file, pieceCid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if offer.Err != "" {
|
||||
return nil, fmt.Errorf("offer error: %s", offer.Err)
|
||||
}
|
||||
|
||||
maxPrice := types.MustParseFIL(DefaultMaxRetrievePrice)
|
||||
|
||||
if cctx.String("maxPrice") != "" {
|
||||
maxPrice, err = types.ParseFIL(cctx.String("maxPrice"))
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parsing maxPrice: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if offer.MinPrice.GreaterThan(big.Int(maxPrice)) {
|
||||
return nil, xerrors.Errorf("failed to find offer satisfying maxPrice: %s. Try increasing maxPrice", maxPrice)
|
||||
}
|
||||
|
||||
o := offer.Order(payer)
|
||||
o.DataSelector = sel
|
||||
|
||||
subscribeEvents, err := fapi.ClientGetRetrievalUpdates(ctx)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("error setting up retrieval updates: %w", err)
|
||||
}
|
||||
retrievalRes, err := fapi.ClientRetrieve(ctx, o)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("error setting up retrieval: %w", err)
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
readEvents:
|
||||
for {
|
||||
var evt lapi.RetrievalInfo
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, xerrors.New("Retrieval Timed Out")
|
||||
case evt = <-subscribeEvents:
|
||||
if evt.ID != retrievalRes.DealID {
|
||||
// we can't check the deal ID ahead of time because:
|
||||
// 1. We need to subscribe before retrieving.
|
||||
// 2. We won't know the deal ID until after retrieving.
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
event := "New"
|
||||
if evt.Event != nil {
|
||||
event = retrievalmarket.ClientEvents[*evt.Event]
|
||||
}
|
||||
|
||||
printf("Recv %s, Paid %s, %s (%s), %s [%d|%d]\n",
|
||||
types.SizeStr(types.NewInt(evt.BytesReceived)),
|
||||
types.FIL(evt.TotalPaid),
|
||||
strings.TrimPrefix(event, "ClientEvent"),
|
||||
strings.TrimPrefix(retrievalmarket.DealStatuses[evt.Status], "DealStatus"),
|
||||
time.Now().Sub(start).Truncate(time.Millisecond),
|
||||
evt.ID,
|
||||
types.NewInt(evt.BytesReceived),
|
||||
)
|
||||
|
||||
switch evt.Status {
|
||||
case retrievalmarket.DealStatusCompleted:
|
||||
break readEvents
|
||||
case retrievalmarket.DealStatusRejected:
|
||||
return nil, xerrors.Errorf("Retrieval Proposal Rejected: %s", evt.Message)
|
||||
case retrievalmarket.DealStatusCancelled:
|
||||
return nil, xerrors.Errorf("Retrieval Proposal Cancelled: %s", evt.Message)
|
||||
case
|
||||
retrievalmarket.DealStatusDealNotFound,
|
||||
retrievalmarket.DealStatusErrored:
|
||||
return nil, xerrors.Errorf("Retrieval Error: %s", evt.Message)
|
||||
}
|
||||
}
|
||||
|
||||
eref = &lapi.ExportRef{
|
||||
Root: file,
|
||||
DealID: retrievalRes.DealID,
|
||||
}
|
||||
}
|
||||
|
||||
return eref, nil
|
||||
}
|
||||
|
||||
var retrFlagsCommon = []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "from",
|
||||
Usage: "address to send transactions from",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "provider",
|
||||
Usage: "provider to use for retrieval, if not present it'll use local discovery",
|
||||
Aliases: []string{"miner"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "maxPrice",
|
||||
Usage: fmt.Sprintf("maximum price the client is willing to consider (default: %s FIL)", DefaultMaxRetrievePrice),
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "pieceCid",
|
||||
Usage: "require data to be retrieved from a specific Piece CID",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "allow-local",
|
||||
// todo: default to true?
|
||||
},
|
||||
}
|
||||
|
||||
var clientRetrieveCmd = &cli.Command{
|
||||
Name: "retrieve",
|
||||
Usage: "Retrieve data from network",
|
||||
ArgsUsage: "[dataCid outputPath]",
|
||||
Description: `Retrieve data from the Filecoin network.
|
||||
|
||||
The retrieve command will attempt to find a provider make a retrieval deal with
|
||||
them. In case a provider can't be found, it can be specified with the --provider
|
||||
flag.
|
||||
|
||||
By default the data will be interpreted as DAG-PB UnixFSv1 File. Alternatively
|
||||
a CAR file containing the raw IPLD graph can be exported by setting the --car
|
||||
flag.
|
||||
|
||||
Partial Retrieval:
|
||||
|
||||
The --data-selector flag can be used to specify a sub-graph to fetch. The
|
||||
selector can be specified as either IPLD datamodel text-path selector, or IPLD
|
||||
json selector.
|
||||
|
||||
In case of unixfs retrieval, the selector must point at a single root node, and
|
||||
match the entire graph under that node.
|
||||
|
||||
In case of CAR retrieval, the selector must have one common "sub-root" node.
|
||||
|
||||
Examples:
|
||||
|
||||
- Retrieve a file by CID
|
||||
$ lotus client retrieve Qm... my-file.txt
|
||||
|
||||
- Retrieve a file by CID from f0123
|
||||
$ lotus client retrieve --provider f0123 Qm... my-file.txt
|
||||
|
||||
- Retrieve a first file from a specified directory
|
||||
$ lotus client retrieve --data-selector /Links/0/Hash Qm... my-file.txt
|
||||
`,
|
||||
Flags: append([]cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "car",
|
||||
Usage: "Export to a car file instead of a regular file",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "data-selector",
|
||||
Aliases: []string{"datamodel-path-selector"},
|
||||
Usage: "IPLD datamodel text-path selector, or IPLD json selector",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "car-export-merkle-proof",
|
||||
Usage: "(requires --data-selector and --car) Export data-selector merkle proof",
|
||||
},
|
||||
}, retrFlagsCommon...),
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.NArg() != 2 {
|
||||
return IncorrectNumArgs(cctx)
|
||||
}
|
||||
|
||||
if cctx.Bool("car-export-merkle-proof") {
|
||||
if !cctx.Bool("car") || !cctx.IsSet("data-selector") {
|
||||
return ShowHelp(cctx, fmt.Errorf("--car-export-merkle-proof requires --car and --data-selector"))
|
||||
}
|
||||
}
|
||||
|
||||
fapi, closer, err := GetFullNodeAPIV1(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
ctx := ReqContext(cctx)
|
||||
afmt := NewAppFmt(cctx.App)
|
||||
|
||||
var s *lapi.Selector
|
||||
if sel := lapi.Selector(cctx.String("data-selector")); sel != "" {
|
||||
s = &sel
|
||||
}
|
||||
|
||||
eref, err := retrieve(ctx, cctx, fapi, s, afmt.Printf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if eref == nil {
|
||||
return xerrors.Errorf("failed to find providers")
|
||||
}
|
||||
|
||||
if s != nil {
|
||||
eref.DAGs = append(eref.DAGs, lapi.DagSpec{DataSelector: s, ExportMerkleProof: cctx.Bool("car-export-merkle-proof")})
|
||||
}
|
||||
|
||||
err = fapi.ClientExport(ctx, *eref, lapi.FileRef{
|
||||
Path: cctx.Args().Get(1),
|
||||
IsCAR: cctx.Bool("car"),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
afmt.Println("Success")
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var clientRetrieveCatCmd = &cli.Command{
|
||||
Name: "cat",
|
||||
Usage: "Show data from network",
|
||||
ArgsUsage: "[dataCid]",
|
||||
Flags: append([]cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "ipld",
|
||||
Usage: "list IPLD datamodel links",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "data-selector",
|
||||
Usage: "IPLD datamodel text-path selector, or IPLD json selector",
|
||||
},
|
||||
}, retrFlagsCommon...),
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.NArg() != 1 {
|
||||
return IncorrectNumArgs(cctx)
|
||||
}
|
||||
|
||||
ainfo, err := GetAPIInfo(cctx, repo.FullNode)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("could not get API info: %w", err)
|
||||
}
|
||||
|
||||
fapi, closer, err := GetFullNodeAPIV1(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
ctx := ReqContext(cctx)
|
||||
afmt := NewAppFmt(cctx.App)
|
||||
|
||||
sel := lapi.Selector(cctx.String("data-selector"))
|
||||
selp := &sel
|
||||
if sel == "" {
|
||||
selp = nil
|
||||
}
|
||||
|
||||
eref, err := retrieve(ctx, cctx, fapi, selp, afmt.Printf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println() // separate retrieval events from results
|
||||
|
||||
if sel != "" {
|
||||
eref.DAGs = append(eref.DAGs, lapi.DagSpec{DataSelector: &sel})
|
||||
}
|
||||
|
||||
rc, err := cliutil.ClientExportStream(ainfo.Addr, ainfo.AuthHeader(), *eref, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rc.Close() // nolint
|
||||
|
||||
_, err = io.Copy(os.Stdout, rc)
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
func pathToSel(psel string, matchTraversal bool, sub builder.SelectorSpec) (lapi.Selector, error) {
|
||||
rs, err := textselector.SelectorSpecFromPath(textselector.Expression(psel), matchTraversal, sub)
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("failed to parse path-selector: %w", err)
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
if err := dagjson.Encode(rs.Node(), &b); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return lapi.Selector(b.String()), nil
|
||||
}
|
||||
|
||||
var clientRetrieveLsCmd = &cli.Command{
|
||||
Name: "ls",
|
||||
Usage: "List object links",
|
||||
ArgsUsage: "[dataCid]",
|
||||
Flags: append([]cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "ipld",
|
||||
Usage: "list IPLD datamodel links",
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "depth",
|
||||
Usage: "list links recursively up to the specified depth",
|
||||
Value: 1,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "data-selector",
|
||||
Usage: "IPLD datamodel text-path selector, or IPLD json selector",
|
||||
},
|
||||
}, retrFlagsCommon...),
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.NArg() != 1 {
|
||||
return IncorrectNumArgs(cctx)
|
||||
}
|
||||
|
||||
ainfo, err := GetAPIInfo(cctx, repo.FullNode)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("could not get API info: %w", err)
|
||||
}
|
||||
|
||||
fapi, closer, err := GetFullNodeAPIV1(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
ctx := ReqContext(cctx)
|
||||
afmt := NewAppFmt(cctx.App)
|
||||
|
||||
dataSelector := lapi.Selector(fmt.Sprintf(`{"R":{"l":{"depth":%d},":>":{"a":{">":{"|":[{"@":{}},{".":{}}]}}}}}`, cctx.Int("depth")))
|
||||
|
||||
if cctx.IsSet("data-selector") {
|
||||
ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any)
|
||||
dataSelector, err = pathToSel(cctx.String("data-selector"), cctx.Bool("ipld"),
|
||||
ssb.ExploreUnion(
|
||||
ssb.Matcher(),
|
||||
ssb.ExploreAll(
|
||||
ssb.ExploreRecursive(selector.RecursionLimitDepth(int64(cctx.Int("depth"))), ssb.ExploreAll(ssb.ExploreUnion(ssb.Matcher(), ssb.ExploreRecursiveEdge()))),
|
||||
)))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parsing datamodel path: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
eref, err := retrieve(ctx, cctx, fapi, &dataSelector, afmt.Printf)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("retrieve: %w", err)
|
||||
}
|
||||
|
||||
fmt.Println() // separate retrieval events from results
|
||||
|
||||
eref.DAGs = append(eref.DAGs, lapi.DagSpec{
|
||||
DataSelector: &dataSelector,
|
||||
})
|
||||
|
||||
rc, err := cliutil.ClientExportStream(ainfo.Addr, ainfo.AuthHeader(), *eref, true)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("export: %w", err)
|
||||
}
|
||||
defer rc.Close() // nolint
|
||||
|
||||
var memcar bytes.Buffer
|
||||
_, err = io.Copy(&memcar, rc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cbs, err := blockstore.NewReadOnly(&bytesReaderAt{bytes.NewReader(memcar.Bytes())}, nil,
|
||||
carv2.ZeroLengthSectionAsEOF(true),
|
||||
blockstore.UseWholeCIDs(true))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("opening car blockstore: %w", err)
|
||||
}
|
||||
|
||||
roots, err := cbs.Roots()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting roots: %w", err)
|
||||
}
|
||||
|
||||
if len(roots) != 1 {
|
||||
return xerrors.Errorf("expected 1 car root, got %d", len(roots))
|
||||
}
|
||||
dserv := merkledag.NewDAGService(blockservice.New(cbs, offline.Exchange(cbs)))
|
||||
|
||||
if !cctx.Bool("ipld") {
|
||||
links, err := dserv.GetLinks(ctx, roots[0])
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting links: %w", err)
|
||||
}
|
||||
|
||||
for _, link := range links {
|
||||
fmt.Printf("%s %s\t%d\n", link.Cid, link.Name, link.Size)
|
||||
}
|
||||
} else {
|
||||
jsel := lapi.Selector(fmt.Sprintf(`{"R":{"l":{"depth":%d},":>":{"a":{">":{"|":[{"@":{}},{".":{}}]}}}}}`, cctx.Int("depth")))
|
||||
|
||||
if cctx.IsSet("data-selector") {
|
||||
ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any)
|
||||
jsel, err = pathToSel(cctx.String("data-selector"), false,
|
||||
ssb.ExploreRecursive(selector.RecursionLimitDepth(int64(cctx.Int("depth"))), ssb.ExploreAll(ssb.ExploreUnion(ssb.Matcher(), ssb.ExploreRecursiveEdge()))),
|
||||
)
|
||||
}
|
||||
|
||||
sel, _ := selectorparse.ParseJSONSelector(string(jsel))
|
||||
|
||||
if err := utils.TraverseDag(
|
||||
ctx,
|
||||
dserv,
|
||||
roots[0],
|
||||
sel,
|
||||
nil,
|
||||
func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error {
|
||||
if r == traversal.VisitReason_SelectionMatch {
|
||||
fmt.Println(p.Path)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
type bytesReaderAt struct {
|
||||
btr *bytes.Reader
|
||||
}
|
||||
|
||||
func (b bytesReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
return b.btr.ReadAt(p, off)
|
||||
}
|
||||
|
||||
var _ io.ReaderAt = &bytesReaderAt{}
|
@ -54,7 +54,6 @@ var GetFullNodeAPIV1 = cliutil.GetFullNodeAPIV1
var GetGatewayAPI = cliutil.GetGatewayAPI

var GetStorageMinerAPI = cliutil.GetStorageMinerAPI
var GetMarketsAPI = cliutil.GetMarketsAPI
var GetWorkerAPI = cliutil.GetWorkerAPI

var CommonCommands = []*cli.Command{
cli/info.go | 54
@ -3,9 +3,7 @@ package cli
import (
	"context"
	"fmt"
	"math"
	"os"
	"sort"
	"strings"
	"text/tabwriter"
	"time"
@ -14,7 +12,6 @@ import (
	"github.com/fatih/color"
	"github.com/urfave/cli/v2"

	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/filecoin-project/go-state-types/big"

	"github.com/filecoin-project/lotus/api/v1api"
@ -155,57 +152,6 @@ func infoCmdAct(cctx *cli.Context) error {
		fmt.Printf("Payment Channels: %v channels\n", len(chs))
	}
	fmt.Println()

	localDeals, err := fullapi.ClientListDeals(ctx)
	if err != nil {
		return err
	}

	var totalSize uint64
	byState := map[storagemarket.StorageDealStatus][]uint64{}
	for _, deal := range localDeals {
		totalSize += deal.Size
		byState[deal.State] = append(byState[deal.State], deal.Size)
	}

	fmt.Printf("Deals: %d, %s\n", len(localDeals), types.SizeStr(types.NewInt(totalSize)))

	type stateStat struct {
		state storagemarket.StorageDealStatus
		count int
		bytes uint64
	}

	stateStats := make([]stateStat, 0, len(byState))
	for state, deals := range byState {
		if state == storagemarket.StorageDealActive {
			state = math.MaxUint64 // for sort
		}

		st := stateStat{
			state: state,
			count: len(deals),
		}
		for _, b := range deals {
			st.bytes += b
		}

		stateStats = append(stateStats, st)
	}

	sort.Slice(stateStats, func(i, j int) bool {
		return int64(stateStats[i].state) < int64(stateStats[j].state)
	})

	for _, st := range stateStats {
		if st.state == math.MaxUint64 {
			st.state = storagemarket.StorageDealActive
		}
		fmt.Printf(" %s: %d deals, %s\n", storagemarket.DealStates[st.state], st.count, types.SizeStr(types.NewInt(st.bytes)))
	}

	fmt.Println()

	tw := tabwriter.NewWriter(os.Stdout, 6, 6, 2, ' ', 0)

	s, err := fullapi.NetBandwidthStats(ctx)
|
||||
|
@ -39,11 +39,6 @@ var paychAddFundsCmd = &cli.Command{
|
||||
Usage: "Add funds to the payment channel between fromAddress and toAddress. Creates the payment channel if it doesn't already exist.",
|
||||
ArgsUsage: "[fromAddress toAddress amount]",
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "restart-retrievals",
|
||||
Usage: "restart stalled retrieval deals on this payment channel",
|
||||
Value: true,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "reserve",
|
||||
Usage: "mark funds as reserved",
|
||||
@ -99,10 +94,6 @@ var paychAddFundsCmd = &cli.Command{
|
||||
}
|
||||
|
||||
fmt.Fprintln(cctx.App.Writer, chAddr)
|
||||
restartRetrievals := cctx.Bool("restart-retrievals")
|
||||
if restartRetrievals {
|
||||
return api.ClientRetrieveTryRestartInsufficientFunds(ctx, chAddr)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
@ -456,27 +456,6 @@ func GetWorkerAPI(ctx *cli.Context) (api.Worker, jsonrpc.ClientCloser, error) {
|
||||
return client.NewWorkerRPCV0(ctx.Context, addr, headers)
|
||||
}
|
||||
|
||||
func GetMarketsAPI(ctx *cli.Context) (api.StorageMiner, jsonrpc.ClientCloser, error) {
|
||||
// to support lotus-miner cli tests.
|
||||
if tn, ok := ctx.App.Metadata["testnode-storage"]; ok {
|
||||
return tn.(api.StorageMiner), func() {}, nil
|
||||
}
|
||||
|
||||
addr, headers, err := GetRawAPI(ctx, repo.Markets, "v0")
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if IsVeryVerbose {
|
||||
_, _ = fmt.Fprintln(ctx.App.Writer, "using markets API v0 endpoint:", addr)
|
||||
}
|
||||
|
||||
// the markets node is a specialised miner's node, supporting only the
|
||||
// markets API, which is a subset of the miner API. All non-markets
|
||||
// operations will error out with "unsupported".
|
||||
return client.NewStorageMinerRPCV0(ctx.Context, addr, headers)
|
||||
}
|
||||
|
||||
func GetGatewayAPI(ctx *cli.Context) (api.Gateway, jsonrpc.ClientCloser, error) {
|
||||
addr, headers, err := GetRawAPI(ctx, repo.FullNode, "v1")
|
||||
if err != nil {
|
||||
|
@ -2,7 +2,6 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"testing"
|
||||
"time"
|
||||
@ -43,11 +42,5 @@ func TestMinerAllInfo(t *testing.T) {
|
||||
|
||||
t.Run("pre-info-all", run)
|
||||
|
||||
//stm: @CLIENT_DATA_IMPORT_001, @CLIENT_STORAGE_DEALS_GET_001
|
||||
dh := kit.NewDealHarness(t, client, miner, miner)
|
||||
deal, res, inPath := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{Rseed: 6})
|
||||
outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
|
||||
kit.AssertFilesEqual(t, inPath, outPath)
|
||||
|
||||
t.Run("post-info-all", run)
|
||||
}
|
||||
|
@ -1,282 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/fatih/color"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
"github.com/filecoin-project/lotus/lib/tablewriter"
|
||||
)
|
||||
|
||||
var dagstoreCmd = &cli.Command{
|
||||
Name: "dagstore",
|
||||
Usage: "Manage the dagstore on the markets subsystem",
|
||||
Subcommands: []*cli.Command{
|
||||
dagstoreListShardsCmd,
|
||||
dagstoreRegisterShardCmd,
|
||||
dagstoreInitializeShardCmd,
|
||||
dagstoreRecoverShardCmd,
|
||||
dagstoreInitializeAllCmd,
|
||||
dagstoreGcCmd,
|
||||
dagstoreLookupPiecesCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var dagstoreListShardsCmd = &cli.Command{
|
||||
Name: "list-shards",
|
||||
Usage: "List all shards known to the dagstore, with their current status",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
shards, err := marketsApi.DagstoreListShards(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return printTableShards(shards)
|
||||
},
|
||||
}
|
||||
|
||||
var dagstoreRegisterShardCmd = &cli.Command{
|
||||
Name: "register-shard",
|
||||
ArgsUsage: "[key]",
|
||||
Usage: "Register a shard",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.NArg() != 1 {
|
||||
return lcli.IncorrectNumArgs(cctx)
|
||||
}
|
||||
|
||||
marketsAPI, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
shardKey := cctx.Args().First()
|
||||
err = marketsAPI.DagstoreRegisterShard(ctx, shardKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("Registered shard " + shardKey)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var dagstoreInitializeShardCmd = &cli.Command{
|
||||
Name: "initialize-shard",
|
||||
ArgsUsage: "[key]",
|
||||
Usage: "Initialize the specified shard",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.NArg() != 1 {
|
||||
return lcli.IncorrectNumArgs(cctx)
|
||||
}
|
||||
|
||||
marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
return marketsApi.DagstoreInitializeShard(ctx, cctx.Args().First())
|
||||
},
|
||||
}
|
||||
|
||||
var dagstoreRecoverShardCmd = &cli.Command{
|
||||
Name: "recover-shard",
|
||||
ArgsUsage: "[key]",
|
||||
Usage: "Attempt to recover a shard in errored state",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.NArg() != 1 {
|
||||
return lcli.IncorrectNumArgs(cctx)
|
||||
}
|
||||
|
||||
marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
return marketsApi.DagstoreRecoverShard(ctx, cctx.Args().First())
|
||||
},
|
||||
}
|
||||
|
||||
var dagstoreInitializeAllCmd = &cli.Command{
|
||||
Name: "initialize-all",
|
||||
Usage: "Initialize all uninitialized shards, streaming results as they're produced; only shards for unsealed pieces are initialized by default",
|
||||
Flags: []cli.Flag{
|
||||
&cli.UintFlag{
|
||||
Name: "concurrency",
|
||||
Usage: "maximum shards to initialize concurrently at a time; use 0 for unlimited",
|
||||
Required: true,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "include-sealed",
|
||||
Usage: "initialize sealed pieces as well",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
concurrency := cctx.Uint("concurrency")
|
||||
sealed := cctx.Bool("include-sealed")
|
||||
|
||||
marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
params := api.DagstoreInitializeAllParams{
|
||||
MaxConcurrency: int(concurrency),
|
||||
IncludeSealed: sealed,
|
||||
}
|
||||
|
||||
ch, err := marketsApi.DagstoreInitializeAll(ctx, params)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case evt, ok := <-ch:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
_, _ = fmt.Fprint(os.Stdout, color.New(color.BgHiBlack).Sprintf("(%d/%d)", evt.Current, evt.Total))
|
||||
_, _ = fmt.Fprint(os.Stdout, " ")
|
||||
if evt.Event == "start" {
|
||||
_, _ = fmt.Fprintln(os.Stdout, evt.Key, color.New(color.Reset).Sprint("STARTING"))
|
||||
} else {
|
||||
if evt.Success {
|
||||
_, _ = fmt.Fprintln(os.Stdout, evt.Key, color.New(color.FgGreen).Sprint("SUCCESS"))
|
||||
} else {
|
||||
_, _ = fmt.Fprintln(os.Stdout, evt.Key, color.New(color.FgRed).Sprint("ERROR"), evt.Error)
|
||||
}
|
||||
}
|
||||
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("aborted")
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var dagstoreGcCmd = &cli.Command{
|
||||
Name: "gc",
|
||||
Usage: "Garbage collect the dagstore",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
collected, err := marketsApi.DagstoreGC(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(collected) == 0 {
|
||||
_, _ = fmt.Fprintln(os.Stdout, "no shards collected")
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, e := range collected {
|
||||
if e.Error == "" {
|
||||
_, _ = fmt.Fprintln(os.Stdout, e.Key, color.New(color.FgGreen).Sprint("SUCCESS"))
|
||||
} else {
|
||||
_, _ = fmt.Fprintln(os.Stdout, e.Key, color.New(color.FgRed).Sprint("ERROR"), e.Error)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func printTableShards(shards []api.DagstoreShardInfo) error {
|
||||
if len(shards) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
tw := tablewriter.New(
|
||||
tablewriter.Col("Key"),
|
||||
tablewriter.Col("State"),
|
||||
tablewriter.Col("Error"),
|
||||
)
|
||||
|
||||
colors := map[string]color.Attribute{
|
||||
"ShardStateAvailable": color.FgGreen,
|
||||
"ShardStateServing": color.FgBlue,
|
||||
"ShardStateErrored": color.FgRed,
|
||||
"ShardStateNew": color.FgYellow,
|
||||
}
|
||||
|
||||
for _, s := range shards {
|
||||
m := map[string]interface{}{
|
||||
"Key": s.Key,
|
||||
"State": func() string {
|
||||
trimmedState := strings.TrimPrefix(s.State, "ShardState")
|
||||
if c, ok := colors[s.State]; ok {
|
||||
return color.New(c).Sprint(trimmedState)
|
||||
}
|
||||
return trimmedState
|
||||
}(),
|
||||
"Error": s.Error,
|
||||
}
|
||||
tw.Write(m)
|
||||
}
|
||||
return tw.Flush(os.Stdout)
|
||||
}
|
||||
|
||||
var dagstoreLookupPiecesCmd = &cli.Command{
|
||||
Name: "lookup-pieces",
|
||||
Usage: "Lookup pieces that a given CID belongs to",
|
||||
ArgsUsage: "<cid>",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.NArg() != 1 {
|
||||
return lcli.IncorrectNumArgs(cctx)
|
||||
}
|
||||
|
||||
cidStr := cctx.Args().First()
|
||||
cid, err := cid.Parse(cidStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid CID: %w", err)
|
||||
}
|
||||
|
||||
marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
shards, err := marketsApi.DagstoreLookupPieces(ctx, cid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return printTableShards(shards)
|
||||
},
|
||||
}
|
@ -1,62 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
)
|
||||
|
||||
var indexProvCmd = &cli.Command{
|
||||
Name: "index",
|
||||
Usage: "Manage the index provider on the markets subsystem",
|
||||
Subcommands: []*cli.Command{
|
||||
indexProvAnnounceCmd,
|
||||
indexProvAnnounceAllCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var indexProvAnnounceCmd = &cli.Command{
|
||||
Name: "announce",
|
||||
ArgsUsage: "<deal proposal cid>",
|
||||
Usage: "Announce a deal to indexers so they can download its index",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.NArg() != 1 {
|
||||
return lcli.IncorrectNumArgs(cctx)
|
||||
}
|
||||
|
||||
proposalCidStr := cctx.Args().First()
|
||||
proposalCid, err := cid.Parse(proposalCidStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid deal proposal CID: %w", err)
|
||||
}
|
||||
|
||||
marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
return marketsApi.IndexerAnnounceDeal(ctx, proposalCid)
|
||||
},
|
||||
}
|
||||
|
||||
var indexProvAnnounceAllCmd = &cli.Command{
|
||||
Name: "announce-all",
|
||||
Usage: "Announce all active deals to indexers so they can download the indices",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
return marketsApi.IndexerAnnounceAllDeals(ctx)
|
||||
},
|
||||
}
|
@ -112,72 +112,6 @@ var infoAllCmd = &cli.Command{
|
||||
fmt.Println("ERROR: ", err)
|
||||
}
|
||||
|
||||
fmt.Println("\n#: Storage Ask")
|
||||
if err := getAskCmd.Action(cctx); err != nil {
|
||||
fmt.Println("ERROR: ", err)
|
||||
}
|
||||
|
||||
fmt.Println("\n#: Storage Deals")
|
||||
{
|
||||
fs := &flag.FlagSet{}
|
||||
for _, f := range dealsListCmd.Flags {
|
||||
if err := f.Apply(fs); err != nil {
|
||||
fmt.Println("ERROR: ", err)
|
||||
}
|
||||
}
|
||||
if err := fs.Parse([]string{"--verbose"}); err != nil {
|
||||
fmt.Println("ERROR: ", err)
|
||||
}
|
||||
|
||||
if err := dealsListCmd.Action(cli.NewContext(cctx.App, fs, cctx)); err != nil {
|
||||
fmt.Println("ERROR: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("\n#: Storage Deals JSON")
|
||||
{
|
||||
fs := &flag.FlagSet{}
|
||||
for _, f := range dealsListCmd.Flags {
|
||||
if err := f.Apply(fs); err != nil {
|
||||
fmt.Println("ERROR: ", err)
|
||||
}
|
||||
}
|
||||
if err := fs.Parse([]string{"--verbose", "--format=json"}); err != nil {
|
||||
fmt.Println("ERROR: ", err)
|
||||
}
|
||||
|
||||
if err := dealsListCmd.Action(cli.NewContext(cctx.App, fs, cctx)); err != nil {
|
||||
fmt.Println("ERROR: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("\n#: Data Transfers")
|
||||
{
|
||||
fs := &flag.FlagSet{}
|
||||
for _, f := range transfersListCmd.Flags {
|
||||
if err := f.Apply(fs); err != nil {
|
||||
fmt.Println("ERROR: ", err)
|
||||
}
|
||||
}
|
||||
if err := fs.Parse([]string{"--verbose", "--completed", "--show-failed"}); err != nil {
|
||||
fmt.Println("ERROR: ", err)
|
||||
}
|
||||
|
||||
if err := transfersListCmd.Action(cli.NewContext(cctx.App, fs, cctx)); err != nil {
|
||||
fmt.Println("ERROR: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("\n#: DAGStore shards")
|
||||
if err := dagstoreListShardsCmd.Action(cctx); err != nil {
|
||||
fmt.Println("ERROR: ", err)
|
||||
}
|
||||
|
||||
fmt.Println("\n#: Pending Batch Deals")
|
||||
if err := dealsPendingPublish.Action(cctx); err != nil {
|
||||
fmt.Println("ERROR: ", err)
|
||||
}
|
||||
|
||||
fmt.Println("\n#: Pending Batch Terminations")
|
||||
if err := sectorsTerminatePendingCmd.Action(cctx); err != nil {
|
||||
fmt.Println("ERROR: ", err)
|
||||
@ -217,11 +151,6 @@ var infoAllCmd = &cli.Command{
|
||||
fmt.Println("ERROR: ", err)
|
||||
}
|
||||
|
||||
fmt.Println("\n#: Sector Refs")
|
||||
if err := sectorsRefsCmd.Action(cctx); err != nil {
|
||||
fmt.Println("ERROR: ", err)
|
||||
}
|
||||
|
||||
// Very Very Verbose info
|
||||
fmt.Println("\n#: Per Sector Info")
|
||||
|
||||
|
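The infoAllCmd block above drives other subcommands programmatically by rebuilding each command's flag set, parsing the desired arguments, and calling the Action with a child context. Below is a condensed, self-contained sketch of that pattern; the `list`/`all` commands and the `--verbose` flag are illustrative only and are not part of the Lotus CLI.

```
package main

import (
	"flag"
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

// runWithFlags invokes another command's Action with an explicit set of
// flag arguments, mirroring how infoAllCmd drives dealsListCmd above.
func runWithFlags(cctx *cli.Context, cmd *cli.Command, args ...string) error {
	fs := &flag.FlagSet{}
	for _, f := range cmd.Flags {
		if err := f.Apply(fs); err != nil {
			return err
		}
	}
	if err := fs.Parse(args); err != nil {
		return err
	}
	return cmd.Action(cli.NewContext(cctx.App, fs, cctx))
}

var listCmd = &cli.Command{
	Name:  "list",
	Flags: []cli.Flag{&cli.BoolFlag{Name: "verbose"}},
	Action: func(cctx *cli.Context) error {
		fmt.Println("verbose:", cctx.Bool("verbose"))
		return nil
	},
}

var allCmd = &cli.Command{
	Name: "all",
	Action: func(cctx *cli.Context) error {
		// Re-run "list" as if it had been invoked with --verbose.
		return runWithFlags(cctx, listCmd, "--verbose")
	},
}

func main() {
	app := &cli.App{Commands: []*cli.Command{listCmd, allCmd}}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```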
@ -43,16 +43,10 @@ func main() {
|
||||
backupCmd,
|
||||
lcli.WithCategory("chain", actorCmd),
|
||||
lcli.WithCategory("chain", infoCmd),
|
||||
lcli.WithCategory("market", setHidden(storageDealsCmd)),
|
||||
lcli.WithCategory("market", setHidden(retrievalDealsCmd)),
|
||||
lcli.WithCategory("market", setHidden(dataTransfersCmd)),
|
||||
lcli.WithCategory("market", setHidden(dagstoreCmd)),
|
||||
lcli.WithCategory("market", setHidden(indexProvCmd)),
|
||||
lcli.WithCategory("storage", sectorsCmd),
|
||||
lcli.WithCategory("storage", provingCmd),
|
||||
lcli.WithCategory("storage", storageCmd),
|
||||
lcli.WithCategory("storage", sealingCmd),
|
||||
lcli.WithCategory("retrieval", setHidden(piecesCmd)),
|
||||
}
|
||||
|
||||
jaeger := tracing.SetupJaegerTracing("lotus")
|
||||
@ -193,11 +187,6 @@ func getActorAddress(ctx context.Context, cctx *cli.Context) (maddr address.Addr
|
||||
return maddr, nil
|
||||
}
|
||||
|
||||
func setHidden(cmd *cli.Command) *cli.Command {
|
||||
cmd.Hidden = true
|
||||
return cmd
|
||||
}
|
||||
|
||||
func LMActorOrEnvGetter(cctx *cli.Context) (address.Address, error) {
|
||||
return getActorAddress(cctx.Context, cctx)
|
||||
}
|
||||
|
File diff suppressed because it is too large
@ -1,193 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
"github.com/filecoin-project/lotus/lib/tablewriter"
|
||||
)
|
||||
|
||||
var piecesCmd = &cli.Command{
|
||||
Name: "pieces",
|
||||
Usage: "interact with the piecestore",
|
||||
Description: "The piecestore is a database that tracks and manages data that is made available to the retrieval market",
|
||||
Subcommands: []*cli.Command{
|
||||
piecesListPiecesCmd,
|
||||
piecesListCidInfosCmd,
|
||||
piecesInfoCmd,
|
||||
piecesCidInfoCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var piecesListPiecesCmd = &cli.Command{
|
||||
Name: "list-pieces",
|
||||
Usage: "list registered pieces",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
nodeApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
pieceCids, err := nodeApi.PiecesListPieces(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, pc := range pieceCids {
|
||||
fmt.Println(pc)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var piecesListCidInfosCmd = &cli.Command{
|
||||
Name: "list-cids",
|
||||
Usage: "list registered payload CIDs",
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "verbose",
|
||||
Aliases: []string{"v"},
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
nodeApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
cids, err := nodeApi.PiecesListCidInfos(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w := tablewriter.New(tablewriter.Col("CID"),
|
||||
tablewriter.Col("Piece"),
|
||||
tablewriter.Col("BlockOffset"),
|
||||
tablewriter.Col("BlockLen"),
|
||||
tablewriter.Col("Deal"),
|
||||
tablewriter.Col("Sector"),
|
||||
tablewriter.Col("DealOffset"),
|
||||
tablewriter.Col("DealLen"),
|
||||
)
|
||||
|
||||
for _, c := range cids {
|
||||
if !cctx.Bool("verbose") {
|
||||
fmt.Println(c)
|
||||
continue
|
||||
}
|
||||
|
||||
ci, err := nodeApi.PiecesGetCIDInfo(ctx, c)
|
||||
if err != nil {
|
||||
fmt.Printf("Error getting CID info: %s\n", err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, location := range ci.PieceBlockLocations {
|
||||
pi, err := nodeApi.PiecesGetPieceInfo(ctx, location.PieceCID)
|
||||
if err != nil {
|
||||
fmt.Printf("Error getting piece info: %s\n", err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, deal := range pi.Deals {
|
||||
w.Write(map[string]interface{}{
|
||||
"CID": c,
|
||||
"Piece": location.PieceCID,
|
||||
"BlockOffset": location.RelOffset,
|
||||
"BlockLen": location.BlockSize,
|
||||
"Deal": deal.DealID,
|
||||
"Sector": deal.SectorID,
|
||||
"DealOffset": deal.Offset,
|
||||
"DealLen": deal.Length,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if cctx.Bool("verbose") {
|
||||
return w.Flush(os.Stdout)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var piecesInfoCmd = &cli.Command{
|
||||
Name: "piece-info",
|
||||
Usage: "get registered information for a given piece CID",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if !cctx.Args().Present() {
|
||||
return lcli.ShowHelp(cctx, fmt.Errorf("must specify piece cid"))
|
||||
}
|
||||
|
||||
nodeApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
c, err := cid.Decode(cctx.Args().First())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pi, err := nodeApi.PiecesGetPieceInfo(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("Piece: ", pi.PieceCID)
|
||||
w := tabwriter.NewWriter(os.Stdout, 4, 4, 2, ' ', 0)
|
||||
fmt.Fprintln(w, "Deals:\nDealID\tSectorID\tLength\tOffset")
|
||||
for _, d := range pi.Deals {
|
||||
fmt.Fprintf(w, "%d\t%d\t%d\t%d\n", d.DealID, d.SectorID, d.Length, d.Offset)
|
||||
}
|
||||
return w.Flush()
|
||||
},
|
||||
}
|
||||
|
||||
var piecesCidInfoCmd = &cli.Command{
|
||||
Name: "cid-info",
|
||||
Usage: "get registered information for a given payload CID",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if !cctx.Args().Present() {
|
||||
return lcli.ShowHelp(cctx, fmt.Errorf("must specify payload cid"))
|
||||
}
|
||||
|
||||
nodeApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
c, err := cid.Decode(cctx.Args().First())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ci, err := nodeApi.PiecesGetCIDInfo(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("Info for: ", ci.CID)
|
||||
|
||||
w := tabwriter.NewWriter(os.Stdout, 4, 4, 2, ' ', 0)
|
||||
fmt.Fprintf(w, "PieceCid\tOffset\tSize\n")
|
||||
for _, loc := range ci.PieceBlockLocations {
|
||||
fmt.Fprintf(w, "%s\t%d\t%d\n", loc.PieceCID, loc.RelOffset, loc.BlockSize)
|
||||
}
|
||||
return w.Flush()
|
||||
},
|
||||
}
|
@ -1,231 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/docker/go-units"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
)
|
||||
|
||||
var retrievalDealsCmd = &cli.Command{
|
||||
Name: "retrieval-deals",
|
||||
Usage: "Manage retrieval deals and related configuration",
|
||||
Subcommands: []*cli.Command{
|
||||
retrievalDealSelectionCmd,
|
||||
retrievalSetAskCmd,
|
||||
retrievalGetAskCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var retrievalDealSelectionCmd = &cli.Command{
|
||||
Name: "selection",
|
||||
Usage: "Configure acceptance criteria for retrieval deal proposals",
|
||||
Subcommands: []*cli.Command{
|
||||
retrievalDealSelectionShowCmd,
|
||||
retrievalDealSelectionResetCmd,
|
||||
retrievalDealSelectionRejectCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var retrievalDealSelectionShowCmd = &cli.Command{
|
||||
Name: "list",
|
||||
Usage: "List retrieval deal proposal selection criteria",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
smapi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
onlineOk, err := smapi.DealsConsiderOnlineRetrievalDeals(lcli.DaemonContext(cctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
offlineOk, err := smapi.DealsConsiderOfflineRetrievalDeals(lcli.DaemonContext(cctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("considering online retrieval deals: %t\n", onlineOk)
|
||||
fmt.Printf("considering offline retrieval deals: %t\n", offlineOk)
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var retrievalDealSelectionResetCmd = &cli.Command{
|
||||
Name: "reset",
|
||||
Usage: "Reset retrieval deal proposal selection criteria to default values",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
smapi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
err = smapi.DealsSetConsiderOnlineRetrievalDeals(lcli.DaemonContext(cctx), true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = smapi.DealsSetConsiderOfflineRetrievalDeals(lcli.DaemonContext(cctx), true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var retrievalDealSelectionRejectCmd = &cli.Command{
|
||||
Name: "reject",
|
||||
Usage: "Configure criteria which necessitate automatic rejection",
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "online",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "offline",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
smapi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
if cctx.Bool("online") {
|
||||
err = smapi.DealsSetConsiderOnlineRetrievalDeals(lcli.DaemonContext(cctx), false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if cctx.Bool("offline") {
|
||||
err = smapi.DealsSetConsiderOfflineRetrievalDeals(lcli.DaemonContext(cctx), false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var retrievalSetAskCmd = &cli.Command{
|
||||
Name: "set-ask",
|
||||
Usage: "Configure the provider's retrieval ask",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "price",
|
||||
Usage: "Set the price of the ask for retrievals (FIL/GiB)",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "unseal-price",
|
||||
Usage: "Set the price to unseal",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "payment-interval",
|
||||
Usage: "Set the payment interval (in bytes) for retrieval",
|
||||
DefaultText: "1MiB",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "payment-interval-increase",
|
||||
Usage: "Set the payment interval increase (in bytes) for retrieval",
|
||||
DefaultText: "1MiB",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
ctx := lcli.DaemonContext(cctx)
|
||||
|
||||
api, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ask, err := api.MarketGetRetrievalAsk(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cctx.IsSet("price") {
|
||||
v, err := types.ParseFIL(cctx.String("price"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ask.PricePerByte = types.BigDiv(types.BigInt(v), types.NewInt(1<<30))
|
||||
}
|
||||
|
||||
if cctx.IsSet("unseal-price") {
|
||||
v, err := types.ParseFIL(cctx.String("unseal-price"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ask.UnsealPrice = abi.TokenAmount(v)
|
||||
}
|
||||
|
||||
if cctx.IsSet("payment-interval") {
|
||||
v, err := units.RAMInBytes(cctx.String("payment-interval"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ask.PaymentInterval = uint64(v)
|
||||
}
|
||||
|
||||
if cctx.IsSet("payment-interval-increase") {
|
||||
v, err := units.RAMInBytes(cctx.String("payment-interval-increase"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ask.PaymentIntervalIncrease = uint64(v)
|
||||
}
|
||||
|
||||
return api.MarketSetRetrievalAsk(ctx, ask)
|
||||
},
|
||||
}
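For reference, the price conversion in set-ask above takes the parsed FIL/GiB value (in attoFIL) and divides it by 2^30 to obtain the per-byte price stored on the ask. A minimal standard-library sketch of the same arithmetic, using a purely illustrative ask of 0.000001 FIL/GiB (10^12 attoFIL):

```
package main

import (
	"fmt"
	"math/big"
)

func main() {
	perGiB := big.NewInt(1_000_000_000_000)                // hypothetical ask: 1e12 attoFIL per GiB
	perByte := new(big.Int).Div(perGiB, big.NewInt(1<<30)) // 1 GiB = 2^30 bytes
	fmt.Println(perByte, "attoFIL per byte")               // prints 931 attoFIL per byte
}
```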
|
||||
|
||||
var retrievalGetAskCmd = &cli.Command{
|
||||
Name: "get-ask",
|
||||
Usage: "Get the provider's current retrieval ask configured by the provider in the ask-store using the set-ask CLI command",
|
||||
Flags: []cli.Flag{},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
ctx := lcli.DaemonContext(cctx)
|
||||
|
||||
api, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ask, err := api.MarketGetRetrievalAsk(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
|
||||
_, _ = fmt.Fprintf(w, "Price per Byte\tUnseal Price\tPayment Interval\tPayment Interval Increase\n")
|
||||
if ask == nil {
|
||||
_, _ = fmt.Fprintf(w, "<miner does not have an retrieval ask set>\n")
|
||||
return w.Flush()
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\n",
|
||||
types.FIL(ask.PricePerByte),
|
||||
types.FIL(ask.UnsealPrice),
|
||||
units.BytesSize(float64(ask.PaymentInterval)),
|
||||
units.BytesSize(float64(ask.PaymentIntervalIncrease)),
|
||||
)
|
||||
return w.Flush()
|
||||
|
||||
},
|
||||
}
|
@ -588,7 +588,7 @@ var sectorsRefsCmd = &cli.Command{
|
||||
Name: "refs",
|
||||
Usage: "List References to sectors",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
nodeApi, closer, err := lcli.GetMarketsAPI(cctx)
|
||||
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -48,7 +48,6 @@ func main() {
|
||||
proofsCmd,
|
||||
verifRegCmd,
|
||||
marketCmd,
|
||||
miscCmd,
|
||||
mpoolCmd,
|
||||
helloCmd,
|
||||
genesisVerifyCmd,
|
||||
|
@ -1,40 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
)
|
||||
|
||||
var miscCmd = &cli.Command{
|
||||
Name: "misc",
|
||||
Usage: "Assorted unsorted commands for various purposes",
|
||||
Flags: []cli.Flag{},
|
||||
Subcommands: []*cli.Command{
|
||||
dealStateMappingCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var dealStateMappingCmd = &cli.Command{
|
||||
Name: "deal-state",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if !cctx.Args().Present() {
|
||||
return cli.ShowCommandHelp(cctx, cctx.Command.Name)
|
||||
}
|
||||
|
||||
num, err := strconv.Atoi(cctx.Args().First())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ststr, ok := storagemarket.DealStates[uint64(num)]
|
||||
if !ok {
|
||||
return fmt.Errorf("no such deal state %d", num)
|
||||
}
|
||||
fmt.Println(ststr)
|
||||
return nil
|
||||
},
|
||||
}
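The deal-state command above translates a single numeric code. When you do not know the code in advance, it can be handy to dump the whole storagemarket.DealStates table instead; a small sketch of that, assuming nothing beyond the map already used above:

```
package main

import (
	"fmt"
	"sort"

	"github.com/filecoin-project/go-fil-markets/storagemarket"
)

func main() {
	codes := make([]uint64, 0, len(storagemarket.DealStates))
	for code := range storagemarket.DealStates {
		codes = append(codes, code)
	}
	sort.Slice(codes, func(i, j int) bool { return codes[i] < codes[j] })
	for _, code := range codes {
		// Print each numeric status alongside its human-readable name.
		fmt.Printf("%3d  %s\n", code, storagemarket.DealStates[code])
	}
}
```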
|
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -19,7 +19,6 @@ COMMANDS:
|
||||
send Send funds between accounts
|
||||
wallet Manage wallet
|
||||
info Print node info
|
||||
client Make deals, store data, retrieve data
|
||||
msig Interact with a multisig wallet
|
||||
filplus Interact with the verified registry actor used by Filplus
|
||||
paych Manage payment channels
|
||||
@ -403,515 +402,6 @@ OPTIONS:
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
## lotus client
|
||||
```
|
||||
NAME:
|
||||
lotus client - Make deals, store data, retrieve data
|
||||
|
||||
USAGE:
|
||||
lotus client command [command options] [arguments...]
|
||||
|
||||
COMMANDS:
|
||||
help, h Shows a list of commands or help for one command
|
||||
DATA:
|
||||
import Import data
|
||||
drop Remove import
|
||||
local List locally imported data
|
||||
stat Print information about a locally stored file (piece size, etc)
|
||||
RETRIEVAL:
|
||||
find Find data in the network
|
||||
retrieval-ask Get a miner's retrieval ask
|
||||
retrieve Retrieve data from network
|
||||
cat Show data from network
|
||||
ls List object links
|
||||
cancel-retrieval Cancel a retrieval deal by deal ID; this also cancels the associated transfer
|
||||
list-retrievals List retrieval market deals
|
||||
STORAGE:
|
||||
deal Initialize storage deal with a miner
|
||||
query-ask Find a miner's ask
|
||||
list-deals List storage market deals
|
||||
get-deal Print detailed deal information
|
||||
list-asks List asks for top miners
|
||||
deal-stats Print statistics about local storage deals
|
||||
inspect-deal Inspect detailed information about a deal's lifecycle and the various stages it goes through
|
||||
UTIL:
|
||||
commP Calculate the piece-cid (commP) of a CAR file
|
||||
generate-car Generate a car file from input
|
||||
balances Print storage market client balances
|
||||
list-transfers List ongoing data transfers for deals
|
||||
restart-transfer Force restart a stalled data transfer
|
||||
cancel-transfer Force cancel a data transfer
|
||||
|
||||
OPTIONS:
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client import
|
||||
```
|
||||
NAME:
|
||||
lotus client import - Import data
|
||||
|
||||
USAGE:
|
||||
lotus client import [command options] [inputPath]
|
||||
|
||||
CATEGORY:
|
||||
DATA
|
||||
|
||||
OPTIONS:
|
||||
--car import from a car file instead of a regular file (default: false)
|
||||
--quiet, -q Output root CID only (default: false)
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client drop
|
||||
```
|
||||
NAME:
|
||||
lotus client drop - Remove import
|
||||
|
||||
USAGE:
|
||||
lotus client drop [command options] [import ID...]
|
||||
|
||||
CATEGORY:
|
||||
DATA
|
||||
|
||||
OPTIONS:
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client local
|
||||
```
|
||||
NAME:
|
||||
lotus client local - List locally imported data
|
||||
|
||||
USAGE:
|
||||
lotus client local [command options] [arguments...]
|
||||
|
||||
CATEGORY:
|
||||
DATA
|
||||
|
||||
OPTIONS:
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client stat
|
||||
```
|
||||
NAME:
|
||||
lotus client stat - Print information about a locally stored file (piece size, etc)
|
||||
|
||||
USAGE:
|
||||
lotus client stat [command options] <cid>
|
||||
|
||||
CATEGORY:
|
||||
DATA
|
||||
|
||||
OPTIONS:
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client find
|
||||
```
|
||||
NAME:
|
||||
lotus client find - Find data in the network
|
||||
|
||||
USAGE:
|
||||
lotus client find [command options] [dataCid]
|
||||
|
||||
CATEGORY:
|
||||
RETRIEVAL
|
||||
|
||||
OPTIONS:
|
||||
--pieceCid value require data to be retrieved from a specific Piece CID
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client retrieval-ask
|
||||
```
|
||||
NAME:
|
||||
lotus client retrieval-ask - Get a miner's retrieval ask
|
||||
|
||||
USAGE:
|
||||
lotus client retrieval-ask [command options] [minerAddress] [data CID]
|
||||
|
||||
CATEGORY:
|
||||
RETRIEVAL
|
||||
|
||||
OPTIONS:
|
||||
--size value data size in bytes (default: 0)
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client retrieve
|
||||
```
|
||||
NAME:
|
||||
lotus client retrieve - Retrieve data from network
|
||||
|
||||
USAGE:
|
||||
lotus client retrieve [command options] [dataCid outputPath]
|
||||
|
||||
CATEGORY:
|
||||
RETRIEVAL
|
||||
|
||||
DESCRIPTION:
|
||||
Retrieve data from the Filecoin network.
|
||||
|
||||
The retrieve command will attempt to find a provider and make a retrieval deal with
|
||||
them. In case a provider can't be found, it can be specified with the --provider
|
||||
flag.
|
||||
|
||||
By default the data will be interpreted as DAG-PB UnixFSv1 File. Alternatively
|
||||
a CAR file containing the raw IPLD graph can be exported by setting the --car
|
||||
flag.
|
||||
|
||||
Partial Retrieval:
|
||||
|
||||
The --data-selector flag can be used to specify a sub-graph to fetch. The
|
||||
selector can be specified as either IPLD datamodel text-path selector, or IPLD
|
||||
json selector.
|
||||
|
||||
In case of unixfs retrieval, the selector must point at a single root node, and
|
||||
match the entire graph under that node.
|
||||
|
||||
In case of CAR retrieval, the selector must have one common "sub-root" node.
|
||||
|
||||
Examples:
|
||||
|
||||
- Retrieve a file by CID
|
||||
$ lotus client retrieve Qm... my-file.txt
|
||||
|
||||
- Retrieve a file by CID from f0123
|
||||
$ lotus client retrieve --provider f0123 Qm... my-file.txt
|
||||
|
||||
- Retrieve a first file from a specified directory
|
||||
$ lotus client retrieve --data-selector /Links/0/Hash Qm... my-file.txt
|
||||
|
||||
|
||||
OPTIONS:
|
||||
--car Export to a car file instead of a regular file (default: false)
|
||||
--data-selector value, --datamodel-path-selector value IPLD datamodel text-path selector, or IPLD json selector
|
||||
--car-export-merkle-proof (requires --data-selector and --car) Export data-selector merkle proof (default: false)
|
||||
--from value address to send transactions from
|
||||
--provider value, --miner value provider to use for retrieval, if not present it'll use local discovery
|
||||
--maxPrice value maximum price the client is willing to consider (default: 0 FIL)
|
||||
--pieceCid value require data to be retrieved from a specific Piece CID
|
||||
--allow-local (default: false)
|
||||
--help, -h show help
|
||||
```
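When retrieving with --car, the exported CAR file is rooted at the CID that was requested. A small sketch of checking that root with the go-car ReadHeader helper that also appears in the integration tests further down this diff; the retrieved.car file name is illustrative:

```
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"

	"github.com/ipld/go-car"
)

func main() {
	f, err := os.Open("retrieved.car")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// The CAR header lists the root CID(s) the archive was built from.
	hdr, err := car.ReadHeader(bufio.NewReader(f))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("roots:", hdr.Roots)
}
```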
|
||||
|
||||
### lotus client cat
|
||||
```
|
||||
NAME:
|
||||
lotus client cat - Show data from network
|
||||
|
||||
USAGE:
|
||||
lotus client cat [command options] [dataCid]
|
||||
|
||||
CATEGORY:
|
||||
RETRIEVAL
|
||||
|
||||
OPTIONS:
|
||||
--ipld list IPLD datamodel links (default: false)
|
||||
--data-selector value IPLD datamodel text-path selector, or IPLD json selector
|
||||
--from value address to send transactions from
|
||||
--provider value, --miner value provider to use for retrieval, if not present it'll use local discovery
|
||||
--maxPrice value maximum price the client is willing to consider (default: 0 FIL)
|
||||
--pieceCid value require data to be retrieved from a specific Piece CID
|
||||
--allow-local (default: false)
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client ls
|
||||
```
|
||||
NAME:
|
||||
lotus client ls - List object links
|
||||
|
||||
USAGE:
|
||||
lotus client ls [command options] [dataCid]
|
||||
|
||||
CATEGORY:
|
||||
RETRIEVAL
|
||||
|
||||
OPTIONS:
|
||||
--ipld list IPLD datamodel links (default: false)
|
||||
--depth value list links recursively up to the specified depth (default: 1)
|
||||
--data-selector value IPLD datamodel text-path selector, or IPLD json selector
|
||||
--from value address to send transactions from
|
||||
--provider value, --miner value provider to use for retrieval, if not present it'll use local discovery
|
||||
--maxPrice value maximum price the client is willing to consider (default: 0 FIL)
|
||||
--pieceCid value require data to be retrieved from a specific Piece CID
|
||||
--allow-local (default: false)
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client cancel-retrieval
|
||||
```
|
||||
NAME:
|
||||
lotus client cancel-retrieval - Cancel a retrieval deal by deal ID; this also cancels the associated transfer
|
||||
|
||||
USAGE:
|
||||
lotus client cancel-retrieval [command options] [arguments...]
|
||||
|
||||
CATEGORY:
|
||||
RETRIEVAL
|
||||
|
||||
OPTIONS:
|
||||
--deal-id value specify retrieval deal by deal ID (default: 0)
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client list-retrievals
|
||||
```
|
||||
NAME:
|
||||
lotus client list-retrievals - List retrieval market deals
|
||||
|
||||
USAGE:
|
||||
lotus client list-retrievals [command options] [arguments...]
|
||||
|
||||
CATEGORY:
|
||||
RETRIEVAL
|
||||
|
||||
OPTIONS:
|
||||
--verbose, -v print verbose deal details (default: false)
|
||||
--show-failed show failed/failing deals (default: true)
|
||||
--completed show completed retrievals (default: false)
|
||||
--watch watch deal updates in real-time, rather than a one time list (default: false)
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client deal
|
||||
```
|
||||
NAME:
|
||||
lotus client deal - Initialize storage deal with a miner
|
||||
|
||||
USAGE:
|
||||
lotus client deal [command options] [dataCid miner price duration]
|
||||
|
||||
CATEGORY:
|
||||
STORAGE
|
||||
|
||||
DESCRIPTION:
|
||||
Make a deal with a miner.
|
||||
dataCid comes from running 'lotus client import'.
|
||||
miner is the address of the miner you wish to make a deal with.
|
||||
price is measured in FIL/Epoch. Miners usually don't accept a bid
|
||||
lower than their advertised ask (which is in FIL/GiB/Epoch). You can check a miner's listed price
|
||||
with 'lotus client query-ask <miner address>'.
|
||||
duration is how long the miner should store the data for, in blocks.
|
||||
The minimum value is 518400 (6 months).
|
||||
|
||||
OPTIONS:
|
||||
--manual-piece-cid value manually specify piece commitment for data (dataCid must be to a car file)
|
||||
--manual-piece-size value if manually specifying piece cid, used to specify size (dataCid must be to a car file) (default: 0)
|
||||
--manual-stateless-deal instructs the node to send an offline deal without registering it with the deallist/fsm (default: false)
|
||||
--from value specify address to fund the deal with
|
||||
--start-epoch value specify the epoch that the deal should start at (default: -1)
|
||||
--fast-retrieval indicates that data should be available for fast retrieval (default: true)
|
||||
--verified-deal indicate that the deal counts towards verified client total (default: true if client is verified, false otherwise)
|
||||
--provider-collateral value specify the requested provider collateral the miner should put up
|
||||
--help, -h show help
|
||||
```
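To make the units above concrete: at the mainnet epoch time of 30 seconds, the documented minimum duration of 518400 epochs works out to 180 days, and since price is quoted per epoch the total cost is simply price times duration. A quick sketch with an illustrative price, not a real ask:

```
package main

import (
	"fmt"
	"time"
)

func main() {
	const minDuration = 518400 // epochs, the documented minimum
	const epochSeconds = 30    // mainnet epoch time, assumed here

	// 518400 epochs * 30s = 15,552,000s = 4320h, i.e. 180 days.
	fmt.Println(time.Duration(minDuration*epochSeconds) * time.Second)

	pricePerEpoch := 0.0000000005 // FIL/Epoch, purely illustrative
	fmt.Printf("%.10f FIL total\n", pricePerEpoch*minDuration) // 0.0002592000 FIL
}
```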
|
||||
|
||||
### lotus client query-ask
|
||||
```
|
||||
NAME:
|
||||
lotus client query-ask - Find a miner's ask
|
||||
|
||||
USAGE:
|
||||
lotus client query-ask [command options] [minerAddress]
|
||||
|
||||
CATEGORY:
|
||||
STORAGE
|
||||
|
||||
OPTIONS:
|
||||
--peerid value specify peer ID of node to make query against
|
||||
--size value data size in bytes (default: 0)
|
||||
--duration value deal duration (default: 0)
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client list-deals
|
||||
```
|
||||
NAME:
|
||||
lotus client list-deals - List storage market deals
|
||||
|
||||
USAGE:
|
||||
lotus client list-deals [command options] [arguments...]
|
||||
|
||||
CATEGORY:
|
||||
STORAGE
|
||||
|
||||
OPTIONS:
|
||||
--verbose, -v print verbose deal details (default: false)
|
||||
--show-failed show failed/failing deals (default: false)
|
||||
--watch watch deal updates in real-time, rather than a one time list (default: false)
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client get-deal
|
||||
```
|
||||
NAME:
|
||||
lotus client get-deal - Print detailed deal information
|
||||
|
||||
USAGE:
|
||||
lotus client get-deal [command options] [proposalCID]
|
||||
|
||||
CATEGORY:
|
||||
STORAGE
|
||||
|
||||
OPTIONS:
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client list-asks
|
||||
```
|
||||
NAME:
|
||||
lotus client list-asks - List asks for top miners
|
||||
|
||||
USAGE:
|
||||
lotus client list-asks [command options] [arguments...]
|
||||
|
||||
CATEGORY:
|
||||
STORAGE
|
||||
|
||||
OPTIONS:
|
||||
--by-ping sort by ping (default: false)
|
||||
--output-format value Either 'text' or 'csv' (default: "text")
|
||||
--protocols Output supported deal protocols (default: false)
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client deal-stats
|
||||
```
|
||||
NAME:
|
||||
lotus client deal-stats - Print statistics about local storage deals
|
||||
|
||||
USAGE:
|
||||
lotus client deal-stats [command options] [arguments...]
|
||||
|
||||
CATEGORY:
|
||||
STORAGE
|
||||
|
||||
OPTIONS:
|
||||
--newer-than value (default: 0s)
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client inspect-deal
|
||||
```
|
||||
NAME:
|
||||
lotus client inspect-deal - Inspect detailed information about a deal's lifecycle and the various stages it goes through
|
||||
|
||||
USAGE:
|
||||
lotus client inspect-deal [command options] [arguments...]
|
||||
|
||||
CATEGORY:
|
||||
STORAGE
|
||||
|
||||
OPTIONS:
|
||||
--deal-id value (default: 0)
|
||||
--proposal-cid value
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client commP
|
||||
```
|
||||
NAME:
|
||||
lotus client commP - Calculate the piece-cid (commP) of a CAR file
|
||||
|
||||
USAGE:
|
||||
lotus client commP [command options] [inputFile]
|
||||
|
||||
CATEGORY:
|
||||
UTIL
|
||||
|
||||
OPTIONS:
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client generate-car
|
||||
```
|
||||
NAME:
|
||||
lotus client generate-car - Generate a car file from input
|
||||
|
||||
USAGE:
|
||||
lotus client generate-car [command options] [inputPath outputPath]
|
||||
|
||||
CATEGORY:
|
||||
UTIL
|
||||
|
||||
OPTIONS:
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client balances
|
||||
```
|
||||
NAME:
|
||||
lotus client balances - Print storage market client balances
|
||||
|
||||
USAGE:
|
||||
lotus client balances [command options] [arguments...]
|
||||
|
||||
CATEGORY:
|
||||
UTIL
|
||||
|
||||
OPTIONS:
|
||||
--client value specify storage client address
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client list-transfers
|
||||
```
|
||||
NAME:
|
||||
lotus client list-transfers - List ongoing data transfers for deals
|
||||
|
||||
USAGE:
|
||||
lotus client list-transfers [command options] [arguments...]
|
||||
|
||||
CATEGORY:
|
||||
UTIL
|
||||
|
||||
OPTIONS:
|
||||
--verbose, -v print verbose transfer details (default: false)
|
||||
--completed show completed data transfers (default: false)
|
||||
--watch watch deal updates in real-time, rather than a one time list (default: false)
|
||||
--show-failed show failed/cancelled transfers (default: false)
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client restart-transfer
|
||||
```
|
||||
NAME:
|
||||
lotus client restart-transfer - Force restart a stalled data transfer
|
||||
|
||||
USAGE:
|
||||
lotus client restart-transfer [command options] [transferID]
|
||||
|
||||
CATEGORY:
|
||||
UTIL
|
||||
|
||||
OPTIONS:
|
||||
--peerid value narrow to transfer with specific peer
|
||||
--initiator specify only transfers where peer is/is not initiator (default: true)
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus client cancel-transfer
|
||||
```
|
||||
NAME:
|
||||
lotus client cancel-transfer - Force cancel a data transfer
|
||||
|
||||
USAGE:
|
||||
lotus client cancel-transfer [command options] [transferID]
|
||||
|
||||
CATEGORY:
|
||||
UTIL
|
||||
|
||||
OPTIONS:
|
||||
--peerid value narrow to transfer with specific peer
|
||||
--initiator specify only transfers where peer is/is not initiator (default: true)
|
||||
--cancel-timeout value time to wait for cancel to be sent to storage provider (default: 5s)
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
## lotus msig
|
||||
```
|
||||
NAME:
|
||||
@ -1380,9 +870,8 @@ USAGE:
|
||||
lotus paych add-funds [command options] [fromAddress toAddress amount]
|
||||
|
||||
OPTIONS:
|
||||
--restart-retrievals restart stalled retrieval deals on this payment channel (default: true)
|
||||
--reserve mark funds as reserved (default: false)
|
||||
--help, -h show help
|
||||
--reserve mark funds as reserved (default: false)
|
||||
--help, -h show help
|
||||
```
|
||||
|
||||
### lotus paych list
|
||||
|
go.mod
@ -15,7 +15,6 @@ require (
|
||||
github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa
|
||||
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
|
||||
github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921
|
||||
github.com/buger/goterm v1.0.3
|
||||
github.com/chzyer/readline v1.5.1
|
||||
github.com/containerd/cgroups v1.1.0
|
||||
github.com/coreos/go-systemd/v22 v22.5.0
|
||||
@ -41,7 +40,6 @@ require (
|
||||
github.com/filecoin-project/go-crypto v0.0.1
|
||||
github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc8
|
||||
github.com/filecoin-project/go-fil-commcid v0.1.0
|
||||
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0
|
||||
github.com/filecoin-project/go-fil-markets v1.28.3
|
||||
github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0
|
||||
github.com/filecoin-project/go-jsonrpc v0.4.0
|
||||
@ -124,7 +122,6 @@ require (
|
||||
github.com/multiformats/go-base32 v0.1.0
|
||||
github.com/multiformats/go-multiaddr v0.12.4
|
||||
github.com/multiformats/go-multiaddr-dns v0.3.1
|
||||
github.com/multiformats/go-multibase v0.2.0
|
||||
github.com/multiformats/go-multicodec v0.9.0
|
||||
github.com/multiformats/go-multihash v0.2.3
|
||||
github.com/multiformats/go-varint v0.0.7
|
||||
@ -199,6 +196,7 @@ require (
|
||||
github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 // indirect
|
||||
github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 // indirect
|
||||
github.com/filecoin-project/go-ds-versioning v0.1.2 // indirect
|
||||
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 // indirect
|
||||
github.com/filecoin-project/go-hamt-ipld v0.1.5 // indirect
|
||||
github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 // indirect
|
||||
github.com/flynn/noise v1.1.0 // indirect
|
||||
@ -279,6 +277,7 @@ require (
|
||||
github.com/mr-tron/base58 v1.2.0 // indirect
|
||||
github.com/multiformats/go-base36 v0.2.0 // indirect
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
|
||||
github.com/multiformats/go-multibase v0.2.0 // indirect
|
||||
github.com/multiformats/go-multistream v0.5.0 // indirect
|
||||
github.com/nikkolasg/hexjson v0.1.0 // indirect
|
||||
github.com/nkovacs/streamquote v1.0.0 // indirect
|
||||
|
go.sum
@ -152,8 +152,6 @@ github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/buger/goterm v1.0.3 h1:7V/HeAQHrzPk/U4BvyH2g9u+xbUW9nr4yRPyG59W4fM=
|
||||
github.com/buger/goterm v1.0.3/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE=
|
||||
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
|
||||
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
|
||||
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
|
||||
@ -2108,7 +2106,6 @@ golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
|
@ -1,147 +0,0 @@
|
||||
// stm: #integration
|
||||
package itests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
|
||||
"github.com/filecoin-project/lotus/itests/kit"
|
||||
"github.com/filecoin-project/lotus/markets/storageadapter"
|
||||
"github.com/filecoin-project/lotus/node"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
"github.com/filecoin-project/lotus/node/modules"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/storage/pipeline/sealiface"
|
||||
)
|
||||
|
||||
func TestBatchDealInput(t *testing.T) {
|
||||
//stm: @MINER_SECTOR_STATUS_001, @MINER_SECTOR_LIST_001
|
||||
kit.QuietMiningLogs()
|
||||
|
||||
var (
|
||||
blockTime = 10 * time.Millisecond
|
||||
|
||||
// For these tests where the block time is artificially short, just use
|
||||
// a deal start epoch that is guaranteed to be far enough in the future
|
||||
// so that the deal starts sealing in time
|
||||
dealStartEpoch = abi.ChainEpoch(2 << 12)
|
||||
)
|
||||
|
||||
run := func(piece, deals, expectSectors int) func(t *testing.T) {
|
||||
return func(t *testing.T) {
|
||||
t.Logf("batchtest start")
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
publishPeriod := 10 * time.Second
|
||||
maxDealsPerMsg := uint64(deals)
|
||||
|
||||
// Set max deals per publish deals message to maxDealsPerMsg
|
||||
opts := kit.ConstructorOpts(node.Options(
|
||||
node.Override(
|
||||
new(*storageadapter.DealPublisher),
|
||||
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
|
||||
Period: publishPeriod,
|
||||
MaxDealsPerMsg: maxDealsPerMsg,
|
||||
})),
|
||||
node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
|
||||
return func() (sealiface.Config, error) {
|
||||
cfg := config.DefaultStorageMiner()
|
||||
sc := modules.ToSealingConfig(cfg.Dealmaking, cfg.Sealing)
|
||||
sc.MaxWaitDealsSectors = 2
|
||||
sc.MaxSealingSectors = 1
|
||||
sc.MaxSealingSectorsForDeals = 3
|
||||
sc.AlwaysKeepUnsealedCopy = true
|
||||
sc.WaitDealsDelay = time.Hour
|
||||
sc.AggregateCommits = false
|
||||
|
||||
return sc, nil
|
||||
}, nil
|
||||
}),
|
||||
))
|
||||
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts, kit.ThroughRPC())
|
||||
ens.InterconnectAll().BeginMining(blockTime)
|
||||
dh := kit.NewDealHarness(t, client, miner, miner)
|
||||
|
||||
err := miner.MarketSetAsk(ctx, big.Zero(), big.Zero(), 200, 128, 32<<30)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Logf("batchtest ask set")
|
||||
|
||||
checkNoPadding := func() {
|
||||
sl, err := miner.SectorsListNonGenesis(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
sort.Slice(sl, func(i, j int) bool {
|
||||
return sl[i] < sl[j]
|
||||
})
|
||||
|
||||
for _, snum := range sl {
|
||||
si, err := miner.SectorsStatus(ctx, snum, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// fmt.Printf("S %d: %+v %s\n", snum, si.Deals, si.State)
|
||||
|
||||
for _, deal := range si.Deals {
|
||||
if deal == 0 {
|
||||
fmt.Printf("sector %d had a padding piece!\n", snum)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Starts a deal and waits until it's sealed
|
||||
runDealTillSeal := func(rseed int) {
|
||||
res, _, _, err := kit.CreateImportFile(ctx, client, rseed, piece)
|
||||
require.NoError(t, err)
|
||||
|
||||
dp := dh.DefaultStartDealParams()
|
||||
dp.Data.Root = res.Root
|
||||
dp.DealStartEpoch = dealStartEpoch
|
||||
|
||||
deal := dh.StartDeal(ctx, dp)
|
||||
dh.WaitDealSealed(ctx, deal, false, true, checkNoPadding)
|
||||
}
|
||||
|
||||
// Run maxDealsPerMsg deals in parallel
|
||||
done := make(chan struct{}, maxDealsPerMsg)
|
||||
for rseed := 0; rseed < int(maxDealsPerMsg); rseed++ {
|
||||
rseed := rseed
|
||||
go func() {
|
||||
runDealTillSeal(rseed)
|
||||
done <- struct{}{}
|
||||
}()
|
||||
}
|
||||
|
||||
t.Logf("batchtest deals started")
|
||||
|
||||
// Wait for maxDealsPerMsg of the deals to be sealed
|
||||
for i := 0; i < int(maxDealsPerMsg); i++ {
|
||||
<-done
|
||||
}
|
||||
|
||||
t.Logf("batchtest deals published")
|
||||
|
||||
checkNoPadding()
|
||||
|
||||
t.Logf("batchtest no padding")
|
||||
|
||||
sl, err := miner.SectorsListNonGenesis(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(sl), expectSectors)
|
||||
|
||||
t.Logf("batchtest done")
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("4-p1600B", run(1600, 4, 4))
|
||||
t.Run("4-p513B", run(513, 4, 2))
|
||||
}
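The expected sector counts in the two cases above follow from piece padding, assuming the test harness's default 2 KiB sectors: each raw piece is expanded by roughly 128/127 (Fr32) and rounded up to a power of two, so a 1600-byte piece occupies a full sector on its own while two padded 513-byte pieces fit in one. A rough sketch of that arithmetic; the padding formula is an approximation, not the exact proofs code:

```
package main

import (
	"fmt"
	"math/bits"
)

// paddedSize approximates the sealed space a raw piece ends up occupying:
// Fr32 expansion by 128/127, then rounding up to the next power of two.
func paddedSize(raw uint64) uint64 {
	expanded := raw * 128 / 127
	if expanded&(expanded-1) == 0 {
		return expanded
	}
	return 1 << bits.Len64(expanded)
}

func main() {
	fmt.Println(paddedSize(1600)) // 2048 -> one 1600B piece fills a 2 KiB sector, so 4 deals need 4 sectors
	fmt.Println(paddedSize(513))  // 1024 -> two 513B pieces share a 2 KiB sector, so 4 deals need 2 sectors
}
```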
|
@ -1,27 +0,0 @@
|
||||
// stm: #integration
|
||||
package itests
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/lotus/cli/clicommands"
|
||||
"github.com/filecoin-project/lotus/itests/kit"
|
||||
)
|
||||
|
||||
// TestClient does a basic test to exercise the client CLI commands.
|
||||
func TestClient(t *testing.T) {
|
||||
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
|
||||
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
|
||||
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
|
||||
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
|
||||
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
|
||||
_ = os.Setenv("BELLMAN_NO_GPU", "1")
|
||||
kit.QuietMiningLogs()
|
||||
|
||||
blockTime := 5 * time.Millisecond
|
||||
client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC())
|
||||
ens.InterconnectAll().BeginMining(blockTime)
|
||||
kit.RunClientTest(t, clicommands.Commands, client)
|
||||
}
|
@ -1,51 +0,0 @@
|
||||
// stm: #integration
|
||||
package itests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/itests/kit"
|
||||
)
|
||||
|
||||
func TestStorageDealMissingBlock(t *testing.T) {
|
||||
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
|
||||
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
|
||||
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
|
||||
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
|
||||
|
||||
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
|
||||
//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
|
||||
ctx := context.Background()
|
||||
|
||||
kit.QuietMiningLogs()
|
||||
|
||||
client, miner, ens := kit.EnsembleMinimal(t,
|
||||
kit.MockProofs(),
|
||||
kit.SectorSize(512<<20), // 512MiB sectors.
|
||||
)
|
||||
ens.InterconnectAll().BeginMining(50 * time.Millisecond)
|
||||
|
||||
dh := kit.NewDealHarness(t, client, miner, miner)
|
||||
|
||||
client.WaitTillChain(ctx, kit.HeightAtLeast(5))
|
||||
|
||||
res, _ := client.CreateImportFile(ctx, 0, 64<<20) // 64MiB file.
|
||||
list, err := client.ClientListImports(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, list, 1)
|
||||
require.Equal(t, res.Root, *list[0].Root)
|
||||
|
||||
dp := dh.DefaultStartDealParams()
|
||||
dp.Data.Root = res.Root
|
||||
dp.FastRetrieval = true
|
||||
dp.EpochPrice = abi.NewTokenAmount(62500000) // minimum asking price.
|
||||
deal := dh.StartDeal(ctx, dp)
|
||||
|
||||
dh.WaitDealSealed(ctx, deal, false, false, nil)
|
||||
}
|
@ -1,162 +0,0 @@
|
||||
package itests
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
dag "github.com/ipfs/boxo/ipld/merkledag"
|
||||
"github.com/ipfs/go-cid"
|
||||
ipldcbor "github.com/ipfs/go-ipld-cbor"
|
||||
format "github.com/ipfs/go-ipld-format"
|
||||
"github.com/ipld/go-car"
|
||||
"github.com/ipld/go-car/v2/blockstore"
|
||||
selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/itests/kit"
|
||||
"github.com/filecoin-project/lotus/node"
|
||||
"github.com/filecoin-project/lotus/node/modules"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
)
|
||||
|
||||
func TestDealRetrieveByAnyCid(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
kit.QuietMiningLogs()
|
||||
|
||||
// For these tests where the block time is artificially short, just use
|
||||
// a deal start epoch that is guaranteed to be far enough in the future
|
||||
// so that the deal starts sealing in time
|
||||
startEpoch := abi.ChainEpoch(2 << 12)
|
||||
|
||||
// Override the dependency injection for the blockstore accessor, so that
|
||||
// we can get a reference to the blockstore containing our deal later in
|
||||
// the test
|
||||
var bsa storagemarket.BlockstoreAccessor
|
||||
bsaFn := func(importmgr dtypes.ClientImportMgr) storagemarket.BlockstoreAccessor {
bsa = modules.StorageBlockstoreAccessor(importmgr)
return bsa
}
bsaOpt := kit.ConstructorOpts(node.Override(new(storagemarket.BlockstoreAccessor), bsaFn))
// Allow 8MB sectors
eightMBSectorsOpt := kit.SectorSize(8 << 20)
// Create a client, and a miner with its own full node
_, client, miner, ens := kit.EnsembleTwoOne(t, kit.MockProofs(), bsaOpt, eightMBSectorsOpt)
ens.InterconnectAll().BeginMining(250 * time.Millisecond)
dh := kit.NewDealHarness(t, client, miner, miner)
// Generate a DAG with multiple levels, so that we can test the case where
// the client requests a CID for a block which is not the root block but
// does have a subtree below it in the DAG
dagOpts := kit.GeneratedDAGOpts{
// Max size of a block
ChunkSize: 1024,
// Max links from a block to other blocks
Maxlinks: 10,
}
carv1FilePath, _ := kit.CreateRandomCARv1(t, 5, 100*1024, dagOpts)
res, err := client.ClientImport(ctx, api.FileRef{Path: carv1FilePath, IsCAR: true})
require.NoError(t, err)
// Get the blockstore for the file
bs, err := bsa.Get(res.Root)
require.NoError(t, err)
// Get all CIDs from the file
sc := car.NewSelectiveCar(ctx, bs, []car.Dag{{Root: res.Root, Selector: selectorparse.CommonSelector_ExploreAllRecursively}})
prepared, err := sc.Prepare()
require.NoError(t, err)
reg := format.Registry{}
reg.Register(cid.DagProtobuf, dag.DecodeProtobufBlock)
reg.Register(cid.DagCBOR, ipldcbor.DecodeBlock)
reg.Register(cid.Raw, dag.DecodeRawBlock)
cids := prepared.Cids()
for i, c := range cids {
blk, err := bs.Get(ctx, c)
require.NoError(t, err)
nd, err := reg.Decode(blk)
require.NoError(t, err)
t.Log(i, c, len(nd.Links()))
}
// Create a storage deal
dp := dh.DefaultStartDealParams()
dp.Data.Root = res.Root
dp.DealStartEpoch = startEpoch
dp.EpochPrice = abi.NewTokenAmount(62500000) // minimum asking price
dealCid := dh.StartDeal(ctx, dp)
// Wait for the deal to be sealed
dh.WaitDealSealed(ctx, dealCid, false, false, nil)
ask, err := miner.MarketGetRetrievalAsk(ctx)
require.NoError(t, err)
ask.PricePerByte = abi.NewTokenAmount(0)
ask.UnsealPrice = abi.NewTokenAmount(0)
err = miner.MarketSetRetrievalAsk(ctx, ask)
require.NoError(t, err)
// Fetch the deal data
info, err := client.ClientGetDealInfo(ctx, *dealCid)
require.NoError(t, err)
// Make retrievals against CIDs at different levels in the DAG
cidIndices := []int{1, 11, 27, 32, 47}
for _, val := range cidIndices {
t.Logf("performing retrieval for cid at index %d", val)
targetCid := cids[val]
offer, err := client.ClientMinerQueryOffer(ctx, miner.ActorAddr, targetCid, &info.PieceCID)
require.NoError(t, err)
require.Empty(t, offer.Err)
// retrieve in a CAR file and ensure roots match
outputCar := dh.PerformRetrieval(ctx, dealCid, targetCid, true, offer)
_, err = os.Stat(outputCar)
require.NoError(t, err)
f, err := os.Open(outputCar)
require.NoError(t, err)
ch, err := car.ReadHeader(bufio.NewReader(f))
require.NoError(t, err)
require.EqualValues(t, ch.Roots[0], targetCid)
require.NoError(t, f.Close())
// create CAR from original file starting at targetCid and ensure it matches the retrieved CAR file.
tmp, err := os.CreateTemp(t.TempDir(), "randcarv1")
require.NoError(t, err)
rd, err := blockstore.OpenReadOnly(carv1FilePath, blockstore.UseWholeCIDs(true))
require.NoError(t, err)
err = car.NewSelectiveCar(
ctx,
rd,
[]car.Dag{{
Root: targetCid,
Selector: selectorparse.CommonSelector_ExploreAllRecursively,
}},
).Write(tmp)
require.NoError(t, err)
require.NoError(t, tmp.Close())
require.NoError(t, rd.Close())
kit.AssertFilesEqual(t, tmp.Name(), outputCar)
t.Log("car files match")
}
}
@ -1,212 +0,0 @@
// stm: #integration
package itests
import (
"context"
"fmt"
"sync"
"testing"
"time"
provider "github.com/ipni/index-provider"
"github.com/stretchr/testify/require"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/shared_testutil"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/repo"
)
// TestDealWithMarketAndMinerNode is running concurrently a number of storage and retrieval deals towards a miner
// architecture where the `mining/sealing/proving` node is a separate process from the `markets` node
func TestDealWithMarketAndMinerNode(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
if testing.Short() {
t.Skip("skipping test in short mode")
}
t.Skip("skipping due to flakiness: see #6956")
kit.QuietMiningLogs()
// For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
startEpoch := abi.ChainEpoch(8 << 10)
runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) {
api.RunningNodeType = api.NodeMiner // TODO(anteva): fix me
idxProv := shared_testutil.NewMockIndexProvider()
idxProvOpt := kit.ConstructorOpts(node.Override(new(provider.Interface), idxProv))
client, main, market, _ := kit.EnsembleWithMinerAndMarketNodes(t, kit.ThroughRPC(), idxProvOpt)
dh := kit.NewDealHarness(t, client, main, market)
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{
N: n,
FastRetrieval: fastRetrieval,
CarExport: carExport,
StartEpoch: startEpoch,
IndexProvider: idxProv,
})
}
// this test is expensive because we don't use mock proofs; do a single cycle.
cycles := []int{4}
for _, n := range cycles {
n := n
ns := fmt.Sprintf("%d", n)
t.Run(ns+"-fastretrieval-CAR", func(t *testing.T) { runTest(t, n, true, true) })
t.Run(ns+"-fastretrieval-NoCAR", func(t *testing.T) { runTest(t, n, true, false) })
t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, false, true) })
t.Run(ns+"-stdretrieval-NoCAR", func(t *testing.T) { runTest(t, n, false, false) })
}
}
func TestDealCyclesConcurrent(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
if testing.Short() {
t.Skip("skipping test in short mode")
}
kit.QuietMiningLogs()
// For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
startEpoch := abi.ChainEpoch(2 << 12)
runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) {
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
ens.InterconnectAll().BeginMining(250 * time.Millisecond)
dh := kit.NewDealHarness(t, client, miner, miner)
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{
N: n,
FastRetrieval: fastRetrieval,
CarExport: carExport,
StartEpoch: startEpoch,
})
}
// this test is cheap because we use mock proofs, do various cycles
cycles := []int{2, 4, 8, 16}
for _, n := range cycles {
n := n
ns := fmt.Sprintf("%d", n)
t.Run(ns+"-fastretrieval-CAR", func(t *testing.T) { runTest(t, n, true, true) })
t.Run(ns+"-fastretrieval-NoCAR", func(t *testing.T) { runTest(t, n, true, false) })
t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, false, true) })
t.Run(ns+"-stdretrieval-NoCAR", func(t *testing.T) { runTest(t, n, false, false) })
}
}
func TestSimultanenousTransferLimit(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
t.Skip("skipping as flaky #7152")
if testing.Short() {
t.Skip("skipping test in short mode")
}
kit.QuietMiningLogs()
// For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
startEpoch := abi.ChainEpoch(2 << 12)
const (
graphsyncThrottle = 2
concurrency = 20
)
runTest := func(t *testing.T) {
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts(
node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(graphsyncThrottle, 0, graphsyncThrottle))),
node.Override(new(dtypes.Graphsync), modules.Graphsync(graphsyncThrottle, graphsyncThrottle)),
))
ens.InterconnectAll().BeginMining(250 * time.Millisecond)
dh := kit.NewDealHarness(t, client, miner, miner)
ctx, cancel := context.WithCancel(context.Background())
du, err := miner.MarketDataTransferUpdates(ctx)
require.NoError(t, err)
var maxOngoing int
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
ongoing := map[datatransfer.TransferID]struct{}{}
for {
select {
case u := <-du:
t.Logf("%d - %s", u.TransferID, datatransfer.Statuses[u.Status])
if u.Status == datatransfer.Ongoing && u.Transferred > 0 {
ongoing[u.TransferID] = struct{}{}
} else {
delete(ongoing, u.TransferID)
}
if len(ongoing) > maxOngoing {
maxOngoing = len(ongoing)
}
case <-ctx.Done():
return
}
}
}()
t.Logf("running concurrent deals: %d", concurrency)
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{
N: concurrency,
FastRetrieval: true,
StartEpoch: startEpoch,
})
t.Logf("all deals finished")
cancel()
wg.Wait()
// The eventing systems across go-data-transfer and go-graphsync
// are racy, and that's why we can't enforce graphsyncThrottle exactly,
// without making this test racy.
//
// Essentially what could happen is that the graphsync layer starts the
// next transfer before the go-data-transfer FSM has the opportunity to
// move the previously completed transfer to the next stage, thus giving
// the appearance that more than graphsyncThrottle transfers are
// in progress.
//
// Concurrency (20) is x10 higher than graphsyncThrottle (2), so if all
// 20 transfers are not happening at once, we know the throttle is
// in effect. Thus we are a little bit lenient here to account for the
// above races and allow up to graphsyncThrottle*2.
require.LessOrEqual(t, maxOngoing, graphsyncThrottle*2)
}
runTest(t)
}
@ -1,70 +0,0 @@
// stm: #integration
package itests
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/itests/kit"
)
func TestMaxStagingDeals(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
ctx := context.Background()
kit.QuietMiningLogs()
client, miner, ens := kit.EnsembleMinimal(t,
kit.MockProofs(),
kit.WithMaxStagingDealsBytes(8192), // max 8KB staging deals
kit.SectorSize(512<<20), // 512MiB sectors.
)
ens.InterconnectAll().BeginMining(200 * time.Millisecond)
dh := kit.NewDealHarness(t, client, miner, miner)
client.WaitTillChain(ctx, kit.HeightAtLeast(5))
res, _ := client.CreateImportFile(ctx, 0, 8192) // 8KB file
list, err := client.ClientListImports(ctx)
require.NoError(t, err)
require.Len(t, list, 1)
res2, _ := client.CreateImportFile(ctx, 0, 4096)
list, err = client.ClientListImports(ctx)
require.NoError(t, err)
require.Len(t, list, 2)
// first deal stays in staging area, and is not yet passed to the sealing subsystem
dp := dh.DefaultStartDealParams()
dp.Data.Root = res.Root
dp.FastRetrieval = true
dp.EpochPrice = abi.NewTokenAmount(62500000) // minimum asking price.
deal := dh.StartDeal(ctx, dp)
time.Sleep(1 * time.Second)
// expecting second deal to fail since staging area is full
dp.Data.Root = res2.Root
dp.FastRetrieval = true
dp.EpochPrice = abi.NewTokenAmount(62500000) // minimum asking price.
deal2 := dh.StartDeal(ctx, dp)
_ = deal
err = dh.ExpectDealFailure(ctx, deal2, "cannot accept deal as miner is overloaded at the moment")
if err != nil {
t.Fatal(err)
}
}
@ -1,107 +0,0 @@
// stm: #integration
package itests
import (
"context"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/require"
commcid "github.com/filecoin-project/go-fil-commcid"
commp "github.com/filecoin-project/go-fil-commp-hashhash"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/itests/kit"
)
func TestOfflineDealFlow(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
//stm: @CLIENT_DATA_CALCULATE_COMMP_001, @CLIENT_DATA_GENERATE_CAR_001, @CLIENT_DATA_GET_DEAL_PIECE_CID_001, @CLIENT_DATA_GET_DEAL_PIECE_CID_001
runTest := func(t *testing.T, fastRet bool, upscale abi.PaddedPieceSize) {
ctx := context.Background()
client, miner, ens := kit.EnsembleMinimal(t, kit.WithAllSubsystems()) // no mock proofs
ens.InterconnectAll().BeginMining(250 * time.Millisecond)
dh := kit.NewDealHarness(t, client, miner, miner)
// Create a random file and import on the client.
res, inFile := client.CreateImportFile(ctx, 1, 200)
// Get the piece size and commP
rootCid := res.Root
pieceInfo, err := client.ClientDealPieceCID(ctx, rootCid)
require.NoError(t, err)
t.Log("FILE CID:", rootCid)
// test whether padding works as intended
if upscale > 0 {
newRawCp, err := commp.PadCommP(
pieceInfo.PieceCID.Hash()[len(pieceInfo.PieceCID.Hash())-32:],
uint64(pieceInfo.PieceSize),
uint64(upscale),
)
require.NoError(t, err)
pieceInfo.PieceSize = upscale
pieceInfo.PieceCID, err = commcid.DataCommitmentV1ToCID(newRawCp)
require.NoError(t, err)
}
dp := dh.DefaultStartDealParams()
dp.DealStartEpoch = abi.ChainEpoch(4 << 10)
dp.FastRetrieval = fastRet
// Replace with params for manual storage deal (offline deal)
dp.Data = &storagemarket.DataRef{
TransferType: storagemarket.TTManual,
Root: rootCid,
PieceCid: &pieceInfo.PieceCID,
PieceSize: pieceInfo.PieceSize.Unpadded(),
}
proposalCid := dh.StartDeal(ctx, dp)
//stm: @CLIENT_STORAGE_DEALS_GET_001
// Wait for the deal to reach StorageDealCheckForAcceptance on the client
cd, err := client.ClientGetDealInfo(ctx, *proposalCid)
require.NoError(t, err)
require.Eventually(t, func() bool {
cd, _ := client.ClientGetDealInfo(ctx, *proposalCid)
return cd.State == storagemarket.StorageDealCheckForAcceptance
}, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State])
// Create a CAR file from the raw file
carFileDir := t.TempDir()
carFilePath := filepath.Join(carFileDir, "out.car")
err = client.ClientGenCar(ctx, lapi.FileRef{Path: inFile}, carFilePath)
require.NoError(t, err)
// Import the CAR file on the miner - this is the equivalent to
// transferring the file across the wire in a normal (non-offline) deal
err = miner.DealsImportData(ctx, *proposalCid, carFilePath)
require.NoError(t, err)
// Wait for the deal to be published
dh.WaitDealPublished(ctx, proposalCid)
t.Logf("deal published, retrieving")
// Retrieve the deal
outFile := dh.PerformRetrieval(ctx, proposalCid, rootCid, false)
kit.AssertFilesEqual(t, inFile, outFile)
}
t.Run("stdretrieval", func(t *testing.T) { runTest(t, false, 0) })
t.Run("fastretrieval", func(t *testing.T) { runTest(t, true, 0) })
t.Run("fastretrieval", func(t *testing.T) { runTest(t, true, 1024) })
}
@ -1,84 +0,0 @@
// stm: #integration
package itests
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/require"
commcid "github.com/filecoin-project/go-fil-commcid"
commp "github.com/filecoin-project/go-fil-commp-hashhash"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/itests/kit"
)
func TestDealPadding(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
//stm: @CLIENT_DATA_GET_DEAL_PIECE_CID_001
kit.QuietMiningLogs()
var blockTime = 250 * time.Millisecond
startEpoch := abi.ChainEpoch(2 << 12)
client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.WithAllSubsystems()) // no mock proofs.
ens.InterconnectAll().BeginMining(blockTime)
dh := kit.NewDealHarness(t, client, miner, miner)
ctx := context.Background()
client.WaitTillChain(ctx, kit.BlocksMinedByAll(miner.ActorAddr))
// Create a random file, would originally be a 256-byte sector
res, inFile := client.CreateImportFile(ctx, 1, 200)
// Get the piece size and commP
pieceInfo, err := client.ClientDealPieceCID(ctx, res.Root)
require.NoError(t, err)
t.Log("FILE CID:", res.Root)
runTest := func(t *testing.T, upscale abi.PaddedPieceSize) {
// test whether padding works as intended
newRawCp, err := commp.PadCommP(
pieceInfo.PieceCID.Hash()[len(pieceInfo.PieceCID.Hash())-32:],
uint64(pieceInfo.PieceSize),
uint64(upscale),
)
require.NoError(t, err)
pcid, err := commcid.DataCommitmentV1ToCID(newRawCp)
require.NoError(t, err)
dp := dh.DefaultStartDealParams()
dp.Data.Root = res.Root
dp.Data.PieceCid = &pcid
dp.Data.PieceSize = upscale.Unpadded()
dp.DealStartEpoch = startEpoch
proposalCid := dh.StartDeal(ctx, dp)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
//stm: @CLIENT_STORAGE_DEALS_GET_001
di, err := client.ClientGetDealInfo(ctx, *proposalCid)
require.NoError(t, err)
require.True(t, di.PieceCID.Equals(pcid))
dh.WaitDealSealed(ctx, proposalCid, false, false, nil)
// Retrieve the deal
outFile := dh.PerformRetrieval(ctx, proposalCid, res.Root, false)
kit.AssertFilesEqual(t, inFile, outFile)
}
t.Run("padQuarterSector", func(t *testing.T) { runTest(t, 512) })
t.Run("padHalfSector", func(t *testing.T) { runTest(t, 1024) })
t.Run("padFullSector", func(t *testing.T) { runTest(t, 2048) })
}
@ -1,267 +0,0 @@
// stm: #integration
package itests
import (
"context"
"fmt"
"io"
"os"
"testing"
"time"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/ipld/go-car"
textselector "github.com/ipld/go-ipld-selector-text-lite"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
api0 "github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/itests/kit"
)
// please talk to @ribasushi or @mikeal before modifying these test: there are
// downstream dependencies on ADL-less operation
var (
adlFixtureCar = "fixtures/adl_test.car"
adlFixtureRoot, _ = cid.Parse("bafybeiaigxwanoxyeuzyiknhrg6io6kobfbm37ozcips6qdwumub2gaomy")
adlFixtureCommp, _ = cid.Parse("baga6ea4seaqjnmnrv4qsfz2rnda54mvo5al22dwpguhn2pmep63gl7bbqqqraai")
adlFixturePieceSize = abi.PaddedPieceSize(1024)
dmSelector = api.Selector("Links/0/Hash")
dmTextSelector = textselector.Expression(dmSelector)
dmExpectedResult = "NO ADL"
dmExpectedCarBlockCount = 4
dmDagSpec = []api.DagSpec{{DataSelector: &dmSelector, ExportMerkleProof: true}}
)
func TestDMLevelPartialRetrieval(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
//stm: @CLIENT_RETRIEVAL_RETRIEVE_001, @CLIENT_RETRIEVAL_FIND_001
ctx := context.Background()
kit.QuietMiningLogs()
client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC())
dh := kit.NewDealHarness(t, client, miner, miner)
ens.InterconnectAll().BeginMiningMustPost(50 * time.Millisecond)
_, err := client.ClientImport(ctx, api.FileRef{Path: adlFixtureCar, IsCAR: true})
require.NoError(t, err)
caddr, err := client.WalletDefaultAddress(ctx)
require.NoError(t, err)
//
// test retrieval from local car 1st
require.NoError(t, testDMExportAsCar(
ctx, client, api.ExportRef{
FromLocalCAR: adlFixtureCar,
Root: adlFixtureRoot,
DAGs: dmDagSpec,
}, t.TempDir(),
))
require.NoError(t, testDMExportAsFile(
ctx, client, api.ExportRef{
FromLocalCAR: adlFixtureCar,
Root: adlFixtureRoot,
DAGs: dmDagSpec,
}, t.TempDir(),
))
//
// ensure V0 continues functioning as expected
require.NoError(t, tesV0RetrievalAsCar(
ctx, client, api0.RetrievalOrder{
FromLocalCAR: adlFixtureCar,
Root: adlFixtureRoot,
DatamodelPathSelector: &dmTextSelector,
}, t.TempDir(),
))
require.NoError(t, testV0RetrievalAsFile(
ctx, client, api0.RetrievalOrder{
FromLocalCAR: adlFixtureCar,
Root: adlFixtureRoot,
DatamodelPathSelector: &dmTextSelector,
}, t.TempDir(),
))
//
// now perform a storage/retrieval deal as well, and retest
dp := dh.DefaultStartDealParams()
dp.Data = &storagemarket.DataRef{
Root: adlFixtureRoot,
PieceCid: &adlFixtureCommp,
PieceSize: adlFixturePieceSize.Unpadded(),
}
proposalCid := dh.StartDeal(ctx, dp)
// Wait for the deal to reach StorageDealCheckForAcceptance on the client
cd, err := client.ClientGetDealInfo(ctx, *proposalCid)
require.NoError(t, err)
require.Eventually(t, func() bool {
cd, _ := client.ClientGetDealInfo(ctx, *proposalCid)
return cd.State == storagemarket.StorageDealCheckForAcceptance
}, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State])
dh.WaitDealSealed(ctx, proposalCid, false, false, nil)
offers, err := client.ClientFindData(ctx, adlFixtureRoot, nil)
require.NoError(t, err)
require.NotEmpty(t, offers, "no offers")
retOrder := offers[0].Order(caddr)
retOrder.DataSelector = &dmSelector
rr, err := client.ClientRetrieve(ctx, retOrder)
require.NoError(t, err)
err = client.ClientRetrieveWait(ctx, rr.DealID)
require.NoError(t, err)
require.NoError(t, testDMExportAsCar(
ctx, client, api.ExportRef{
DealID: rr.DealID,
Root: adlFixtureRoot,
DAGs: dmDagSpec,
}, t.TempDir(),
))
require.NoError(t, testDMExportAsFile(
ctx, client, api.ExportRef{
DealID: rr.DealID,
Root: adlFixtureRoot,
DAGs: dmDagSpec,
}, t.TempDir(),
))
}
func testDMExportAsFile(ctx context.Context, client *kit.TestFullNode, expDirective api.ExportRef, tempDir string) error {
out := tempDir + string(os.PathSeparator) + "exp-test" + expDirective.Root.String()
fileDest := api.FileRef{
Path: out,
}
err := client.ClientExport(ctx, expDirective, fileDest)
if err != nil {
return err
}
f, err := os.Open(out)
if err != nil {
return err
}
defer f.Close() //nolint:errcheck
return validateDMUnixFile(f)
}
func testV0RetrievalAsFile(ctx context.Context, client *kit.TestFullNode, retOrder api0.RetrievalOrder, tempDir string) error {
out := tempDir + string(os.PathSeparator) + "exp-test" + retOrder.Root.String()
cv0 := &api0.WrapperV1Full{FullNode: client.FullNode}
err := cv0.ClientRetrieve(ctx, retOrder, &api.FileRef{
Path: out,
})
if err != nil {
return err
}
f, err := os.Open(out)
if err != nil {
return err
}
defer f.Close() //nolint:errcheck
return validateDMUnixFile(f)
}
func validateDMUnixFile(r io.Reader) error {
data, err := io.ReadAll(r)
if err != nil {
return err
}
if string(data) != dmExpectedResult {
return fmt.Errorf("retrieved data mismatch: expected '%s' got '%s'", dmExpectedResult, data)
}
return nil
}
func testDMExportAsCar(ctx context.Context, client *kit.TestFullNode, expDirective api.ExportRef, tempDir string) error {
out, err := os.CreateTemp(tempDir, "exp-test")
if err != nil {
return err
}
defer out.Close() //nolint:errcheck
carDest := api.FileRef{
IsCAR: true,
Path: out.Name(),
}
err = client.ClientExport(ctx, expDirective, carDest)
if err != nil {
return err
}
return validateDMCar(out)
}
func tesV0RetrievalAsCar(ctx context.Context, client *kit.TestFullNode, retOrder api0.RetrievalOrder, tempDir string) error {
out, err := os.CreateTemp(tempDir, "exp-test")
if err != nil {
return err
}
defer out.Close() //nolint:errcheck
cv0 := &api0.WrapperV1Full{FullNode: client.FullNode}
err = cv0.ClientRetrieve(ctx, retOrder, &api.FileRef{
Path: out.Name(),
IsCAR: true,
})
if err != nil {
return err
}
return validateDMCar(out)
}
func validateDMCar(r io.Reader) error {
cr, err := car.NewCarReader(r)
if err != nil {
return err
}
if len(cr.Header.Roots) != 1 {
return fmt.Errorf("expected a single root in result car, got %d", len(cr.Header.Roots))
} else if cr.Header.Roots[0].String() != adlFixtureRoot.String() {
return fmt.Errorf("expected root cid '%s', got '%s'", adlFixtureRoot.String(), cr.Header.Roots[0].String())
}
blks := make([]blocks.Block, 0)
for {
b, err := cr.Next()
if err == io.EOF {
break
} else if err != nil {
return err
}
blks = append(blks, b)
}
if len(blks) != dmExpectedCarBlockCount {
return fmt.Errorf("expected a car file with %d blocks, got one with %d instead", dmExpectedCarBlockCount, len(blks))
}
data := fmt.Sprintf("%s%s", blks[2].RawData(), blks[3].RawData())
if data != dmExpectedResult {
return fmt.Errorf("retrieved data mismatch: expected '%s' got '%s'", dmExpectedResult, data)
}
return nil
}
@ -1,256 +0,0 @@
// stm: #integration
package itests
import (
"context"
"fmt"
"io"
"os"
"testing"
"time"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/ipld/go-car"
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/itests/kit"
)
// use the mainnet carfile as text fixture: it will always be here
// https://dweb.link/ipfs/bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2/8/1/8/1/0/1/0
var (
sourceCar = "../build/genesis/mainnet.car"
carRoot, _ = cid.Parse("bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2")
carCommp, _ = cid.Parse("baga6ea4seaqmrivgzei3fmx5qxtppwankmtou6zvigyjaveu3z2zzwhysgzuina")
selectedCid, _ = cid.Parse("bafkqaetgnfwc6mjpon2g64tbm5sxa33xmvza")
carPieceSize = abi.PaddedPieceSize(2097152)
textSelector = api.Selector("8/1/8/1/0/1/0")
textSelectorNonLink = api.Selector("8/1/8/1/0/1")
textSelectorNonexistent = api.Selector("42")
expectedResult = "fil/1/storagepower"
)
func TestPartialRetrieval(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
//stm: @CLIENT_RETRIEVAL_RETRIEVE_001
ctx := context.Background()
kit.QuietMiningLogs()
client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.MockProofs(), kit.SectorSize(512<<20))
dh := kit.NewDealHarness(t, client, miner, miner)
ens.InterconnectAll().BeginMining(50 * time.Millisecond)
_, err := client.ClientImport(ctx, api.FileRef{Path: sourceCar, IsCAR: true})
require.NoError(t, err)
caddr, err := client.WalletDefaultAddress(ctx)
require.NoError(t, err)
// first test retrieval from local car, then do an actual deal
for _, exportMerkleProof := range []bool{false, true} {
for _, fullCycle := range []bool{false, true} {
var retOrder api.RetrievalOrder
var eref api.ExportRef
if !fullCycle {
eref.FromLocalCAR = sourceCar
} else {
dp := dh.DefaultStartDealParams()
dp.Data = &storagemarket.DataRef{
// FIXME: figure out how to do this with an online partial transfer
TransferType: storagemarket.TTManual,
Root: carRoot,
PieceCid: &carCommp,
PieceSize: carPieceSize.Unpadded(),
}
proposalCid := dh.StartDeal(ctx, dp)
// Wait for the deal to reach StorageDealCheckForAcceptance on the client
cd, err := client.ClientGetDealInfo(ctx, *proposalCid)
require.NoError(t, err)
require.Eventually(t, func() bool {
cd, _ := client.ClientGetDealInfo(ctx, *proposalCid)
return cd.State == storagemarket.StorageDealCheckForAcceptance
}, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State])
err = miner.DealsImportData(ctx, *proposalCid, sourceCar)
require.NoError(t, err)
// Wait for the deal to be published, we should be able to start retrieval right away
dh.WaitDealPublished(ctx, proposalCid)
offers, err := client.ClientFindData(ctx, carRoot, nil)
require.NoError(t, err)
require.NotEmpty(t, offers, "no offers")
retOrder = offers[0].Order(caddr)
}
retOrder.DataSelector = &textSelector
eref.DAGs = append(eref.DAGs, api.DagSpec{
DataSelector: &textSelector,
ExportMerkleProof: exportMerkleProof,
})
eref.Root = carRoot
// test retrieval of either data or constructing a partial selective-car
for _, retrieveAsCar := range []bool{false, true} {
outFile := t.TempDir() + string(os.PathSeparator) + "ret-file" + retOrder.Root.String()
require.NoError(t, testGenesisRetrieval(
ctx,
client,
retOrder,
eref,
&api.FileRef{
Path: outFile,
IsCAR: retrieveAsCar,
},
))
// UGH if I do not sleep here, I get things like:
/*
retrieval failed: Retrieve failed: there is an active retrieval deal with peer 12D3KooWK9fB9a3HZ4PQLVmEQ6pweMMn5CAyKtumB71CPTnuBDi6 for payload CID bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2 (retrieval deal ID 1631259332180384709, state DealStatusFinalizingBlockstore) - existing deal must be cancelled before starting a new retrieval deal:
github.com/filecoin-project/lotus/node/impl/client.(*API).ClientRetrieve
/home/circleci/project/node/impl/client/client.go:774
*/
time.Sleep(time.Second)
}
}
}
// ensure non-existent paths fail
require.EqualError(
t,
testGenesisRetrieval(
ctx,
client,
api.RetrievalOrder{
Root: carRoot,
DataSelector: &textSelectorNonexistent,
},
api.ExportRef{
Root: carRoot,
FromLocalCAR: sourceCar,
DAGs: []api.DagSpec{{DataSelector: &textSelectorNonexistent}},
},
&api.FileRef{},
),
fmt.Sprintf("parsing dag spec: path selection does not match a node within %s", carRoot),
)
// ensure non-boundary retrievals fail
require.EqualError(
t,
testGenesisRetrieval(
ctx,
client,
api.RetrievalOrder{
Root: carRoot,
DataSelector: &textSelectorNonLink,
},
api.ExportRef{
Root: carRoot,
FromLocalCAR: sourceCar,
DAGs: []api.DagSpec{{DataSelector: &textSelectorNonLink}},
},
&api.FileRef{},
),
fmt.Sprintf("parsing dag spec: error while locating partial retrieval sub-root: unsupported selection path '%s' does not correspond to a block boundary (a.k.a. CID link)", textSelectorNonLink),
)
}
func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrder api.RetrievalOrder, eref api.ExportRef, retRef *api.FileRef) error {
if retOrder.Total.Nil() {
retOrder.Total = big.Zero()
}
if retOrder.UnsealPrice.Nil() {
retOrder.UnsealPrice = big.Zero()
}
if eref.FromLocalCAR == "" {
rr, err := client.ClientRetrieve(ctx, retOrder)
if err != nil {
return err
}
eref.DealID = rr.DealID
if err := client.ClientRetrieveWait(ctx, rr.DealID); err != nil {
return xerrors.Errorf("retrieval wait: %w", err)
}
}
err := client.ClientExport(ctx, eref, *retRef)
if err != nil {
return err
}
outFile, err := os.Open(retRef.Path)
if err != nil {
return err
}
defer outFile.Close() //nolint:errcheck
var data []byte
if !retRef.IsCAR {
data, err = io.ReadAll(outFile)
if err != nil {
return err
}
} else {
cr, err := car.NewCarReader(outFile)
if err != nil {
return err
}
if len(cr.Header.Roots) != 1 {
return fmt.Errorf("expected a single root in result car, got %d", len(cr.Header.Roots))
} else if eref.DAGs[0].ExportMerkleProof && cr.Header.Roots[0].String() != carRoot.String() {
return fmt.Errorf("expected root cid '%s', got '%s'", carRoot.String(), cr.Header.Roots[0].String())
} else if !eref.DAGs[0].ExportMerkleProof && cr.Header.Roots[0].String() != selectedCid.String() {
return fmt.Errorf("expected root cid '%s', got '%s'", selectedCid.String(), cr.Header.Roots[0].String())
}
blks := make([]blocks.Block, 0)
for {
b, err := cr.Next()
if err == io.EOF {
break
} else if err != nil {
return err
}
blks = append(blks, b)
}
if (eref.DAGs[0].ExportMerkleProof && len(blks) != 3) || (!eref.DAGs[0].ExportMerkleProof && len(blks) != 1) {
return fmt.Errorf("expected a car file with 3/1 blocks, got one with %d instead", len(blks))
}
data = blks[len(blks)-1].RawData()
}
if string(data) != expectedResult {
return fmt.Errorf("retrieved data mismatch: expected '%s' got '%s'", expectedResult, data)
}
return nil
}
@ -1,70 +0,0 @@
// stm: #integration
package itests
import (
"context"
"testing"
"time"
"github.com/filecoin-project/lotus/itests/kit"
)
func TestFirstDealEnablesMining(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
// test making a deal with a fresh miner, and see if it starts to mine.
if testing.Short() {
t.Skip("skipping test in short mode")
}
kit.QuietMiningLogs()
var (
client kit.TestFullNode
genMiner kit.TestMiner // bootstrap
provider kit.TestMiner // no sectors, will need to create one
)
ens := kit.NewEnsemble(t, kit.MockProofs())
ens.FullNode(&client)
ens.Miner(&genMiner, &client, kit.WithAllSubsystems())
ens.Miner(&provider, &client, kit.WithAllSubsystems(), kit.PresealSectors(0))
ens.Start().InterconnectAll().BeginMining(50 * time.Millisecond)
ctx := context.Background()
dh := kit.NewDealHarness(t, &client, &provider, &provider)
ref, _ := client.CreateImportFile(ctx, 5, 0)
t.Log("FILE CID:", ref.Root)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// start a goroutine to monitor head changes from the client
// once the provider has mined a block, thanks to the power acquired from the deal,
// we pass the test.
providerMined := make(chan struct{})
go func() {
_ = client.WaitTillChain(ctx, kit.BlocksMinedByAll(provider.ActorAddr))
close(providerMined)
}()
// now perform the deal.
dp := dh.DefaultStartDealParams()
dp.Data.Root = ref.Root
deal := dh.StartDeal(ctx, dp)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
dh.WaitDealSealed(ctx, deal, false, false, nil)
<-providerMined
}
@ -1,150 +0,0 @@
// stm: #integration
package itests
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
func TestQuotePriceForUnsealedRetrieval(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
var (
ctx = context.Background()
blocktime = 50 * time.Millisecond
)
kit.QuietMiningLogs()
client, miner, ens := kit.EnsembleMinimal(t)
ens.InterconnectAll().BeginMiningMustPost(blocktime)
var (
ppb = int64(1)
unsealPrice = int64(77)
)
// Set unsealed price to non-zero
ask, err := miner.MarketGetRetrievalAsk(ctx)
require.NoError(t, err)
ask.PricePerByte = abi.NewTokenAmount(ppb)
ask.UnsealPrice = abi.NewTokenAmount(unsealPrice)
err = miner.MarketSetRetrievalAsk(ctx, ask)
require.NoError(t, err)
dh := kit.NewDealHarness(t, client, miner, miner)
deal1, res1, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6})
// one more storage deal for the same data
_, res2, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6})
require.Equal(t, res1.Root, res2.Root)
//stm: @CLIENT_STORAGE_DEALS_GET_001
// Retrieval
dealInfo, err := client.ClientGetDealInfo(ctx, *deal1)
require.NoError(t, err)
//stm: @CLIENT_RETRIEVAL_FIND_001
// fetch quote -> zero for unsealed price since unsealed file already exists.
offers, err := client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID)
require.NoError(t, err)
require.Len(t, offers, 2)
require.Equal(t, offers[0], offers[1])
require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64())
require.Equal(t, dealInfo.Size*uint64(ppb), offers[0].MinPrice.Uint64())
// remove ONLY one unsealed file
//stm: @STORAGE_LIST_001, @MINER_SECTOR_LIST_001
ss, err := miner.StorageList(context.Background())
require.NoError(t, err)
_, err = miner.SectorsListNonGenesis(ctx)
require.NoError(t, err)
//stm: @STORAGE_DROP_SECTOR_001, @STORAGE_LIST_001
iLoop:
for storeID, sd := range ss {
for _, sector := range sd {
err := miner.StorageDropSector(ctx, storeID, sector.SectorID, storiface.FTUnsealed)
require.NoError(t, err)
break iLoop // remove ONLY one
}
}
//stm: @CLIENT_RETRIEVAL_FIND_001
// get retrieval quote -> zero for unsealed price as unsealed file exists.
offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID)
require.NoError(t, err)
require.Len(t, offers, 2)
require.Equal(t, offers[0], offers[1])
require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64())
require.Equal(t, dealInfo.Size*uint64(ppb), offers[0].MinPrice.Uint64())
// remove the other unsealed file as well
ss, err = miner.StorageList(context.Background())
require.NoError(t, err)
_, err = miner.SectorsListNonGenesis(ctx)
require.NoError(t, err)
for storeID, sd := range ss {
for _, sector := range sd {
require.NoError(t, miner.StorageDropSector(ctx, storeID, sector.SectorID, storiface.FTUnsealed))
}
}
//stm: @CLIENT_RETRIEVAL_FIND_001
// fetch quote -> non-zero for unseal price as we no more unsealed files.
offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID)
require.NoError(t, err)
require.Len(t, offers, 2)
require.Equal(t, offers[0], offers[1])
require.Equal(t, uint64(unsealPrice), offers[0].UnsealPrice.Uint64())
total := (dealInfo.Size * uint64(ppb)) + uint64(unsealPrice)
require.Equal(t, total, offers[0].MinPrice.Uint64())
}
func TestZeroPricePerByteRetrieval(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
if testing.Short() {
t.Skip("skipping test in short mode")
}
kit.QuietMiningLogs()
var (
blockTime = 10 * time.Millisecond
startEpoch = abi.ChainEpoch(2 << 12)
)
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
ens.InterconnectAll().BeginMiningMustPost(blockTime)
ctx := context.Background()
ask, err := miner.MarketGetRetrievalAsk(ctx)
require.NoError(t, err)
ask.PricePerByte = abi.NewTokenAmount(0)
err = miner.MarketSetRetrievalAsk(ctx, ask)
require.NoError(t, err)
dh := kit.NewDealHarness(t, client, miner, miner)
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{
N: 1,
StartEpoch: startEpoch,
})
}
@ -1,143 +0,0 @@
// stm: #integration
package itests
import (
"bytes"
"context"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet/key"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/storage/ctladdr"
)
func TestPublishDealsBatching(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
var (
ctx = context.Background()
publishPeriod = 10 * time.Second
maxDealsPerMsg = uint64(2) // Set max deals per publish deals message to 2
startEpoch = abi.ChainEpoch(2 << 12)
)
kit.QuietMiningLogs()
publisherKey, err := key.GenerateKey(types.KTSecp256k1)
require.NoError(t, err)
opts := node.Options(
node.Override(new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
}),
),
node.Override(new(*ctladdr.AddressSelector), modules.AddressSelector(&config.MinerAddressConfig{
DealPublishControl: []string{
publisherKey.Address.String(),
},
DisableOwnerFallback: true,
DisableWorkerFallback: true,
})),
)
client, miner, ens := kit.EnsembleMinimal(t, kit.Account(publisherKey, types.FromFil(10)), kit.MockProofs(), kit.ConstructorOpts(opts))
ens.InterconnectAll().BeginMining(10 * time.Millisecond)
_, err = client.WalletImport(ctx, &publisherKey.KeyInfo)
require.NoError(t, err)
miner.SetControlAddresses(publisherKey.Address)
dh := kit.NewDealHarness(t, client, miner, miner)
// Starts a deal and waits until it's published
runDealTillPublish := func(rseed int) {
res, _ := client.CreateImportFile(ctx, rseed, 0)
upds, err := client.ClientGetDealUpdates(ctx)
require.NoError(t, err)
dp := dh.DefaultStartDealParams()
dp.Data.Root = res.Root
dp.DealStartEpoch = startEpoch
dh.StartDeal(ctx, dp)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
done := make(chan struct{})
go func() {
for upd := range upds {
if upd.DataRef.Root == res.Root && upd.State == storagemarket.StorageDealAwaitingPreCommit {
done <- struct{}{}
}
}
}()
<-done
}
// Run three deals in parallel
done := make(chan struct{}, maxDealsPerMsg+1)
for rseed := 1; rseed <= 3; rseed++ {
rseed := rseed
go func() {
runDealTillPublish(rseed)
done <- struct{}{}
}()
}
// Wait for two of the deals to be published
for i := 0; i < int(maxDealsPerMsg); i++ {
<-done
}
// Expect a single PublishStorageDeals message that includes the first two deals
//stm: @CHAIN_STATE_LIST_MESSAGES_001
msgCids, err := client.StateListMessages(ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1)
require.NoError(t, err)
count := 0
for _, msgCid := range msgCids {
msg, err := client.ChainGetMessage(ctx, msgCid)
require.NoError(t, err)
if msg.Method == market.Methods.PublishStorageDeals {
count++
var pubDealsParams market2.PublishStorageDealsParams
err = pubDealsParams.UnmarshalCBOR(bytes.NewReader(msg.Params))
require.NoError(t, err)
require.Len(t, pubDealsParams.Deals, int(maxDealsPerMsg))
require.Equal(t, publisherKey.Address.String(), msg.From.String())
}
}
require.Equal(t, 1, count)
// The third deal should be published once the publish period expires.
// Allow a little padding as it takes a moment for the state change to
// be noticed by the client.
padding := 10 * time.Second
select {
case <-time.After(publishPeriod + padding):
require.Fail(t, "Expected 3rd deal to be published once publish period elapsed")
case <-done: // Success
}
}
@ -1,104 +0,0 @@
package itests
import (
"bytes"
"context"
"fmt"
"io"
"net/url"
"os"
"path"
"testing"
"time"
"github.com/google/uuid"
"github.com/gorilla/websocket"
"github.com/ipld/go-car"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/itests/kit"
)
func TestNetStoreRetrieval(t *testing.T) {
kit.QuietMiningLogs()
blocktime := 5 * time.Millisecond
ctx := context.Background()
full, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC())
ens.InterconnectAll().BeginMining(blocktime)
time.Sleep(5 * time.Second)
// For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
dealStartEpoch := abi.ChainEpoch(2 << 12)
rseed := 7
dh := kit.NewDealHarness(t, full, miner, miner)
dealCid, res, _ := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{
Rseed: rseed,
StartEpoch: dealStartEpoch,
UseCARFileForStorageDeal: true,
})
// create deal store
id := uuid.New()
rstore := bstore.NewMemorySync()
au, err := url.Parse(full.ListenURL)
require.NoError(t, err)
switch au.Scheme {
case "http":
au.Scheme = "ws"
case "https":
au.Scheme = "wss"
}
au.Path = path.Join(au.Path, "/rest/v0/store/"+id.String())
conn, _, err := websocket.DefaultDialer.Dial(au.String(), nil)
require.NoError(t, err)
_ = bstore.HandleNetBstoreWS(ctx, rstore, conn)
dh.PerformRetrievalWithOrder(ctx, dealCid, res.Root, false, func(offer api.QueryOffer, address address.Address) api.RetrievalOrder {
order := offer.Order(address)
order.RemoteStore = &id
return order
})
// check blockstore blocks
carv1FilePath, _ := kit.CreateRandomCARv1(t, rseed, 200)
cb, err := os.ReadFile(carv1FilePath)
require.NoError(t, err)
cr, err := car.NewCarReader(bytes.NewReader(cb))
require.NoError(t, err)
var blocks int
for {
cb, err := cr.Next()
if err == io.EOF {
fmt.Println("blocks: ", blocks)
return
}
require.NoError(t, err)
sb, err := rstore.Get(ctx, cb.Cid())
require.NoError(t, err)
require.EqualValues(t, cb.RawData(), sb.RawData())
blocks++
}
}
@ -1,188 +0,0 @@
// stm: #integration
package itests
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet/key"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/storage/ctladdr"
)
var (
publishPeriod = 1 * time.Second
maxDealsPerMsg = uint64(2) // Set max deals per publish deals message to 2
blockTime = 3 * time.Millisecond
)
func TestDealsRetryLackOfFunds(t *testing.T) {
t.Run("cover-gas", func(t *testing.T) {
testDealsRetryLackOfFunds(t, types.NewInt(1020000000000))
})
t.Run("empty", func(t *testing.T) {
testDealsRetryLackOfFunds(t, types.NewInt(1))
})
}
func testDealsRetryLackOfFunds(t *testing.T, publishStorageAccountFunds abi.TokenAmount) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
ctx := context.Background()
kit.QuietMiningLogs()
// Allow 8MB sectors
eightMBSectorsOpt := kit.SectorSize(8 << 20)
publishStorageDealKey, err := key.GenerateKey(types.KTSecp256k1)
require.NoError(t, err)
opts := node.Options(
node.Override(new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
}),
),
node.Override(new(*ctladdr.AddressSelector), modules.AddressSelector(&config.MinerAddressConfig{
DealPublishControl: []string{
publishStorageDealKey.Address.String(),
},
DisableOwnerFallback: true,
DisableWorkerFallback: true,
})),
)
minerFullNode, clientFullNode, miner, ens := kit.EnsembleTwoOne(t, kit.Account(publishStorageDealKey, publishStorageAccountFunds), kit.ConstructorOpts(opts), kit.MockProofs(), eightMBSectorsOpt)
kit.QuietMiningLogs()
ens.
Start().
InterconnectAll().
BeginMining(blockTime)
_, err = minerFullNode.WalletImport(ctx, &publishStorageDealKey.KeyInfo)
require.NoError(t, err)
miner.SetControlAddresses(publishStorageDealKey.Address)
dh := kit.NewDealHarness(t, clientFullNode, miner, miner)
res, _ := clientFullNode.CreateImportFile(ctx, 0, 4<<20) // 4MiB file.
list, err := clientFullNode.ClientListImports(ctx)
require.NoError(t, err)
require.Len(t, list, 1)
require.Equal(t, res.Root, *list[0].Root)
dp := dh.DefaultStartDealParams()
dp.Data.Root = res.Root
dp.FastRetrieval = true
dp.EpochPrice = abi.NewTokenAmount(62500000) // minimum asking price.
deal := dh.StartDeal(ctx, dp)
propcid := *deal
go func() {
time.Sleep(30 * time.Second)
kit.SendFunds(ctx, t, minerFullNode, publishStorageDealKey.Address, types.FromFil(1))
err := miner.MarketRetryPublishDeal(ctx, propcid)
if err != nil {
panic(err)
}
}()
dh.WaitDealSealed(ctx, deal, false, false, nil)
}
func TestDealsRetryLackOfFunds_blockInPublishDeal(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
|
||||
ctx := context.Background()
|
||||
kit.QuietMiningLogs()
|
||||
|
||||
// Allow 8MB sectors
|
||||
eightMBSectorsOpt := kit.SectorSize(8 << 20)
|
||||
|
||||
publishStorageDealKey, err := key.GenerateKey(types.KTSecp256k1)
|
||||
require.NoError(t, err)
|
||||
|
||||
opts := node.Options(
|
||||
node.Override(new(*storageadapter.DealPublisher),
|
||||
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
|
||||
Period: publishPeriod,
|
||||
MaxDealsPerMsg: maxDealsPerMsg,
|
||||
}),
|
||||
),
|
||||
node.Override(new(*ctladdr.AddressSelector), modules.AddressSelector(&config.MinerAddressConfig{
|
||||
DealPublishControl: []string{
|
||||
publishStorageDealKey.Address.String(),
|
||||
},
|
||||
DisableOwnerFallback: true,
|
||||
DisableWorkerFallback: true,
|
||||
})),
|
||||
)
|
||||
|
||||
publishStorageAccountFunds := types.NewInt(1020000000000)
|
||||
minerFullNode, clientFullNode, miner, ens := kit.EnsembleTwoOne(t, kit.Account(publishStorageDealKey, publishStorageAccountFunds), kit.ConstructorOpts(opts), kit.MockProofs(), eightMBSectorsOpt)
|
||||
|
||||
kit.QuietMiningLogs()
|
||||
|
||||
ens.
|
||||
Start().
|
||||
InterconnectAll().
|
||||
BeginMining(blockTime)
|
||||
|
||||
_, err = minerFullNode.WalletImport(ctx, &publishStorageDealKey.KeyInfo)
|
||||
require.NoError(t, err)
|
||||
|
||||
miner.SetControlAddresses(publishStorageDealKey.Address)
|
||||
|
||||
dh := kit.NewDealHarness(t, clientFullNode, miner, miner)
|
||||
|
||||
res, _ := clientFullNode.CreateImportFile(ctx, 0, 4<<20) // 4MiB file.
|
||||
list, err := clientFullNode.ClientListImports(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, list, 1)
|
||||
require.Equal(t, res.Root, *list[0].Root)
|
||||
|
||||
dp := dh.DefaultStartDealParams()
|
||||
dp.Data.Root = res.Root
|
||||
dp.FastRetrieval = true
|
||||
dp.EpochPrice = abi.NewTokenAmount(62500000) // minimum asking price.
|
||||
deal := dh.StartDeal(ctx, dp)
|
||||
|
||||
dealSealed := make(chan struct{})
|
||||
go func() {
|
||||
dh.WaitDealSealedQuiet(ctx, deal, false, false, nil)
|
||||
dealSealed <- struct{}{}
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-dealSealed:
|
||||
t.Fatal("deal shouldn't have sealed")
|
||||
case <-time.After(time.Second * 15):
|
||||
}
|
||||
}
|
@ -1,45 +0,0 @@
// stm: #integration
package itests

import (
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit"
)

func TestDealsWithSealingAndRPC(t *testing.T) {
	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001

	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
	if testing.Short() {
		t.Skip("skipping test in short mode")
	}

	kit.QuietMiningLogs()

	client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.WithAllSubsystems()) // no mock proofs.
	ens.InterconnectAll().BeginMiningMustPost(250 * time.Millisecond)
	dh := kit.NewDealHarness(t, client, miner, miner)

	t.Run("stdretrieval", func(t *testing.T) {
		dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1})
	})

	t.Run("fastretrieval", func(t *testing.T) {
		dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true})
	})

	t.Run("fastretrieval-twodeals-sequential", func(t *testing.T) {
		dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true})
		dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true})
	})

	t.Run("stdretrieval-carv1", func(t *testing.T) {
		dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, UseCARFileForStorageDeal: true})
	})
}
@ -24,7 +24,6 @@ import (
	"github.com/filecoin-project/lotus/api/client"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/cli/clicommands"
	"github.com/filecoin-project/lotus/gateway"
	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/filecoin-project/lotus/itests/multisig"
@ -194,46 +193,6 @@ func TestGatewayMsigCLI(t *testing.T) {
	multisig.RunMultisigTests(t, lite)
}

func TestGatewayDealFlow(t *testing.T) {
	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
	kit.QuietMiningLogs()

	blocktime := 5 * time.Millisecond
	ctx := context.Background()
	nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit)

	time.Sleep(5 * time.Second)

	// For these tests where the block time is artificially short, just use
	// a deal start epoch that is guaranteed to be far enough in the future
	// so that the deal starts sealing in time
	dealStartEpoch := abi.ChainEpoch(2 << 12)

	dh := kit.NewDealHarness(t, nodes.lite, nodes.miner, nodes.miner)
	dealCid, res, _ := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{
		Rseed:      6,
		StartEpoch: dealStartEpoch,
	})
	dh.PerformRetrieval(ctx, dealCid, res.Root, false)
}

func TestGatewayCLIDealFlow(t *testing.T) {
	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
	kit.QuietMiningLogs()

	blocktime := 5 * time.Millisecond
	ctx := context.Background()
	nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit)

	kit.RunClientTest(t, clicommands.Commands, nodes.lite)
}

type testNodes struct {
	lite *kit.TestFullNode
	full *kit.TestFullNode
@ -1,161 +0,0 @@
|
||||
package kit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
lcli "github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
// RunClientTest exercises some of the Client CLI commands
|
||||
func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode *TestFullNode) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
|
||||
defer cancel()
|
||||
|
||||
// Create mock CLI
|
||||
mockCLI := NewMockCLI(ctx, t, cmds, api.NodeFull)
|
||||
clientCLI := mockCLI.Client(clientNode.ListenAddr)
|
||||
|
||||
// Get the Miner address
|
||||
addrs, err := clientNode.StateListMiners(ctx, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, addrs, 1)
|
||||
|
||||
minerAddr := addrs[0]
|
||||
fmt.Println("Miner:", minerAddr)
|
||||
|
||||
// client query-ask <Miner addr>
|
||||
out := clientCLI.RunCmd("client", "query-ask", minerAddr.String())
|
||||
require.Regexp(t, regexp.MustCompile("Ask:"), out)
|
||||
|
||||
// Create a deal (non-interactive)
|
||||
// client deal --start-epoch=<start epoch> <cid> <miner addr> 1000000attofil <duration>
|
||||
res, _, _, err := CreateImportFile(ctx, clientNode, 1, 0)
|
||||
|
||||
require.NoError(t, err)
|
||||
startEpoch := fmt.Sprintf("--start-epoch=%d", 2<<12)
|
||||
dataCid := res.Root
|
||||
price := "1000000attofil"
|
||||
duration := fmt.Sprintf("%d", build.MinDealDuration)
|
||||
out = clientCLI.RunCmd("client", "deal", startEpoch, dataCid.String(), minerAddr.String(), price, duration)
|
||||
fmt.Println("client deal", out)
|
||||
|
||||
// Create a deal (interactive)
|
||||
// client deal
|
||||
// <cid>
|
||||
// <duration> (in days)
|
||||
// <miner addr>
|
||||
// "no" (verified Client)
|
||||
// "yes" (confirm deal)
|
||||
res, _, _, err = CreateImportFile(ctx, clientNode, 2, 0)
|
||||
require.NoError(t, err)
|
||||
dataCid2 := res.Root
|
||||
duration = fmt.Sprintf("%d", build.MinDealDuration/builtin.EpochsInDay)
|
||||
cmd := []string{"client", "deal"}
|
||||
interactiveCmds := []string{
|
||||
dataCid2.String(),
|
||||
duration,
|
||||
minerAddr.String(),
|
||||
"no",
|
||||
"yes",
|
||||
}
|
||||
out = clientCLI.RunInteractiveCmd(cmd, interactiveCmds)
|
||||
fmt.Println("client deal:\n", out)
|
||||
|
||||
// Wait for provider to start sealing deal
|
||||
dealStatus := ""
|
||||
for {
|
||||
// client list-deals
|
||||
out = clientCLI.RunCmd("client", "list-deals", "--show-failed")
|
||||
fmt.Println("list-deals:\n", out)
|
||||
|
||||
lines := strings.Split(out, "\n")
|
||||
require.GreaterOrEqual(t, len(lines), 2)
|
||||
re := regexp.MustCompile(`\s+`)
|
||||
parts := re.Split(lines[1], -1)
|
||||
if len(parts) < 4 {
|
||||
require.Fail(t, "bad list-deals output format")
|
||||
}
|
||||
dealStatus = parts[3]
|
||||
fmt.Println(" Deal status:", dealStatus)
|
||||
|
||||
st := CategorizeDealState(dealStatus)
|
||||
require.NotEqual(t, TestDealStateFailed, st)
|
||||
if st == TestDealStateComplete {
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
|
||||
// client retrieval-ask --size=1 <miner addr> <data CID>
|
||||
out = clientCLI.RunCmd("client", "retrieval-ask", "--size=1", minerAddr.String(), dataCid.String())
|
||||
require.Regexp(t, regexp.MustCompile("Ask:"), out)
|
||||
fmt.Println("retrieval ask:\n", out)
|
||||
|
||||
// Retrieve the first file from the Miner
|
||||
// client retrieve <cid> <file path>
|
||||
tmpdir, err := os.MkdirTemp(os.TempDir(), "test-cli-client")
|
||||
require.NoError(t, err)
|
||||
path := filepath.Join(tmpdir, "outfile.dat")
|
||||
|
||||
// Wait for client retrieve to succeed.
|
||||
for {
|
||||
out = clientCLI.RunCmd("client", "retrieve", dataCid.String(), path)
|
||||
fmt.Println("retrieve:\n", out)
|
||||
if strings.Contains(out, "Success") {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func CreateImportFile(ctx context.Context, client api.FullNode, rseed int, size int) (res *api.ImportRes, path string, data []byte, err error) {
|
||||
data, path, err = createRandomFile(rseed, size)
|
||||
if err != nil {
|
||||
return nil, "", nil, err
|
||||
}
|
||||
|
||||
res, err = client.ClientImport(ctx, api.FileRef{Path: path})
|
||||
if err != nil {
|
||||
return nil, "", nil, err
|
||||
}
|
||||
return res, path, data, nil
|
||||
}
|
||||
|
||||
func createRandomFile(rseed, size int) ([]byte, string, error) {
|
||||
if size == 0 {
|
||||
size = 1600
|
||||
}
|
||||
data := make([]byte, size)
|
||||
_, err := rand.New(rand.NewSource(int64(rseed))).Read(data)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
dir, err := os.MkdirTemp(os.TempDir(), "test-make-deal-")
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
path := filepath.Join(dir, "sourcefile.dat")
|
||||
err = os.WriteFile(path, data, 0644)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
return data, path, nil
|
||||
}
|
@ -1,483 +0,0 @@
|
||||
package kit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/boxo/files"
|
||||
dag "github.com/ipfs/boxo/ipld/merkledag"
|
||||
dstest "github.com/ipfs/boxo/ipld/merkledag/test"
|
||||
unixfile "github.com/ipfs/boxo/ipld/unixfs/file"
|
||||
"github.com/ipfs/go-cid"
|
||||
ipldcbor "github.com/ipfs/go-ipld-cbor"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
"github.com/ipld/go-car"
|
||||
_ "github.com/ipld/go-ipld-prime/codec/dagcbor"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/shared_testutil"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
sealing "github.com/filecoin-project/lotus/storage/pipeline"
|
||||
)
|
||||
|
||||
type DealHarness struct {
|
||||
t *testing.T
|
||||
client *TestFullNode
|
||||
main *TestMiner
|
||||
market *TestMiner
|
||||
}
|
||||
|
||||
type MakeFullDealParams struct {
|
||||
Rseed int
|
||||
FastRet bool
|
||||
StartEpoch abi.ChainEpoch
|
||||
UseCARFileForStorageDeal bool
|
||||
|
||||
// SuspendUntilCryptoeconStable suspends deal-making, until cryptoecon
|
||||
// parameters are stabilised. This affects projected collateral, and tests
|
||||
// will fail in network version 13 and higher if deals are started too soon
|
||||
// after network birth.
|
||||
//
|
||||
// The reason is that the formula for collateral calculation takes
|
||||
// circulating supply into account:
|
||||
//
|
||||
// [portion of power this deal will be] * [~1% of tokens].
|
||||
//
|
||||
// In the first epochs after genesis, the total circulating supply is
|
||||
// changing dramatically in percentual terms. Therefore, if the deal is
|
||||
// proposed too soon, by the time it gets published on chain, the quoted
|
||||
// provider collateral will no longer be valid.
|
||||
//
|
||||
// The observation is that deals fail with:
|
||||
//
|
||||
// GasEstimateMessageGas error: estimating gas used: message execution
|
||||
// failed: exit 16, reason: Provider collateral out of bounds. (RetCode=16)
|
||||
//
|
||||
// Enabling this will suspend deal-making until the network has reached a
|
||||
// height of 300.
|
||||
SuspendUntilCryptoeconStable bool
|
||||
}
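// NOTE: illustrative sketch only, not part of the original file. The
// SuspendUntilCryptoeconStable comment above boils down to the rough bound
//
//	collateral ≈ [deal's share of network raw power] * [~1% of circulating supply]
//
// and the helper below simply restates that arithmetic with hypothetical
// example units; it is not the built-in actors formula used on chain.
func roughProviderCollateralExample(dealSizeBytes, networkRawPowerBytes, circulatingSupplyFIL float64) float64 {
	// e.g. an 8MiB deal on 10EiB of network power with 500M FIL circulating:
	// (8<<20 / (10<<60)) * 0.01 * 500e6 ≈ 3.6e-6 FIL
	//
	// Shortly after genesis the circulating supply is still growing rapidly,
	// so a quote computed at proposal time can fall outside the accepted
	// bounds by the time the deal is published on chain, which is exactly the
	// "Provider collateral out of bounds" failure described above.
	return (dealSizeBytes / networkRawPowerBytes) * (0.01 * circulatingSupplyFIL)
}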
|
||||
|
||||
// NewDealHarness creates a test harness that contains testing utilities for deals.
|
||||
func NewDealHarness(t *testing.T, client *TestFullNode, main *TestMiner, market *TestMiner) *DealHarness {
|
||||
return &DealHarness{
|
||||
t: t,
|
||||
client: client,
|
||||
main: main,
|
||||
market: market,
|
||||
}
|
||||
}
|
||||
|
||||
// MakeOnlineDeal makes an online deal, generating a random file with the
|
||||
// supplied seed, and setting the specified fast retrieval flag and start epoch
|
||||
// on the storage deal. It returns when the deal is sealed.
|
||||
//
|
||||
// TODO: convert input parameters to struct, and add size as an input param.
|
||||
func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealParams) (deal *cid.Cid, res *api.ImportRes, path string) {
|
||||
deal, res, path = dh.StartRandomDeal(ctx, params)
|
||||
|
||||
fmt.Printf("WAIT DEAL SEALEDS START\n")
|
||||
dh.WaitDealSealed(ctx, deal, false, false, nil)
|
||||
fmt.Printf("WAIT DEAL SEALEDS END\n")
|
||||
return deal, res, path
|
||||
}
|
||||
|
||||
func (dh *DealHarness) StartRandomDeal(ctx context.Context, params MakeFullDealParams) (deal *cid.Cid, res *api.ImportRes, path string) {
|
||||
if params.UseCARFileForStorageDeal {
|
||||
res, _, path = dh.client.ClientImportCARFile(ctx, params.Rseed, 200)
|
||||
} else {
|
||||
res, path = dh.client.CreateImportFile(ctx, params.Rseed, 0)
|
||||
}
|
||||
|
||||
dh.t.Logf("FILE CID: %s", res.Root)
|
||||
|
||||
if params.SuspendUntilCryptoeconStable {
|
||||
dh.t.Logf("deal-making suspending until cryptecon parameters have stabilised")
|
||||
ts := dh.client.WaitTillChain(ctx, HeightAtLeast(300))
|
||||
dh.t.Logf("deal-making continuing; current height is %d", ts.Height())
|
||||
}
|
||||
|
||||
dp := dh.DefaultStartDealParams()
|
||||
dp.Data.Root = res.Root
|
||||
dp.DealStartEpoch = params.StartEpoch
|
||||
dp.FastRetrieval = params.FastRet
|
||||
deal = dh.StartDeal(ctx, dp)
|
||||
|
||||
return deal, res, path
|
||||
}
|
||||
|
||||
func (dh *DealHarness) DefaultStartDealParams() api.StartDealParams {
|
||||
dp := api.StartDealParams{
|
||||
Data: &storagemarket.DataRef{TransferType: storagemarket.TTGraphsync},
|
||||
EpochPrice: types.NewInt(1000000),
|
||||
MinBlocksDuration: uint64(build.MinDealDuration),
|
||||
}
|
||||
|
||||
var err error
|
||||
dp.Miner, err = dh.main.ActorAddress(context.Background())
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
dp.Wallet, err = dh.client.WalletDefaultAddress(context.Background())
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
return dp
|
||||
}
|
||||
|
||||
// StartDeal starts a storage deal between the client and the miner.
|
||||
func (dh *DealHarness) StartDeal(ctx context.Context, dealParams api.StartDealParams) *cid.Cid {
|
||||
dealProposalCid, err := dh.client.ClientStartDeal(ctx, &dealParams)
|
||||
require.NoError(dh.t, err)
|
||||
return dealProposalCid
|
||||
}
|
||||
|
||||
// WaitDealSealed waits until the deal is sealed.
|
||||
func (dh *DealHarness) WaitDealSealed(ctx context.Context, deal *cid.Cid, noseal, noSealStart bool, cb func()) {
|
||||
loop:
|
||||
for {
|
||||
di, err := dh.client.ClientGetDealInfo(ctx, *deal)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
switch di.State {
|
||||
case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing:
|
||||
if noseal {
|
||||
return
|
||||
}
|
||||
if !noSealStart {
|
||||
dh.StartSealingWaiting(ctx)
|
||||
}
|
||||
case storagemarket.StorageDealProposalRejected:
|
||||
dh.t.Fatal("deal rejected")
|
||||
case storagemarket.StorageDealFailing:
|
||||
dh.t.Fatal("deal failed")
|
||||
case storagemarket.StorageDealError:
|
||||
dh.t.Fatal("deal errored", di.Message)
|
||||
case storagemarket.StorageDealActive:
|
||||
dh.t.Log("COMPLETE", di)
|
||||
break loop
|
||||
}
|
||||
|
||||
mds, err := dh.market.MarketListIncompleteDeals(ctx)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
var minerState storagemarket.StorageDealStatus
|
||||
for _, md := range mds {
|
||||
if md.DealID == di.DealID {
|
||||
minerState = md.State
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
dh.t.Logf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState])
|
||||
time.Sleep(time.Second / 2)
|
||||
if cb != nil {
|
||||
cb()
|
||||
}
|
||||
}
|
||||
fmt.Printf("WAIT DEAL SEALED LOOP BROKEN\n")
|
||||
}
|
||||
|
||||
// WaitDealSealedQuiet waits until the deal is sealed, without logging anything.
|
||||
func (dh *DealHarness) WaitDealSealedQuiet(ctx context.Context, deal *cid.Cid, noseal, noSealStart bool, cb func()) {
|
||||
loop:
|
||||
for {
|
||||
di, err := dh.client.ClientGetDealInfo(ctx, *deal)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
switch di.State {
|
||||
case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing:
|
||||
if noseal {
|
||||
return
|
||||
}
|
||||
if !noSealStart {
|
||||
dh.StartSealingWaiting(ctx)
|
||||
}
|
||||
case storagemarket.StorageDealProposalRejected:
|
||||
dh.t.Fatal("deal rejected")
|
||||
case storagemarket.StorageDealFailing:
|
||||
dh.t.Fatal("deal failed")
|
||||
case storagemarket.StorageDealError:
|
||||
dh.t.Fatal("deal errored", di.Message)
|
||||
case storagemarket.StorageDealActive:
|
||||
break loop
|
||||
}
|
||||
|
||||
_, err = dh.market.MarketListIncompleteDeals(ctx)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
time.Sleep(time.Second / 2)
|
||||
if cb != nil {
|
||||
cb()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (dh *DealHarness) ExpectDealFailure(ctx context.Context, deal *cid.Cid, errs string) error {
|
||||
for {
|
||||
di, err := dh.client.ClientGetDealInfo(ctx, *deal)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
switch di.State {
|
||||
case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing:
|
||||
return fmt.Errorf("deal is sealing, and we expected an error: %s", errs)
|
||||
case storagemarket.StorageDealProposalRejected:
|
||||
if strings.Contains(di.Message, errs) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unexpected error: %s ; expected: %s", di.Message, errs)
|
||||
case storagemarket.StorageDealFailing:
|
||||
if strings.Contains(di.Message, errs) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unexpected error: %s ; expected: %s", di.Message, errs)
|
||||
case storagemarket.StorageDealError:
|
||||
if strings.Contains(di.Message, errs) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unexpected error: %s ; expected: %s", di.Message, errs)
|
||||
case storagemarket.StorageDealActive:
|
||||
return errors.New("expected to get an error, but didn't get one")
|
||||
}
|
||||
|
||||
mds, err := dh.market.MarketListIncompleteDeals(ctx)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
var minerState storagemarket.StorageDealStatus
|
||||
for _, md := range mds {
|
||||
if md.DealID == di.DealID {
|
||||
minerState = md.State
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
dh.t.Logf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState])
|
||||
time.Sleep(time.Second / 2)
|
||||
}
|
||||
}
|
||||
|
||||
// WaitDealPublished waits until the deal is published.
|
||||
func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) {
|
||||
subCtx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
updates, err := dh.market.MarketGetDealUpdates(subCtx)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
dh.t.Fatal("context timeout")
|
||||
case di := <-updates:
|
||||
if deal.Equals(di.ProposalCid) {
|
||||
switch di.State {
|
||||
case storagemarket.StorageDealProposalRejected:
|
||||
dh.t.Fatal("deal rejected")
|
||||
case storagemarket.StorageDealFailing:
|
||||
dh.t.Fatal("deal failed")
|
||||
case storagemarket.StorageDealError:
|
||||
dh.t.Fatal("deal errored", di.Message)
|
||||
case storagemarket.StorageDealFinalizing, storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
|
||||
dh.t.Log("COMPLETE", di)
|
||||
return
|
||||
}
|
||||
dh.t.Log("Deal state: ", storagemarket.DealStates[di.State])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (dh *DealHarness) StartSealingWaiting(ctx context.Context) {
|
||||
snums, err := dh.main.SectorsListNonGenesis(ctx)
|
||||
require.NoError(dh.t, err)
|
||||
for _, snum := range snums {
|
||||
si, err := dh.main.SectorsStatus(ctx, snum, false)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
dh.t.Logf("Sector state <%d>-[%d]:, %s", snum, si.SealProof, si.State)
|
||||
if si.State == api.SectorState(sealing.WaitDeals) {
|
||||
require.NoError(dh.t, dh.main.SectorStartSealing(ctx, snum))
|
||||
}
|
||||
|
||||
dh.main.FlushSealingBatches(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (dh *DealHarness) PerformRetrieval(ctx context.Context, deal *cid.Cid, root cid.Cid, carExport bool, offers ...api.QueryOffer) (path string) {
|
||||
return dh.PerformRetrievalWithOrder(ctx, deal, root, carExport, func(offer api.QueryOffer, a address.Address) api.RetrievalOrder {
|
||||
return offer.Order(a)
|
||||
}, offers...)
|
||||
}
|
||||
|
||||
func (dh *DealHarness) PerformRetrievalWithOrder(ctx context.Context, deal *cid.Cid, root cid.Cid, carExport bool, makeOrder func(api.QueryOffer, address.Address) api.RetrievalOrder, offers ...api.QueryOffer) (path string) {
|
||||
var offer api.QueryOffer
|
||||
if len(offers) == 0 {
|
||||
// perform retrieval.
|
||||
info, err := dh.client.ClientGetDealInfo(ctx, *deal)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
offers, err := dh.client.ClientFindData(ctx, root, &info.PieceCID)
|
||||
require.NoError(dh.t, err)
|
||||
require.NotEmpty(dh.t, offers, "no offers")
|
||||
offer = offers[0]
|
||||
} else {
|
||||
offer = offers[0]
|
||||
}
|
||||
|
||||
carFile := dh.t.TempDir() + string(os.PathSeparator) + "ret-car-" + root.String()
|
||||
|
||||
caddr, err := dh.client.WalletDefaultAddress(ctx)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
updatesCtx, cancel := context.WithCancel(ctx)
|
||||
updates, err := dh.client.ClientGetRetrievalUpdates(updatesCtx)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
order := makeOrder(offer, caddr)
|
||||
|
||||
retrievalRes, err := dh.client.ClientRetrieve(ctx, order)
|
||||
require.NoError(dh.t, err)
|
||||
consumeEvents:
|
||||
for {
|
||||
var evt api.RetrievalInfo
|
||||
select {
|
||||
case <-updatesCtx.Done():
|
||||
dh.t.Fatal("Retrieval Timed Out")
|
||||
case evt = <-updates:
|
||||
if evt.ID != retrievalRes.DealID {
|
||||
continue
|
||||
}
|
||||
}
|
||||
switch evt.Status {
|
||||
case retrievalmarket.DealStatusCompleted:
|
||||
break consumeEvents
|
||||
case retrievalmarket.DealStatusRejected:
|
||||
dh.t.Fatalf("Retrieval Proposal Rejected: %s", evt.Message)
|
||||
case
|
||||
retrievalmarket.DealStatusDealNotFound,
|
||||
retrievalmarket.DealStatusErrored:
|
||||
dh.t.Fatalf("Retrieval Error: %s", evt.Message)
|
||||
}
|
||||
}
|
||||
cancel()
|
||||
|
||||
if order.RemoteStore != nil {
|
||||
// if we're retrieving into a remote store, skip export
|
||||
return ""
|
||||
}
|
||||
|
||||
require.NoError(dh.t, dh.client.ClientExport(ctx,
|
||||
api.ExportRef{
|
||||
Root: root,
|
||||
DealID: retrievalRes.DealID,
|
||||
},
|
||||
api.FileRef{
|
||||
Path: carFile,
|
||||
IsCAR: carExport,
|
||||
}))
|
||||
|
||||
ret := carFile
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func (dh *DealHarness) ExtractFileFromCAR(ctx context.Context, file *os.File) string {
|
||||
bserv := dstest.Bserv()
|
||||
ch, err := car.LoadCar(ctx, bserv.Blockstore(), file)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
blk, err := bserv.GetBlock(ctx, ch.Roots[0])
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
reg := ipld.Registry{}
|
||||
reg.Register(cid.DagProtobuf, dag.DecodeProtobufBlock)
|
||||
reg.Register(cid.DagCBOR, ipldcbor.DecodeBlock)
|
||||
reg.Register(cid.Raw, dag.DecodeRawBlock)
|
||||
|
||||
nd, err := reg.Decode(blk)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
dserv := dag.NewDAGService(bserv)
|
||||
|
||||
fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
tmpfile := dh.t.TempDir() + string(os.PathSeparator) + "file-in-car" + nd.Cid().String()
|
||||
|
||||
err = files.WriteTo(fil, tmpfile)
|
||||
require.NoError(dh.t, err)
|
||||
|
||||
return tmpfile
|
||||
}
|
||||
|
||||
type RunConcurrentDealsOpts struct {
|
||||
N int
|
||||
FastRetrieval bool
|
||||
CarExport bool
|
||||
StartEpoch abi.ChainEpoch
|
||||
UseCARFileForStorageDeal bool
|
||||
IndexProvider *shared_testutil.MockIndexProvider
|
||||
}
|
||||
|
||||
func (dh *DealHarness) RunConcurrentDeals(opts RunConcurrentDealsOpts) {
|
||||
ctx := context.Background()
|
||||
errgrp, _ := errgroup.WithContext(context.Background())
|
||||
for i := 0; i < opts.N; i++ {
|
||||
i := i
|
||||
errgrp.Go(func() (err error) {
|
||||
defer dh.t.Logf("finished concurrent deal %d/%d", i, opts.N)
|
||||
defer func() {
|
||||
// This is necessary because golang can't deal with test
|
||||
// failures being reported from children goroutines ¯\_(ツ)_/¯
|
||||
if r := recover(); r != nil {
|
||||
err = fmt.Errorf("deal failed: %s", r)
|
||||
}
|
||||
}()
|
||||
|
||||
dh.t.Logf("making storage deal %d/%d", i, opts.N)
|
||||
|
||||
deal, res, inPath := dh.MakeOnlineDeal(context.Background(), MakeFullDealParams{
|
||||
Rseed: 5 + i,
|
||||
FastRet: opts.FastRetrieval,
|
||||
StartEpoch: opts.StartEpoch,
|
||||
UseCARFileForStorageDeal: opts.UseCARFileForStorageDeal,
|
||||
})
|
||||
|
||||
// Check that the storage provider announced the deal to indexers
|
||||
if opts.IndexProvider != nil {
|
||||
notifs := opts.IndexProvider.GetNotifs()
|
||||
_, ok := notifs[string(deal.Bytes())]
|
||||
require.True(dh.t, ok)
|
||||
}
|
||||
|
||||
dh.t.Logf("retrieving deal %d/%d", i, opts.N)
|
||||
|
||||
outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, opts.CarExport)
|
||||
|
||||
if opts.CarExport {
|
||||
f, err := os.Open(outPath)
|
||||
require.NoError(dh.t, err)
|
||||
actualFile := dh.ExtractFileFromCAR(ctx, f)
|
||||
require.NoError(dh.t, f.Close())
|
||||
|
||||
AssertFilesEqual(dh.t, inPath, actualFile)
|
||||
} else {
|
||||
AssertFilesEqual(dh.t, inPath, outPath)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
require.NoError(dh.t, errgrp.Wait())
|
||||
}
|
@ -74,22 +74,6 @@ func (f TestFullNode) Shutdown(ctx context.Context) error {
	return f.Stop(ctx)
}

func (f *TestFullNode) ClientImportCARFile(ctx context.Context, rseed int, size int) (res *api.ImportRes, carv1FilePath string, origFilePath string) {
	carv1FilePath, origFilePath = CreateRandomCARv1(f.t, rseed, size)
	res, err := f.ClientImport(ctx, api.FileRef{Path: carv1FilePath, IsCAR: true})
	require.NoError(f.t, err)
	return res, carv1FilePath, origFilePath
}

// CreateImportFile creates a random file with the specified seed and size, and
// imports it into the full node.
func (f *TestFullNode) CreateImportFile(ctx context.Context, rseed int, size int) (res *api.ImportRes, path string) {
	path = CreateRandomFile(f.t, rseed, size)
	res, err := f.ClientImport(ctx, api.FileRef{Path: path})
	require.NoError(f.t, err)
	return res, path
}

// WaitTillChain waits until a specified chain condition is met. It returns
// the first tipset where the condition is met.
func (f *TestFullNode) WaitTillChain(ctx context.Context, pred ChainPredicate) *types.TipSet {
@ -1,200 +0,0 @@
|
||||
package itests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/filecoin-project/lotus/itests/kit"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
||||
)
|
||||
|
||||
func TestPathTypeFilters(t *testing.T) {
|
||||
kit.QuietMiningLogs()
|
||||
|
||||
runTest := func(t *testing.T, name string, asserts func(t *testing.T, ctx context.Context, miner *kit.TestMiner, run func())) {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
_ = logging.SetLogLevel("storageminer", "INFO")
|
||||
|
||||
var (
|
||||
client kit.TestFullNode
|
||||
miner kit.TestMiner
|
||||
wiw, wdw kit.TestWorker
|
||||
)
|
||||
ens := kit.NewEnsemble(t, kit.LatestActorsAt(-1)).
|
||||
FullNode(&client, kit.ThroughRPC()).
|
||||
Miner(&miner, &client, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.PresealSectors(2), kit.NoStorage()).
|
||||
Worker(&miner, &wiw, kit.ThroughRPC(), kit.NoStorage(), kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWinningPoSt})).
|
||||
Worker(&miner, &wdw, kit.ThroughRPC(), kit.NoStorage(), kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt})).
|
||||
Start()
|
||||
|
||||
ens.InterconnectAll().BeginMiningMustPost(2 * time.Millisecond)
|
||||
|
||||
asserts(t, ctx, &miner, func() {
|
||||
dh := kit.NewDealHarness(t, &client, &miner, &miner)
|
||||
dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1})
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
runTest(t, "invalid-type-alert", func(t *testing.T, ctx context.Context, miner *kit.TestMiner, run func()) {
|
||||
slU := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) {
|
||||
meta.CanSeal = true
|
||||
meta.AllowTypes = []string{"unsealed", "seeled"}
|
||||
})
|
||||
|
||||
storlist, err := miner.StorageList(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, storlist, 2) // 1 path we've added + preseal
|
||||
|
||||
si, err := miner.StorageInfo(ctx, slU)
|
||||
require.NoError(t, err)
|
||||
|
||||
// check that bad entries are filtered
|
||||
require.Len(t, si.DenyTypes, 0)
|
||||
require.Len(t, si.AllowTypes, 1)
|
||||
require.Equal(t, "unsealed", si.AllowTypes[0])
|
||||
|
||||
as, err := miner.LogAlerts(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
var found bool
|
||||
for _, a := range as {
|
||||
if a.Active && a.Type.System == "sector-index" && strings.HasPrefix(a.Type.Subsystem, "pathconf-") {
|
||||
require.False(t, found)
|
||||
require.Contains(t, string(a.LastActive.Message), "unknown sector file type 'seeled'")
|
||||
found = true
|
||||
}
|
||||
}
|
||||
require.True(t, found)
|
||||
})
|
||||
|
||||
runTest(t, "seal-to-stor-unseal-allowdeny", func(t *testing.T, ctx context.Context, miner *kit.TestMiner, run func()) {
|
||||
// allow all types in the sealing path
|
||||
sealScratch := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) {
|
||||
meta.CanSeal = true
|
||||
})
|
||||
|
||||
// unsealed storage
|
||||
unsStor := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) {
|
||||
meta.CanStore = true
|
||||
meta.AllowTypes = []string{"unsealed"}
|
||||
})
|
||||
|
||||
// other storage
|
||||
sealStor := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) {
|
||||
meta.CanStore = true
|
||||
meta.DenyTypes = []string{"unsealed"}
|
||||
})
|
||||
|
||||
storlist, err := miner.StorageList(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, storlist, 4) // 3 paths we've added + preseal
|
||||
|
||||
run()
|
||||
|
||||
storlist, err = miner.StorageList(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, storlist[sealScratch], 0)
|
||||
require.Len(t, storlist[unsStor], 1)
|
||||
require.Len(t, storlist[sealStor], 1)
|
||||
|
||||
require.Equal(t, storiface.FTUnsealed, storlist[unsStor][0].SectorFileType)
|
||||
require.Equal(t, storiface.FTSealed|storiface.FTCache, storlist[sealStor][0].SectorFileType)
|
||||
})
|
||||
|
||||
runTest(t, "sealstor-unseal-allowdeny", func(t *testing.T, ctx context.Context, miner *kit.TestMiner, run func()) {
|
||||
// unsealed storage
|
||||
unsStor := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) {
|
||||
meta.CanStore = true
|
||||
meta.CanSeal = true
|
||||
meta.AllowTypes = []string{"unsealed"}
|
||||
})
|
||||
|
||||
// other storage
|
||||
sealStor := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) {
|
||||
meta.CanStore = true
|
||||
meta.CanSeal = true
|
||||
meta.DenyTypes = []string{"unsealed"}
|
||||
})
|
||||
|
||||
storlist, err := miner.StorageList(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, storlist, 3) // 2 paths we've added + preseal
|
||||
|
||||
run()
|
||||
|
||||
storlist, err = miner.StorageList(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, storlist[unsStor], 1)
|
||||
require.Len(t, storlist[sealStor], 1)
|
||||
|
||||
require.Equal(t, storiface.FTUnsealed, storlist[unsStor][0].SectorFileType)
|
||||
require.Equal(t, storiface.FTSealed|storiface.FTCache, storlist[sealStor][0].SectorFileType)
|
||||
})
|
||||
|
||||
runTest(t, "seal-store-allseparate", func(t *testing.T, ctx context.Context, miner *kit.TestMiner, run func()) {
|
||||
// sealing stores
|
||||
slU := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) {
|
||||
meta.CanSeal = true
|
||||
meta.AllowTypes = []string{"unsealed"}
|
||||
})
|
||||
slS := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) {
|
||||
meta.CanSeal = true
|
||||
meta.AllowTypes = []string{"sealed"}
|
||||
})
|
||||
slC := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) {
|
||||
meta.CanSeal = true
|
||||
meta.AllowTypes = []string{"cache"}
|
||||
})
|
||||
|
||||
// storage stores
|
||||
stU := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) {
|
||||
meta.CanStore = true
|
||||
meta.AllowTypes = []string{"unsealed"}
|
||||
})
|
||||
stS := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) {
|
||||
meta.CanStore = true
|
||||
meta.AllowTypes = []string{"sealed"}
|
||||
})
|
||||
stC := miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) {
|
||||
meta.CanStore = true
|
||||
meta.AllowTypes = []string{"cache"}
|
||||
})
|
||||
|
||||
storlist, err := miner.StorageList(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, storlist, 7) // 6 paths we've added + preseal
|
||||
|
||||
run()
|
||||
|
||||
storlist, err = miner.StorageList(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, storlist[slU], 0)
|
||||
require.Len(t, storlist[slS], 0)
|
||||
require.Len(t, storlist[slC], 0)
|
||||
|
||||
require.Len(t, storlist[stU], 1)
|
||||
require.Len(t, storlist[stS], 1)
|
||||
require.Len(t, storlist[stC], 1)
|
||||
|
||||
require.Equal(t, storiface.FTUnsealed, storlist[stU][0].SectorFileType)
|
||||
require.Equal(t, storiface.FTSealed, storlist[stS][0].SectorFileType)
|
||||
require.Equal(t, storiface.FTCache, storlist[stC][0].SectorFileType)
|
||||
})
|
||||
}
|
@ -1,72 +0,0 @@
// stm: #integration
package itests

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

func TestDealsWithFinalizeEarly(t *testing.T) {
	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001

	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
	//stm: @STORAGE_INFO_001
	if testing.Short() {
		t.Skip("skipping test in short mode")
	}

	kit.QuietMiningLogs()

	var blockTime = 50 * time.Millisecond

	// We use two miners so that in case the actively tested miner misses PoSt, we still have a blockchain
	client, miner, poster, ens := kit.EnsembleOneTwo(t, kit.ThroughRPC(), kit.MutateSealingConfig(func(sc *config.SealingConfig) { sc.FinalizeEarly = true })) // no mock proofs.
	ens.InterconnectAll().BeginMiningMustPost(blockTime, poster)
	dh := kit.NewDealHarness(t, client, miner, miner)

	ctx := context.Background()

	miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) {
		meta.Weight = 1000000000
		meta.CanSeal = true
	})
	miner.AddStorage(ctx, t, func(meta *storiface.LocalStorageMeta) {
		meta.Weight = 1000000000
		meta.CanStore = true
	})

	//stm: @STORAGE_LIST_001
	sl, err := miner.StorageList(ctx)
	require.NoError(t, err)
	for si, d := range sl {
		i, err := miner.StorageInfo(ctx, si)
		require.NoError(t, err)

		fmt.Printf("stor d:%d %+v\n", len(d), i)
	}

	t.Run("single", func(t *testing.T) {
		dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1})
	})

	//stm: @STORAGE_LIST_001
	sl, err = miner.StorageList(ctx)
	require.NoError(t, err)
	for si, d := range sl {
		i, err := miner.StorageInfo(ctx, si)
		require.NoError(t, err)

		fmt.Printf("stor d:%d %+v\n", len(d), i)
	}
}
@ -585,82 +585,3 @@ waitForProof:
|
||||
require.NoError(t, params.UnmarshalCBOR(bytes.NewBuffer(slmsg.Params)))
|
||||
require.Equal(t, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1, params.Proofs[0].PoStProof)
|
||||
}
|
||||
|
||||
func TestWorkerPledgeExpireCommit(t *testing.T) {
|
||||
kit.QuietMiningLogs()
|
||||
_ = logging.SetLogLevel("sectors", "debug")
|
||||
|
||||
var tasksNoC2 = kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTAddPiece, sealtasks.TTDataCid, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2,
|
||||
sealtasks.TTUnseal, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFinalizeUnsealed})
|
||||
|
||||
fc := config.DefaultStorageMiner().Fees
|
||||
fc.MaxCommitGasFee = types.FIL(abi.NewTokenAmount(10000)) // 10000 attofil, way too low for anything to land
|
||||
|
||||
ctx := context.Background()
|
||||
client, miner, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.WithNoLocalSealing(true),
|
||||
kit.MutateSealingConfig(func(sc *config.SealingConfig) {
|
||||
sc.AggregateCommits = true
|
||||
}),
|
||||
kit.ConstructorOpts(
|
||||
node.Override(new(*sealing.Sealing), modules.SealingPipeline(fc)),
|
||||
),
|
||||
kit.SplitstoreDisable(), // disable splitstore because messages which take a long time may get dropped
|
||||
tasksNoC2) // no mock proofs
|
||||
|
||||
ens.InterconnectAll().BeginMiningMustPost(2 * time.Millisecond)
|
||||
|
||||
e, err := worker.Enabled(ctx)
|
||||
require.NoError(t, err)
|
||||
require.True(t, e)
|
||||
|
||||
dh := kit.NewDealHarness(t, client, miner, miner)
|
||||
|
||||
startEpoch := abi.ChainEpoch(4 << 10)
|
||||
|
||||
dh.StartRandomDeal(ctx, kit.MakeFullDealParams{
|
||||
Rseed: 7,
|
||||
StartEpoch: startEpoch,
|
||||
})
|
||||
|
||||
var sn abi.SectorNumber
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
s, err := miner.SectorsListNonGenesis(ctx)
|
||||
require.NoError(t, err)
|
||||
if len(s) == 0 {
|
||||
return false
|
||||
}
|
||||
if len(s) > 1 {
|
||||
t.Fatalf("expected 1 sector, got %d", len(s))
|
||||
}
|
||||
sn = s[0]
|
||||
return true
|
||||
}, 30*time.Second, 1*time.Second)
|
||||
|
||||
t.Log("sector", sn)
|
||||
|
||||
t.Log("sector committing")
|
||||
|
||||
// wait until after startEpoch
|
||||
client.WaitTillChain(ctx, kit.HeightAtLeast(startEpoch+20))
|
||||
|
||||
t.Log("after start")
|
||||
|
||||
sstate, err := miner.SectorsStatus(ctx, sn, false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, api.SectorState(sealing.SubmitCommitAggregate), sstate.State)
|
||||
|
||||
_, err = miner.SectorCommitFlush(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
sstate, err := miner.SectorsStatus(ctx, sn, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Logf("sector state: %s", sstate.State)
|
||||
|
||||
return sstate.State == api.SectorState(sealing.Removed)
|
||||
}, 30*time.Second, 1*time.Second)
|
||||
|
||||
t.Log("sector removed")
|
||||
}
|
||||
|
@ -1,170 +0,0 @@
|
||||
package itests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/itests/kit"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
sealing "github.com/filecoin-project/lotus/storage/pipeline"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
||||
)
|
||||
|
||||
func TestWorkerUpgradeAbortCleanup(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
blockTime := 1 * time.Millisecond
|
||||
kit.QuietMiningLogs()
|
||||
|
||||
client, miner, ens := kit.EnsembleMinimal(t, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.WithNoLocalSealing(true),
|
||||
kit.NoStorage(), // no storage to have better control over path settings
|
||||
kit.MutateSealingConfig(func(sc *config.SealingConfig) { sc.FinalizeEarly = true })) // no mock proofs
|
||||
|
||||
var worker kit.TestWorker
|
||||
ens.Worker(miner, &worker, kit.ThroughRPC(), kit.NoStorage(), // no storage to have better control over path settings
|
||||
kit.WithTaskTypes([]sealtasks.TaskType{
|
||||
sealtasks.TTFetch, sealtasks.TTAddPiece,
|
||||
sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFinalizeUnsealed, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2,
|
||||
sealtasks.TTReplicaUpdate, // only first update step, later steps will not run and we'll abort
|
||||
}),
|
||||
)
|
||||
|
||||
ens.Start().InterconnectAll().BeginMiningMustPost(blockTime)
|
||||
|
||||
maddr, err := miner.ActorAddress(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// get storage paths
|
||||
|
||||
// store-only path on the miner
|
||||
miner.AddStorage(ctx, t, func(cfg *storiface.LocalStorageMeta) {
|
||||
cfg.CanSeal = false
|
||||
cfg.CanStore = true
|
||||
})
|
||||
|
||||
mlocal, err := miner.StorageLocal(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, mlocal, 2) // genesis and one local
|
||||
|
||||
// we want a seal-only path on the worker disconnected from miner path
|
||||
worker.AddStorage(ctx, t, func(cfg *storiface.LocalStorageMeta) {
|
||||
cfg.CanSeal = true
|
||||
cfg.CanStore = false
|
||||
})
|
||||
|
||||
wpaths, err := worker.Paths(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, wpaths, 1)
|
||||
|
||||
// check sectors in paths
|
||||
checkSectors := func(miners, workers storiface.SectorFileType) {
|
||||
paths, err := miner.StorageList(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, paths, 3) // genesis, miner, worker
|
||||
|
||||
// first loop for debugging
|
||||
for id, decls := range paths {
|
||||
pinfo, err := miner.StorageInfo(ctx, id)
|
||||
require.NoError(t, err)
|
||||
|
||||
switch {
|
||||
case id == wpaths[0].ID: // worker path
|
||||
fmt.Println("Worker Decls ", len(decls), decls)
|
||||
case !pinfo.CanStore && !pinfo.CanSeal: // genesis path
|
||||
fmt.Println("Genesis Decls ", len(decls), decls)
|
||||
default: // miner path
|
||||
fmt.Println("Miner Decls ", len(decls), decls)
|
||||
}
|
||||
}
|
||||
|
||||
for id, decls := range paths {
|
||||
pinfo, err := miner.StorageInfo(ctx, id)
|
||||
require.NoError(t, err)
|
||||
|
||||
switch {
|
||||
case id == wpaths[0].ID: // worker path
|
||||
if workers != storiface.FTNone {
|
||||
require.Len(t, decls, 1)
|
||||
require.EqualValues(t, workers.Strings(), decls[0].SectorFileType.Strings())
|
||||
} else {
|
||||
require.Len(t, decls, 0)
|
||||
}
|
||||
case !pinfo.CanStore && !pinfo.CanSeal: // genesis path
|
||||
require.Len(t, decls, kit.DefaultPresealsPerBootstrapMiner)
|
||||
default: // miner path
|
||||
if miners != storiface.FTNone {
|
||||
require.Len(t, decls, 1)
|
||||
require.EqualValues(t, miners.Strings(), decls[0].SectorFileType.Strings())
|
||||
} else {
|
||||
require.Len(t, decls, 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
checkSectors(storiface.FTNone, storiface.FTNone)
|
||||
|
||||
// get a sector for upgrading
|
||||
miner.PledgeSectors(ctx, 1, 0, nil)
|
||||
sl, err := miner.SectorsListNonGenesis(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, sl, 1, "expected 1 sector")
|
||||
|
||||
snum := sl[0]
|
||||
|
||||
checkSectors(storiface.FTCache|storiface.FTSealed, storiface.FTNone)
|
||||
|
||||
client.WaitForSectorActive(ctx, t, snum, maddr)
|
||||
|
||||
// make available
|
||||
err = miner.SectorMarkForUpgrade(ctx, snum, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Start a deal
|
||||
|
||||
dh := kit.NewDealHarness(t, client, miner, miner)
|
||||
res, _ := client.CreateImportFile(ctx, 123, 0)
|
||||
dp := dh.DefaultStartDealParams()
|
||||
dp.Data.Root = res.Root
|
||||
deal := dh.StartDeal(ctx, dp)
|
||||
|
||||
// wait for the deal to be in a sector
|
||||
dh.WaitDealSealed(ctx, deal, true, false, nil)
|
||||
|
||||
// wait for replica update to happen
|
||||
require.Eventually(t, func() bool {
|
||||
sstate, err := miner.SectorsStatus(ctx, snum, false)
|
||||
require.NoError(t, err)
|
||||
return sstate.State == api.SectorState(sealing.ProveReplicaUpdate)
|
||||
}, 10*time.Second, 50*time.Millisecond)
|
||||
|
||||
// check that the sector was copied to the worker
|
||||
checkSectors(storiface.FTCache|storiface.FTSealed, storiface.FTCache|storiface.FTSealed|storiface.FTUnsealed|storiface.FTUpdate|storiface.FTUpdateCache)
|
||||
|
||||
// abort upgrade
|
||||
err = miner.SectorAbortUpgrade(ctx, snum)
|
||||
require.NoError(t, err)
|
||||
|
||||
// the task is stuck in scheduler, so manually abort the task to get the sector fsm moving
|
||||
si := miner.SchedInfo(ctx)
|
||||
err = miner.SealingRemoveRequest(ctx, si.SchedInfo.Requests[0].SchedId)
|
||||
require.NoError(t, err)
|
||||
|
||||
var lastState api.SectorState
|
||||
require.Eventually(t, func() bool {
|
||||
sstate, err := miner.SectorsStatus(ctx, snum, false)
|
||||
require.NoError(t, err)
|
||||
lastState = sstate.State
|
||||
|
||||
return sstate.State == api.SectorState(sealing.Proving)
|
||||
}, 10*time.Second, 50*time.Millisecond, "last state was %s", &lastState)
|
||||
|
||||
// check that nothing was left on the worker
|
||||
checkSectors(storiface.FTCache|storiface.FTSealed, storiface.FTNone)
|
||||
}
|
@ -1,91 +0,0 @@
package client

import (
	"fmt"
	"io"

	"github.com/ipfs/go-cid"
	cbor "github.com/ipfs/go-ipld-cbor"
	"github.com/ipld/go-car/util"
	"github.com/multiformats/go-varint"
)

// —————————————————————————————————————————————————————————
//
// This code is temporary, and should be deleted when
// https://github.com/ipld/go-car/issues/196 is resolved.
//
// —————————————————————————————————————————————————————————

func init() {
	cbor.RegisterCborType(CarHeader{})
}

type CarHeader struct {
	Roots   []cid.Cid
	Version uint64
}

func readHeader(r io.Reader) (*CarHeader, error) {
	hb, err := ldRead(r, false)
	if err != nil {
		return nil, err
	}

	var ch CarHeader
	if err := cbor.DecodeInto(hb, &ch); err != nil {
		return nil, fmt.Errorf("invalid header: %v", err)
	}

	return &ch, nil
}

func writeHeader(h *CarHeader, w io.Writer) error {
	hb, err := cbor.DumpObject(h)
	if err != nil {
		return err
	}

	return util.LdWrite(w, hb)
}

func ldRead(r io.Reader, zeroLenAsEOF bool) ([]byte, error) {
	l, err := varint.ReadUvarint(toByteReader(r))
	if err != nil {
		// If the length of bytes read is non-zero when the error is EOF then signal an unclean EOF.
		if l > 0 && err == io.EOF {
			return nil, io.ErrUnexpectedEOF
		}
		return nil, err
	} else if l == 0 && zeroLenAsEOF {
		return nil, io.EOF
	}

	buf := make([]byte, l)
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, err
	}

	return buf, nil
}

type readerPlusByte struct {
	io.Reader
}

func (rb readerPlusByte) ReadByte() (byte, error) {
	return readByte(rb)
}

func readByte(r io.Reader) (byte, error) {
	var p [1]byte
	_, err := io.ReadFull(r, p[:])
	return p[0], err
}

func toByteReader(r io.Reader) io.ByteReader {
	if br, ok := r.(io.ByteReader); ok {
		return br
	}
	return &readerPlusByte{r}
}
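// printCarHeader is an illustrative example, not part of the original file:
// it shows how the temporary helpers above fit together. A CARv1 stream is a
// sequence of uvarint-length-prefixed sections, the first of which is the
// CBOR-encoded header, so readHeader can be pointed directly at a CAR file.
// Assumes it lives in this package and that "os" is added to the imports;
// the file path is hypothetical.
func printCarHeader(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close() //nolint:errcheck

	// readHeader consumes the header section and leaves the reader positioned
	// at the first block section of the CAR stream.
	h, err := readHeader(f)
	if err != nil {
		return err
	}

	fmt.Printf("CARv%d with %d root(s): %v\n", h.Version, len(h.Roots), h.Roots)
	return nil
}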
File diff suppressed because it is too large
@ -1,136 +0,0 @@
// stm: #unit
package client

import (
	"bytes"
	"context"
	"embed"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/ipfs/boxo/blockservice"
	blockstore "github.com/ipfs/boxo/blockstore"
	offline "github.com/ipfs/boxo/exchange/offline"
	"github.com/ipfs/boxo/files"
	"github.com/ipfs/boxo/ipld/merkledag"
	unixfile "github.com/ipfs/boxo/ipld/unixfs/file"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	"github.com/ipld/go-car"
	carv2 "github.com/ipld/go-car/v2"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/markets/storageadapter"
	"github.com/filecoin-project/lotus/node/repo/imports"
)

//go:embed testdata/*
var testdata embed.FS

func TestImportLocal(t *testing.T) {
	// stm: @CLIENT_STORAGE_DEALS_IMPORT_LOCAL_001, @CLIENT_RETRIEVAL_FIND_001
	ds := dssync.MutexWrap(datastore.NewMapDatastore())
	dir := t.TempDir()
	im := imports.NewManager(ds, dir)
	ctx := context.Background()

	a := &API{
		Imports:                   im,
		StorageBlockstoreAccessor: storageadapter.NewImportsBlockstoreAccessor(im),
	}

	b, err := testdata.ReadFile("testdata/payload.txt")
	require.NoError(t, err)

	// stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
	root, err := a.ClientImportLocal(ctx, bytes.NewReader(b))
	require.NoError(t, err)
	require.NotEqual(t, cid.Undef, root)

	list, err := a.ClientListImports(ctx)
	require.NoError(t, err)
	require.Len(t, list, 1)

	it := list[0]
	require.Equal(t, root, *it.Root)
	require.True(t, strings.HasPrefix(it.CARPath, dir))

	// stm: @CLIENT_DATA_HAS_LOCAL_001
	local, err := a.ClientHasLocal(ctx, root)
	require.NoError(t, err)
	require.True(t, local)

	order := api.ExportRef{
		Root:         root,
		FromLocalCAR: it.CARPath,
	}

	// retrieve as UnixFS.
	out1 := filepath.Join(dir, "retrieval1.data") // as unixfs
	out2 := filepath.Join(dir, "retrieval2.data") // as car
	err = a.ClientExport(ctx, order, api.FileRef{
		Path: out1,
	})
	require.NoError(t, err)

	outBytes, err := os.ReadFile(out1)
	require.NoError(t, err)
	require.Equal(t, b, outBytes)

	err = a.ClientExport(ctx, order, api.FileRef{
		Path:  out2,
		IsCAR: true,
	})
	require.NoError(t, err)

	// open the CARv2 being custodied by the import manager
	orig, err := carv2.OpenReader(it.CARPath)
	require.NoError(t, err)

	// open the CARv1 we just exported
	exported, err := carv2.OpenReader(out2)
	require.NoError(t, err)

	require.EqualValues(t, 1, exported.Version)
	require.EqualValues(t, 2, orig.Version)

	origRoots, err := orig.Roots()
	require.NoError(t, err)
	require.Len(t, origRoots, 1)

	exportedRoots, err := exported.Roots()
	require.NoError(t, err)
	require.Len(t, exportedRoots, 1)

	require.EqualValues(t, origRoots, exportedRoots)

	// recreate the unixfs dag, and see if it matches the original file byte by byte
	// import the car into a memory blockstore, then export the unixfs file.
	bs := blockstore.NewBlockstore(datastore.NewMapDatastore())
	r, err := exported.DataReader()
	require.NoError(t, err)
	_, err = car.LoadCar(ctx, bs, r)
	require.NoError(t, err)

	dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))

	nd, err := dag.Get(ctx, exportedRoots[0])
	require.NoError(t, err)

	file, err := unixfile.NewUnixfsFile(ctx, dag, nd)
	require.NoError(t, err)

	exportedPath := filepath.Join(dir, "exported.data")
	err = files.WriteTo(file, exportedPath)
	require.NoError(t, err)

	exportedBytes, err := os.ReadFile(exportedPath)
	require.NoError(t, err)

	// compare original file to recreated unixfs file.
	require.Equal(t, b, exportedBytes)
}
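Note: the removed test tells the CARv1 export apart from the CARv2 copy held by the import manager by opening both with the go-car/v2 reader. Below is a minimal sketch of that inspection, reusing the same `carv2.OpenReader`, `Version`, and `Roots()` calls that appear in the deleted test; the file path is a placeholder.

```go
package main

import (
	"fmt"

	carv2 "github.com/ipld/go-car/v2"
)

func main() {
	// OpenReader handles both CARv1 and CARv2 files; Version reports which one was read.
	r, err := carv2.OpenReader("/tmp/example.car") // hypothetical path to any CAR file
	if err != nil {
		panic(err)
	}
	defer r.Close() //nolint:errcheck

	roots, err := r.Roots()
	if err != nil {
		panic(err)
	}
	fmt.Printf("CAR version %d with %d root(s): %v\n", r.Version, len(roots), roots)
}
```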
File diff suppressed because one or more lines are too long
49
node/impl/client/testdata/payload.txt
vendored
@ -1,49 +0,0 @@
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Vitae semper quis lectus nulla at volutpat diam ut venenatis. Ac tortor dignissim convallis aenean et tortor at. Faucibus ornare suspendisse sed nisi lacus sed. Commodo ullamcorper a lacus vestibulum sed arcu non. Est pellentesque elit ullamcorper dignissim. Quam quisque id diam vel quam. Pretium aenean pharetra magna ac. In nulla posuere sollicitudin aliquam ultrices. Sed arcu non odio euismod lacinia at. Suspendisse ultrices gravida dictum fusce ut placerat orci nulla pellentesque. Feugiat vivamus at augue eget arcu.
Pellentesque nec nam aliquam sem et tortor. Vitae tortor condimentum lacinia quis vel. Cras pulvinar mattis nunc sed. In massa tempor nec feugiat. Ornare arcu odio ut sem nulla. Diam maecenas sed enim ut sem. Pretium vulputate sapien nec sagittis. Bibendum arcu vitae elementum curabitur vitae nunc sed velit dignissim. Duis ut diam quam nulla porttitor massa. Viverra mauris in aliquam sem fringilla ut morbi. Ullamcorper eget nulla facilisi etiam dignissim. Vulputate mi sit amet mauris commodo quis imperdiet massa tincidunt. Nunc consequat interdum varius sit. Nunc mi ipsum faucibus vitae aliquet nec ullamcorper. Nunc sed augue lacus viverra. Lobortis scelerisque fermentum dui faucibus in ornare quam. Urna neque viverra justo nec ultrices. Varius vel pharetra vel turpis nunc eget lorem dolor sed.
Feugiat nisl pretium fusce id velit ut tortor pretium. Lorem dolor sed viverra ipsum nunc aliquet bibendum. Ultrices vitae auctor eu augue ut lectus. Pharetra massa massa ultricies mi quis. Nibh cras pulvinar mattis nunc sed blandit libero. Ac felis donec et odio pellentesque diam volutpat. Lectus proin nibh nisl condimentum id venenatis. Quis vel eros donec ac odio. Commodo sed egestas egestas fringilla phasellus faucibus scelerisque eleifend donec. Adipiscing diam donec adipiscing tristique.
Tempus imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Libero nunc consequat interdum varius sit. Et pharetra pharetra massa massa. Feugiat pretium nibh ipsum consequat. Amet commodo nulla facilisi nullam vehicula. Ornare arcu dui vivamus arcu felis bibendum ut tristique. At erat pellentesque adipiscing commodo elit at imperdiet dui. Auctor neque vitae tempus quam pellentesque nec nam aliquam sem. Eget velit aliquet sagittis id consectetur. Enim diam vulputate ut pharetra sit amet aliquam id diam. Eget velit aliquet sagittis id consectetur purus ut faucibus pulvinar. Amet porttitor eget dolor morbi. Felis eget velit aliquet sagittis id. Facilisis magna etiam tempor orci eu. Lacus suspendisse faucibus interdum posuere lorem. Pharetra et ultrices neque ornare aenean euismod. Platea dictumst quisque sagittis purus.
Quis varius quam quisque id diam vel quam elementum. Augue mauris augue neque gravida in fermentum et sollicitudin. Sapien nec sagittis aliquam malesuada bibendum arcu. Urna duis convallis convallis tellus id interdum velit. Tellus in hac habitasse platea dictumst vestibulum. Fames ac turpis egestas maecenas pharetra convallis. Diam volutpat commodo sed egestas egestas fringilla phasellus faucibus. Placerat orci nulla pellentesque dignissim enim sit amet venenatis. Sed adipiscing diam donec adipiscing. Praesent elementum facilisis leo vel fringilla est. Sed enim ut sem viverra aliquet eget sit amet tellus. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra. Turpis egestas pretium aenean pharetra magna ac placerat vestibulum. Massa id neque aliquam vestibulum morbi blandit cursus risus. Vitae congue eu consequat ac. Egestas erat imperdiet sed euismod nisi porta lorem mollis aliquam. Dolor purus non enim praesent elementum facilisis. Ultrices mi tempus imperdiet nulla malesuada pellentesque elit. In est ante in nibh.
Facilisis gravida neque convallis a. Urna nunc id cursus metus aliquam eleifend mi. Lacus luctus accumsan tortor posuere ac. Molestie nunc non blandit massa. Iaculis urna id volutpat lacus laoreet non. Cursus vitae congue mauris rhoncus aenean. Nunc vel risus commodo viverra maecenas. A pellentesque sit amet porttitor eget dolor morbi. Leo vel orci porta non pulvinar neque laoreet suspendisse. Sit amet facilisis magna etiam tempor. Consectetur a erat nam at lectus urna duis convallis convallis. Vestibulum morbi blandit cursus risus at ultrices. Dolor purus non enim praesent elementum. Adipiscing elit pellentesque habitant morbi tristique senectus et netus et. Et odio pellentesque diam volutpat commodo sed egestas egestas fringilla. Leo vel fringilla est ullamcorper eget nulla. Dui ut ornare lectus sit amet. Erat pellentesque adipiscing commodo elit at imperdiet dui accumsan sit.
Tristique senectus et netus et. Pellentesque diam volutpat commodo sed egestas egestas fringilla. Mauris pharetra et ultrices neque ornare aenean. Amet tellus cras adipiscing enim. Convallis aenean et tortor at risus viverra adipiscing at. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo. Dictumst vestibulum rhoncus est pellentesque elit. Fringilla ut morbi tincidunt augue interdum velit euismod in pellentesque. Dictum at tempor commodo ullamcorper a lacus vestibulum. Sed viverra tellus in hac habitasse platea. Sed id semper risus in hendrerit. In hendrerit gravida rutrum quisque non tellus orci ac. Sit amet risus nullam eget. Sit amet est placerat in egestas erat imperdiet sed. In nisl nisi scelerisque eu ultrices. Sit amet mattis vulputate enim nulla aliquet.
Dignissim suspendisse in est ante in nibh mauris cursus. Vitae proin sagittis nisl rhoncus. Id leo in vitae turpis massa sed elementum. Lobortis elementum nibh tellus molestie nunc non blandit massa enim. Arcu dictum varius duis at consectetur. Suspendisse faucibus interdum posuere lorem ipsum dolor sit amet consectetur. Imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Sed adipiscing diam donec adipiscing. Purus sit amet volutpat consequat mauris nunc congue nisi vitae. Elementum nisi quis eleifend quam adipiscing vitae proin sagittis nisl. Mattis ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Sit amet nisl purus in mollis nunc sed. Turpis tincidunt id aliquet risus feugiat in ante. Id diam maecenas ultricies mi eget mauris pharetra et ultrices.
Aliquam purus sit amet luctus venenatis lectus magna fringilla urna. Id diam vel quam elementum pulvinar. Elementum sagittis vitae et leo duis. Viverra aliquet eget sit amet tellus cras adipiscing enim eu. Et tortor at risus viverra adipiscing at in tellus integer. Purus in massa tempor nec feugiat. Augue neque gravida in fermentum et sollicitudin ac orci. Sodales ut eu sem integer vitae justo eget magna fermentum. Netus et malesuada fames ac. Augue interdum velit euismod in. Sed elementum tempus egestas sed sed risus pretium. Mattis vulputate enim nulla aliquet porttitor lacus luctus. Dui vivamus arcu felis bibendum ut tristique et egestas quis.
Viverra justo nec ultrices dui sapien. Quisque egestas diam in arcu cursus euismod quis viverra nibh. Nam libero justo laoreet sit amet cursus sit amet. Lacus sed viverra tellus in hac habitasse. Blandit aliquam etiam erat velit scelerisque in. Ut sem nulla pharetra diam sit amet nisl suscipit adipiscing. Diam sollicitudin tempor id eu nisl nunc. Eget duis at tellus at urna condimentum mattis. Urna porttitor rhoncus dolor purus non enim praesent elementum facilisis. Sed turpis tincidunt id aliquet risus feugiat. Est velit egestas dui id ornare arcu odio ut sem. Nibh sit amet commodo nulla facilisi nullam vehicula. Sit amet consectetur adipiscing elit duis tristique sollicitudin. Eu facilisis sed odio morbi. Massa id neque aliquam vestibulum morbi. In eu mi bibendum neque egestas congue quisque egestas. Massa sed elementum tempus egestas sed sed risus. Quam elementum pulvinar etiam non. At augue eget arcu dictum varius duis at consectetur lorem.
Penatibus et magnis dis parturient montes nascetur ridiculus. Dictumst quisque sagittis purus sit amet volutpat consequat. Bibendum at varius vel pharetra. Sed adipiscing diam donec adipiscing tristique risus nec feugiat in. Phasellus faucibus scelerisque eleifend donec pretium. Vitae tortor condimentum lacinia quis vel eros. Ac tincidunt vitae semper quis lectus nulla at volutpat diam. Eget sit amet tellus cras adipiscing. Morbi tristique senectus et netus. Nullam vehicula ipsum a arcu cursus vitae congue mauris rhoncus. Auctor urna nunc id cursus metus aliquam eleifend. Ultrices vitae auctor eu augue. Eu non diam phasellus vestibulum lorem sed risus ultricies. Fames ac turpis egestas sed tempus. Volutpat blandit aliquam etiam erat. Dictum varius duis at consectetur lorem. Sit amet volutpat consequat mauris nunc congue. Volutpat sed cras ornare arcu dui vivamus arcu felis.
Scelerisque fermentum dui faucibus in ornare quam viverra. Interdum velit laoreet id donec ultrices tincidunt arcu. Netus et malesuada fames ac. Netus et malesuada fames ac turpis. Suscipit tellus mauris a diam maecenas sed enim ut sem. Id velit ut tortor pretium. Neque aliquam vestibulum morbi blandit cursus risus at. Cum sociis natoque penatibus et magnis dis parturient. Lobortis elementum nibh tellus molestie nunc non blandit. Ipsum dolor sit amet consectetur adipiscing elit duis tristique. Amet nisl purus in mollis. Amet massa vitae tortor condimentum lacinia quis vel eros donec. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo.
Nullam ac tortor vitae purus faucibus. Dis parturient montes nascetur ridiculus mus mauris. Molestie at elementum eu facilisis sed odio morbi. Scelerisque felis imperdiet proin fermentum leo vel orci porta. Lectus proin nibh nisl condimentum id venenatis a. Eget nullam non nisi est sit amet facilisis. Hendrerit gravida rutrum quisque non tellus orci ac auctor. Ut faucibus pulvinar elementum integer enim. Rhoncus dolor purus non enim praesent elementum facilisis. Enim sed faucibus turpis in eu mi bibendum. Faucibus nisl tincidunt eget nullam.
Cursus risus at ultrices mi tempus imperdiet nulla malesuada pellentesque. Pretium nibh ipsum consequat nisl vel pretium lectus quam. Semper viverra nam libero justo laoreet sit amet cursus sit. Augue eget arcu dictum varius duis at consectetur lorem donec. Et malesuada fames ac turpis. Erat nam at lectus urna duis convallis convallis. Dictum sit amet justo donec enim. Urna condimentum mattis pellentesque id nibh tortor id. Morbi tempus iaculis urna id. Lectus proin nibh nisl condimentum id venenatis a condimentum. Nibh sit amet commodo nulla facilisi nullam vehicula. Dui faucibus in ornare quam. Gravida arcu ac tortor dignissim convallis aenean. Consectetur adipiscing elit pellentesque habitant morbi tristique. Pulvinar elementum integer enim neque volutpat ac tincidunt vitae. Pharetra pharetra massa massa ultricies mi quis hendrerit. Dictum at tempor commodo ullamcorper a lacus vestibulum sed. Mattis pellentesque id nibh tortor id. Ultricies integer quis auctor elit sed vulputate. Pretium vulputate sapien nec sagittis aliquam malesuada.
Auctor augue mauris augue neque gravida. Porttitor lacus luctus accumsan tortor posuere ac ut. Urna neque viverra justo nec ultrices dui. Sit amet est placerat in egestas. Urna nec tincidunt praesent semper feugiat nibh sed pulvinar. Tincidunt eget nullam non nisi est sit amet facilisis magna. Elementum tempus egestas sed sed risus pretium quam vulputate dignissim. Fermentum posuere urna nec tincidunt praesent semper feugiat nibh sed. Porttitor eget dolor morbi non arcu risus quis. Non quam lacus suspendisse faucibus interdum. Venenatis cras sed felis eget velit aliquet sagittis id. Arcu ac tortor dignissim convallis aenean et. Morbi tincidunt ornare massa eget egestas purus. Ac feugiat sed lectus vestibulum mattis ullamcorper velit sed ullamcorper. Vestibulum morbi blandit cursus risus at ultrices. Volutpat blandit aliquam etiam erat velit scelerisque.
Et egestas quis ipsum suspendisse. Amet consectetur adipiscing elit duis. Purus ut faucibus pulvinar elementum integer enim neque. Cursus vitae congue mauris rhoncus aenean vel elit scelerisque mauris. Tincidunt eget nullam non nisi est. Aliquam purus sit amet luctus. Dui ut ornare lectus sit amet est placerat in. Fringilla ut morbi tincidunt augue interdum velit euismod in. Felis eget nunc lobortis mattis aliquam faucibus purus in. Suspendisse interdum consectetur libero id faucibus nisl.
Scelerisque fermentum dui faucibus in ornare quam. Lectus proin nibh nisl condimentum id venenatis a condimentum vitae. Fames ac turpis egestas integer eget aliquet nibh praesent tristique. Arcu non sodales neque sodales ut etiam sit. Pharetra convallis posuere morbi leo urna. Nec dui nunc mattis enim ut tellus. Nunc sed augue lacus viverra vitae. Consequat id porta nibh venenatis cras sed felis. Dolor sit amet consectetur adipiscing. Tellus rutrum tellus pellentesque eu tincidunt tortor aliquam nulla.
Metus aliquam eleifend mi in nulla posuere. Blandit massa enim nec dui nunc mattis enim. Aliquet nibh praesent tristique magna. In aliquam sem fringilla ut. Magna fermentum iaculis eu non. Eget aliquet nibh praesent tristique magna sit amet purus. Ultrices gravida dictum fusce ut placerat orci. Fermentum posuere urna nec tincidunt praesent. Enim tortor at auctor urna nunc. Ridiculus mus mauris vitae ultricies leo integer malesuada nunc vel. Sed id semper risus in hendrerit gravida rutrum. Vestibulum lectus mauris ultrices eros in cursus turpis. Et sollicitudin ac orci phasellus egestas tellus rutrum. Pellentesque elit ullamcorper dignissim cras tincidunt lobortis feugiat vivamus at. Metus vulputate eu scelerisque felis imperdiet proin fermentum leo. Porta non pulvinar neque laoreet suspendisse. Suscipit adipiscing bibendum est ultricies integer quis auctor elit sed. Euismod in pellentesque massa placerat duis ultricies lacus sed. Pellentesque adipiscing commodo elit at imperdiet dui accumsan sit amet.
Pellentesque eu tincidunt tortor aliquam nulla facilisi. Commodo nulla facilisi nullam vehicula ipsum a arcu. Commodo quis imperdiet massa tincidunt nunc pulvinar sapien et. Faucibus purus in massa tempor. Purus semper eget duis at tellus at urna condimentum. Vivamus at augue eget arcu dictum. Lacus vel facilisis volutpat est velit egestas dui id. Malesuada fames ac turpis egestas maecenas pharetra. Nunc faucibus a pellentesque sit amet porttitor eget dolor. Ultricies tristique nulla aliquet enim. Vel risus commodo viverra maecenas accumsan lacus vel facilisis volutpat. Dignissim diam quis enim lobortis scelerisque. Donec ultrices tincidunt arcu non sodales neque sodales ut etiam.
Vitae proin sagittis nisl rhoncus mattis rhoncus urna neque. Fermentum leo vel orci porta non. At elementum eu facilisis sed. Quis enim lobortis scelerisque fermentum. Fermentum odio eu feugiat pretium nibh ipsum consequat. Habitant morbi tristique senectus et netus et. Enim praesent elementum facilisis leo vel fringilla est ullamcorper. Egestas quis ipsum suspendisse ultrices gravida dictum. Nam libero justo laoreet sit amet cursus sit amet. Viverra tellus in hac habitasse platea dictumst vestibulum. Varius vel pharetra vel turpis nunc eget. Nullam non nisi est sit amet facilisis magna. Ullamcorper eget nulla facilisi etiam dignissim diam. Ante metus dictum at tempor commodo ullamcorper a lacus.
Etiam non quam lacus suspendisse. Ut venenatis tellus in metus vulputate eu scelerisque felis. Pulvinar sapien et ligula ullamcorper malesuada proin libero. Consequat interdum varius sit amet mattis. Nunc eget lorem dolor sed viverra ipsum nunc aliquet. Potenti nullam ac tortor vitae purus faucibus ornare. Urna et pharetra pharetra massa massa ultricies mi quis hendrerit. Purus in mollis nunc sed id. Pharetra vel turpis nunc eget lorem dolor sed viverra. Et netus et malesuada fames ac turpis. Libero id faucibus nisl tincidunt eget nullam non nisi. Cursus sit amet dictum sit amet. Porttitor lacus luctus accumsan tortor.
Volutpat diam ut venenatis tellus in metus vulputate eu scelerisque. Sed viverra tellus in hac habitasse. Aliquam sem et tortor consequat id. Pellentesque habitant morbi tristique senectus et netus et. Consectetur purus ut faucibus pulvinar elementum. Aliquam malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Sollicitudin tempor id eu nisl nunc mi ipsum. Fringilla phasellus faucibus scelerisque eleifend donec pretium vulputate sapien nec. Quis eleifend quam adipiscing vitae proin sagittis nisl rhoncus. Bibendum neque egestas congue quisque egestas. A iaculis at erat pellentesque adipiscing commodo elit at imperdiet. Pulvinar etiam non quam lacus. Adipiscing commodo elit at imperdiet. Scelerisque eu ultrices vitae auctor. Sed cras ornare arcu dui vivamus arcu felis bibendum ut. Ornare lectus sit amet est.
Consequat semper viverra nam libero justo laoreet sit. Imperdiet sed euismod nisi porta lorem mollis aliquam ut porttitor. Cras sed felis eget velit aliquet sagittis id consectetur. Dolor morbi non arcu risus quis. Adipiscing tristique risus nec feugiat in fermentum posuere urna. Dolor magna eget est lorem ipsum dolor. Mauris pharetra et ultrices neque ornare aenean euismod. Nulla facilisi etiam dignissim diam quis. Ultrices tincidunt arcu non sodales. Fames ac turpis egestas maecenas pharetra convallis posuere morbi leo. Interdum varius sit amet mattis vulputate. Tincidunt praesent semper feugiat nibh sed pulvinar. Quisque sagittis purus sit amet volutpat.
Sed vulputate odio ut enim blandit. Vitae auctor eu augue ut lectus arcu bibendum. Consectetur adipiscing elit pellentesque habitant morbi tristique senectus et. Scelerisque eu ultrices vitae auctor eu augue. Etiam dignissim diam quis enim lobortis scelerisque fermentum dui faucibus. Tellus integer feugiat scelerisque varius. Vulputate enim nulla aliquet porttitor lacus luctus accumsan tortor. Amet nisl purus in mollis. Scelerisque viverra mauris in aliquam sem fringilla ut morbi tincidunt. Semper eget duis at tellus at. Erat velit scelerisque in dictum non consectetur a erat nam. Gravida rutrum quisque non tellus orci. Morbi blandit cursus risus at. Mauris sit amet massa vitae. Non odio euismod lacinia at quis risus sed vulputate. Fermentum posuere urna nec tincidunt praesent. Ut eu sem integer vitae justo eget magna fermentum iaculis. Ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Arcu cursus euismod quis viverra nibh. Arcu dui vivamus arcu felis bibendum.
Eros in cursus turpis massa tincidunt dui ut. Urna condimentum mattis pellentesque id nibh tortor id aliquet lectus. Nibh venenatis cras sed felis. Ac felis donec et odio pellentesque diam. Ultricies lacus sed turpis tincidunt id aliquet risus. Diam volutpat commodo sed egestas. Dignissim sodales ut eu sem integer vitae. Pellentesque eu tincidunt tortor aliquam nulla facilisi. Et tortor consequat id porta nibh venenatis cras sed.
49
node/impl/client/testdata/payload2.txt
vendored
@ -1,49 +0,0 @@
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Vitae semper quis lectus nulla at volutpat diam ut venenatis. Ac tortor dignissim convallis aenean et tortor at. Faucibus ornare suspendisse sed nisi lacus sed. Commodo ullamcorper a lacus vestibulum sed arcu non. Est pellentesque elit ullamcorper dignissim. Quam quisque id diam vel quam. Pretium aenean pharetra magna ac. In nulla posuere sollicitudin aliquam ultrices. Sed arcu non odio euismod lacinia at. Suspendisse ultrices gravida dictum fusce ut placerat orci nulla pellentesque. Feugiat vivamus at augue eget arcu.
Pellentesque nec nam aliquam sem et tortor. Vitae tortor condimentum lacinia quis vel. Cras pulvinar mattis nunc sed. In massa tempor nec feugiat. Ornare arcu odio ut sem nulla. Diam maecenas sed enim ut sem. Pretium vulputate sapien nec sagittis. Bibendum arcu vitae elementum curabitur vitae nunc sed velit dignissim. Duis ut diam quam nulla porttitor massa. Viverra mauris in aliquam sem fringilla ut morbi. Ullamcorper eget nulla facilisi etiam dignissim. Vulputate mi sit amet mauris commodo quis imperdiet massa tincidunt. Nunc consequat interdum varius sit. Nunc mi ipsum faucibus vitae aliquet nec ullamcorper. Nunc sed augue lacus viverra. Lobortis scelerisque fermentum dui faucibus in ornare quam. Urna neque viverra justo nec ultrices. Varius vel pharetra vel turpis nunc eget lorem dolor sed.
Feugiat nisl pretium fusce id velit ut tortor pretium. Lorem dolor sed viverra ipsum nunc aliquet bibendum. Ultrices vitae auctor eu augue ut lectus. Pharetra massa massa ultricies mi quis. Nibh cras pulvinar mattis nunc sed blandit libero. Ac felis donec et odio pellentesque diam volutpat. Lectus proin nibh nisl condimentum id venenatis. Quis vel eros donec ac odio. Commodo sed egestas egestas fringilla phasellus faucibus scelerisque eleifend donec. Adipiscing diam donec adipiscing tristique.
Tempus imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Libero nunc consequat interdum varius sit. Et pharetra pharetra massa massa. Feugiat pretium nibh ipsum consequat. Amet commodo nulla facilisi nullam vehicula. Ornare arcu dui vivamus arcu felis bibendum ut tristique. At erat pellentesque adipiscing commodo elit at imperdiet dui. Auctor neque vitae tempus quam pellentesque nec nam aliquam sem. Eget velit aliquet sagittis id consectetur. Enim diam vulputate ut pharetra sit amet aliquam id diam. Eget velit aliquet sagittis id consectetur purus ut faucibus pulvinar. Amet porttitor eget dolor morbi. Felis eget velit aliquet sagittis id. Facilisis magna etiam tempor orci eu. Lacus suspendisse faucibus interdum posuere lorem. Pharetra et ultrices neque ornare aenean euismod. Platea dictumst quisque sagittis purus.
Quis varius quam quisque id diam vel quam elementum. Augue mauris augue neque gravida in fermentum et sollicitudin. Sapien nec sagittis aliquam malesuada bibendum arcu. Urna duis convallis convallis tellus id interdum velit. Tellus in hac habitasse platea dictumst vestibulum. Fames ac turpis egestas maecenas pharetra convallis. Diam volutpat commodo sed egestas egestas fringilla phasellus faucibus. Placerat orci nulla pellentesque dignissim enim sit amet venenatis. Sed adipiscing diam donec adipiscing. Praesent elementum facilisis leo vel fringilla est. Sed enim ut sem viverra aliquet eget sit amet tellus. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra. Turpis egestas pretium aenean pharetra magna ac placerat vestibulum. Massa id neque aliquam vestibulum morbi blandit cursus risus. Vitae congue eu consequat ac. Egestas erat imperdiet sed euismod nisi porta lorem mollis aliquam. Dolor purus non enim praesent elementum facilisis. Ultrices mi tempus imperdiet nulla malesuada pellentesque elit. In est ante in nibh.
Facilisis gravida neque convallis a. Urna nunc id cursus metus aliquam eleifend mi. Lacus luctus accumsan tortor posuere ac. Molestie nunc non blandit massa. Iaculis urna id volutpat lacus laoreet non. Cursus vitae congue mauris rhoncus aenean. Nunc vel risus commodo viverra maecenas. A pellentesque sit amet porttitor eget dolor morbi. Leo vel orci porta non pulvinar neque laoreet suspendisse. Sit amet facilisis magna etiam tempor. Consectetur a erat nam at lectus urna duis convallis convallis. Vestibulum morbi blandit cursus risus at ultrices. Dolor purus non enim praesent elementum. Adipiscing elit pellentesque habitant morbi tristique senectus et netus et. Et odio pellentesque diam volutpat commodo sed egestas egestas fringilla. Leo vel fringilla est ullamcorper eget nulla. Dui ut ornare lectus sit amet. Erat pellentesque adipiscing commodo elit at imperdiet dui accumsan sit.
Tristique senectus et netus et. Pellentesque diam volutpat commodo sed egestas egestas fringilla. Mauris pharetra et ultrices neque ornare aenean. Amet tellus cras adipiscing enim. Convallis aenean et tortor at risus viverra adipiscing at. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo. Dictumst vestibulum rhoncus est pellentesque elit. Fringilla ut morbi tincidunt augue interdum velit euismod in pellentesque. Dictum at tempor commodo ullamcorper a lacus vestibulum. Sed viverra tellus in hac habitasse platea. Sed id semper risus in hendrerit. In hendrerit gravida rutrum quisque non tellus orci ac. Sit amet risus nullam eget. Sit amet est placerat in egestas erat imperdiet sed. In nisl nisi scelerisque eu ultrices. Sit amet mattis vulputate enim nulla aliquet.
Dignissim suspendisse in est ante in nibh mauris cursus. Vitae proin sagittis nisl rhoncus. Id leo in vitae turpis massa sed elementum. Lobortis elementum nibh tellus molestie nunc non blandit massa enim. Arcu dictum varius duis at consectetur. Suspendisse faucibus interdum posuere lorem ipsum dolor sit amet consectetur. Imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Sed adipiscing diam donec adipiscing. Purus sit amet volutpat consequat mauris nunc congue nisi vitae. Elementum nisi quis eleifend quam adipiscing vitae proin sagittis nisl. Mattis ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Sit amet nisl purus in mollis nunc sed. Turpis tincidunt id aliquet risus feugiat in ante. Id diam maecenas ultricies mi eget mauris pharetra et ultrices.
Aliquam purus sit amet luctus venenatis lectus magna fringilla urna. Id diam vel quam elementum pulvinar. Elementum sagittis vitae et leo duis. Viverra aliquet eget sit amet tellus cras adipiscing enim eu. Et tortor at risus viverra adipiscing at in tellus integer. Purus in massa tempor nec feugiat. Augue neque gravida in fermentum et sollicitudin ac orci. Sodales ut eu sem integer vitae justo eget magna fermentum. Netus et malesuada fames ac. Augue interdum velit euismod in. Sed elementum tempus egestas sed sed risus pretium. Mattis vulputate enim nulla aliquet porttitor lacus luctus. Dui vivamus arcu felis bibendum ut tristique et egestas quis.
Viverra justo nec ultrices dui sapien. Quisque egestas diam in arcu cursus euismod quis viverra nibh. Nam libero justo laoreet sit amet cursus sit amet. Lacus sed viverra tellus in hac habitasse. Blandit aliquam etiam erat velit scelerisque in. Ut sem nulla pharetra diam sit amet nisl suscipit adipiscing. Diam sollicitudin tempor id eu nisl nunc. Eget duis at tellus at urna condimentum mattis. Urna porttitor rhoncus dolor purus non enim praesent elementum facilisis. Sed turpis tincidunt id aliquet risus feugiat. Est velit egestas dui id ornare arcu odio ut sem. Nibh sit amet commodo nulla facilisi nullam vehicula. Sit amet consectetur adipiscing elit duis tristique sollicitudin. Eu facilisis sed odio morbi. Massa id neque aliquam vestibulum morbi. In eu mi bibendum neque egestas congue quisque egestas. Massa sed elementum tempus egestas sed sed risus. Quam elementum pulvinar etiam non. At augue eget arcu dictum varius duis at consectetur lorem.
Penatibus et magnis dis parturient montes nascetur ridiculus. Dictumst quisque sagittis purus sit amet volutpat consequat. Bibendum at varius vel pharetra. Sed adipiscing diam donec adipiscing tristique risus nec feugiat in. Phasellus faucibus scelerisque eleifend donec pretium. Vitae tortor condimentum lacinia quis vel eros. Ac tincidunt vitae semper quis lectus nulla at volutpat diam. Eget sit amet tellus cras adipiscing. Morbi tristique senectus et netus. Nullam vehicula ipsum a arcu cursus vitae congue mauris rhoncus. Auctor urna nunc id cursus metus aliquam eleifend. Ultrices vitae auctor eu augue. Eu non diam phasellus vestibulum lorem sed risus ultricies. Fames ac turpis egestas sed tempus. Volutpat blandit aliquam etiam erat. Dictum varius duis at consectetur lorem. Sit amet volutpat consequat mauris nunc congue. Volutpat sed cras ornare arcu dui vivamus arcu felis.
Scelerisque fermentum dui faucibus in ornare quam viverra. Interdum velit laoreet id donec ultrices tincidunt arcu. Netus et malesuada fames ac. Netus et malesuada fames ac turpis. Suscipit tellus mauris a diam maecenas sed enim ut sem. Id velit ut tortor pretium. Neque aliquam vestibulum morbi blandit cursus risus at. Cum sociis natoque penatibus et magnis dis parturient. Lobortis elementum nibh tellus molestie nunc non blandit. Ipsum dolor sit amet consectetur adipiscing elit duis tristique. Amet nisl purus in mollis. Amet massa vitae tortor condimentum lacinia quis vel eros donec. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo.
Nullam ac tortor vitae purus faucibus. Dis parturient montes nascetur ridiculus mus mauris. Molestie at elementum eu facilisis sed odio morbi. Scelerisque felis imperdiet proin fermentum leo vel orci porta. Lectus proin nibh nisl condimentum id venenatis a. Eget nullam non nisi est sit amet facilisis. Hendrerit gravida rutrum quisque non tellus orci ac auctor. Ut faucibus pulvinar elementum integer enim. Rhoncus dolor purus non enim praesent elementum facilisis. Enim sed faucibus turpis in eu mi bibendum. Faucibus nisl tincidunt eget nullam.
Cursus risus at ultrices mi tempus imperdiet nulla malesuada pellentesque. Pretium nibh ipsum consequat nisl vel pretium lectus quam. Semper viverra nam libero justo laoreet sit amet cursus sit. Augue eget arcu dictum varius duis at consectetur lorem donec. Et malesuada fames ac turpis. Erat nam at lectus urna duis convallis convallis. Dictum sit amet justo donec enim. Urna condimentum mattis pellentesque id nibh tortor id. Morbi tempus iaculis urna id. Lectus proin nibh nisl condimentum id venenatis a condimentum. Nibh sit amet commodo nulla facilisi nullam vehicula. Dui faucibus in ornare quam. Gravida arcu ac tortor dignissim convallis aenean. Consectetur adipiscing elit pellentesque habitant morbi tristique. Pulvinar elementum integer enim neque volutpat ac tincidunt vitae. Pharetra pharetra massa massa ultricies mi quis hendrerit. Dictum at tempor commodo ullamcorper a lacus vestibulum sed. Mattis pellentesque id nibh tortor id. Ultricies integer quis auctor elit sed vulputate. Pretium vulputate sapien nec sagittis aliquam malesuada.
Auctor augue mauris augue neque gravida. Porttitor lacus luctus accumsan tortor posuere ac ut. Urna neque viverra justo nec ultrices dui. Sit amet est placerat in egestas. Urna nec tincidunt praesent semper feugiat nibh sed pulvinar. Tincidunt eget nullam non nisi est sit amet facilisis magna. Elementum tempus egestas sed sed risus pretium quam vulputate dignissim. Fermentum posuere urna nec tincidunt praesent semper feugiat nibh sed. Porttitor eget dolor morbi non arcu risus quis. Non quam lacus suspendisse faucibus interdum. Venenatis cras sed felis eget velit aliquet sagittis id. Arcu ac tortor dignissim convallis aenean et. Morbi tincidunt ornare massa eget egestas purus. Ac feugiat sed lectus vestibulum mattis ullamcorper velit sed ullamcorper. Vestibulum morbi blandit cursus risus at ultrices. Volutpat blandit aliquam etiam erat velit scelerisque.
Et egestas quis ipsum suspendisse. Amet consectetur adipiscing elit duis. Purus ut faucibus pulvinar elementum integer enim neque. Cursus vitae congue mauris rhoncus aenean vel elit scelerisque mauris. Tincidunt eget nullam non nisi est. Aliquam purus sit amet luctus. Dui ut ornare lectus sit amet est placerat in. Fringilla ut morbi tincidunt augue interdum velit euismod in. Felis eget nunc lobortis mattis aliquam faucibus purus in. Suspendisse interdum consectetur libero id faucibus nisl.
Scelerisque fermentum dui faucibus in ornare quam. Lectus proin nibh nisl condimentum id venenatis a condimentum vitae. Fames ac turpis egestas integer eget aliquet nibh praesent tristique. Arcu non sodales neque sodales ut etiam sit. Pharetra convallis posuere morbi leo urna. Nec dui nunc mattis enim ut tellus. Nunc sed augue lacus viverra vitae. Consequat id porta nibh venenatis cras sed felis. Dolor sit amet consectetur adipiscing. Tellus rutrum tellus pellentesque eu tincidunt tortor aliquam nulla.
Metus aliquam eleifend mi in nulla posuere. Blandit massa enim nec dui nunc mattis enim. Aliquet nibh praesent tristique magna. In aliquam sem fringilla ut. Magna fermentum iaculis eu non. Eget aliquet nibh praesent tristique magna sit amet purus. Ultrices gravida dictum fusce ut placerat orci. Fermentum posuere urna nec tincidunt praesent. Enim tortor at auctor urna nunc. Ridiculus mus mauris vitae ultricies leo integer malesuada nunc vel. Sed id semper risus in hendrerit gravida rutrum. Vestibulum lectus mauris ultrices eros in cursus turpis. Et sollicitudin ac orci phasellus egestas tellus rutrum. Pellentesque elit ullamcorper dignissim cras tincidunt lobortis feugiat vivamus at. Metus vulputate eu scelerisque felis imperdiet proin fermentum leo. Porta non pulvinar neque laoreet suspendisse. Suscipit adipiscing bibendum est ultricies integer quis auctor elit sed. Euismod in pellentesque massa placerat duis ultricies lacus sed. Pellentesque adipiscing commodo elit at imperdiet dui accumsan sit amet.
Pellentesque eu tincidunt tortor aliquam nulla facilisi. Commodo nulla facilisi nullam vehicula ipsum a arcu. Commodo quis imperdiet massa tincidunt nunc pulvinar sapien et. Faucibus purus in massa tempor. Purus semper eget duis at tellus at urna condimentum. Vivamus at augue eget arcu dictum. Lacus vel facilisis volutpat est velit egestas dui id. Malesuada fames ac turpis egestas maecenas pharetra. Nunc faucibus a pellentesque sit amet porttitor eget dolor. Ultricies tristique nulla aliquet enim. Vel risus commodo viverra maecenas accumsan lacus vel facilisis volutpat. Dignissim diam quis enim lobortis scelerisque. Donec ultrices tincidunt arcu non sodales neque sodales ut etiam.
Vitae proin sagittis nisl rhoncus mattis rhoncus urna neque. Fermentum leo vel orci porta non. At elementum eu facilisis sed. Quis enim lobortis scelerisque fermentum. Fermentum odio eu feugiat pretium nibh ipsum consequat. Habitant morbi tristique senectus et netus et. Enim praesent elementum facilisis leo vel fringilla est ullamcorper. Egestas quis ipsum suspendisse ultrices gravida dictum. Nam libero justo laoreet sit amet cursus sit amet. Viverra tellus in hac habitasse platea dictumst vestibulum. Varius vel pharetra vel turpis nunc eget. Nullam non nisi est sit amet facilisis magna. Ullamcorper eget nulla facilisi etiam dignissim diam. Ante metus dictum at tempor commodo ullamcorper a lacus.
Etiam non quam lacus suspendisse. Ut venenatis tellus in metus vulputate eu scelerisque felis. Pulvinar sapien et ligula ullamcorper malesuada proin libero. Consequat interdum varius sit amet mattis. Nunc eget lorem dolor sed viverra ipsum nunc aliquet. Potenti nullam ac tortor vitae purus faucibus ornare. Urna et pharetra pharetra massa massa ultricies mi quis hendrerit. Purus in mollis nunc sed id. Pharetra vel turpis nunc eget lorem dolor sed viverra. Et netus et malesuada fames ac turpis. Libero id faucibus nisl tincidunt eget nullam non nisi. Cursus sit amet dictum sit amet. Porttitor lacus luctus accumsan tortor.
Volutpat diam ut venenatis tellus in metus vulputate eu scelerisque. Sed viverra tellus in hac habitasse. Aliquam sem et tortor consequat id. Pellentesque habitant morbi tristique senectus et netus et. Consectetur purus ut faucibus pulvinar elementum. Aliquam malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Sollicitudin tempor id eu nisl nunc mi ipsum. Fringilla phasellus faucibus scelerisque eleifend donec pretium vulputate sapien nec. Quis eleifend quam adipiscing vitae proin sagittis nisl rhoncus. Bibendum neque egestas congue quisque egestas. A iaculis at erat pellentesque adipiscing commodo elit at imperdiet. Pulvinar etiam non quam lacus. Adipiscing commodo elit at imperdiet. Scelerisque eu ultrices vitae auctor. Sed cras ornare arcu dui vivamus arcu felis bibendum ut. Ornare lectus sit amet est.
Consequat semper viverra nam libero justo laoreet sit. Imperdiet sed euismod nisi porta lorem mollis aliquam ut porttitor. Cras sed felis eget velit aliquet sagittis id consectetur. Dolor morbi non arcu risus quis. Adipiscing tristique risus nec feugiat in fermentum posuere urna. Dolor magna eget est lorem ipsum dolor. Mauris pharetra et ultrices neque ornare aenean euismod. Nulla facilisi etiam dignissim diam quis. Ultrices tincidunt arcu non sodales. Fames ac turpis egestas maecenas pharetra convallis posuere morbi leo. Interdum varius sit amet mattis vulputate. Tincidunt praesent semper feugiat nibh sed pulvinar. Quisque sagittis purus sit amet volutpat.
Sed vulputate odio ut enim blandit. Vitae auctor eu augue ut lectus arcu bibendum. Consectetur adipiscing elit pellentesque habitant morbi tristique senectus et. Scelerisque eu ultrices vitae auctor eu augue. Etiam dignissim diam quis enim lobortis scelerisque fermentum dui faucibus. Tellus integer feugiat scelerisque varius. Vulputate enim nulla aliquet porttitor lacus luctus accumsan tortor. Amet nisl purus in mollis. Scelerisque viverra mauris in aliquam sem fringilla ut morbi tincidunt. Semper eget duis at tellus at. Erat velit scelerisque in dictum non consectetur a erat nam. Gravida rutrum quisque non tellus orci. Morbi blandit cursus risus at. Mauris sit amet massa vitae. Non odio euismod lacinia at quis risus sed vulputate. Fermentum posuere urna nec tincidunt praesent. Ut eu sem integer vitae justo eget magna fermentum iaculis. Ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Arcu cursus euismod quis viverra nibh. Arcu dui vivamus arcu felis bibendum.
Eros in cursus turpis massa tincidunt dui ut. Aarsh shah is simply an amazing person. Urna condimentum mattis pellentesque id nibh tortor id aliquet lectus. Nibh venenatis cras sed felis. Ac felis donec et odio pellentesque diam. Ultricies lacus sed turpis tincidunt id aliquet risus. Diam volutpat commodo sed egestas. Dignissim sodales ut eu sem integer vitae. Pellentesque eu tincidunt tortor aliquam nulla facilisi. Et tortor consequat id porta nibh venenatis cras sed.
@ -9,7 +9,6 @@ import (

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/node/impl/client"
	"github.com/filecoin-project/lotus/node/impl/common"
	"github.com/filecoin-project/lotus/node/impl/full"
	"github.com/filecoin-project/lotus/node/impl/market"
@ -25,7 +24,6 @@ type FullNodeAPI struct {
	common.CommonAPI
	net.NetAPI
	full.ChainAPI
	client.API
	full.MpoolAPI
	full.GasAPI
	market.MarketAPI
120
node/rpc.go
@ -2,8 +2,6 @@ package node

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	_ "net/http/pprof"
@ -11,10 +9,7 @@ import (
	"strconv"
	"time"

	"github.com/google/uuid"
	"github.com/gorilla/mux"
	"github.com/gorilla/websocket"
	"github.com/ipfs/go-cid"
	logging "github.com/ipfs/go-log/v2"
	"github.com/multiformats/go-multiaddr"
	manet "github.com/multiformats/go-multiaddr/net"
@ -27,12 +22,10 @@ import (
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v0api"
	"github.com/filecoin-project/lotus/api/v1api"
	bstore "github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/lib/rpcenc"
	"github.com/filecoin-project/lotus/metrics"
	"github.com/filecoin-project/lotus/metrics/proxy"
	"github.com/filecoin-project/lotus/node/impl"
	"github.com/filecoin-project/lotus/node/impl/client"
)

var rpclog = logging.Logger("rpc")
@ -98,33 +91,6 @@ func FullNodeHandler(a v1api.FullNode, permissioned bool, opts ...jsonrpc.Server
	serveRpc("/rpc/v1", fnapi)
	serveRpc("/rpc/v0", v0)

	// Import handler
	handleImportFunc := handleImport(a.(*impl.FullNodeAPI))
	handleExportFunc := handleExport(a.(*impl.FullNodeAPI))
	handleRemoteStoreFunc := handleRemoteStore(a.(*impl.FullNodeAPI))
	if permissioned {
		importAH := &auth.Handler{
			Verify: a.AuthVerify,
			Next:   handleImportFunc,
		}
		m.Handle("/rest/v0/import", importAH)
		exportAH := &auth.Handler{
			Verify: a.AuthVerify,
			Next:   handleExportFunc,
		}
		m.Handle("/rest/v0/export", exportAH)

		storeAH := &auth.Handler{
			Verify: a.AuthVerify,
			Next:   handleRemoteStoreFunc,
		}
		m.Handle("/rest/v0/store/{uuid}", storeAH)
	} else {
		m.HandleFunc("/rest/v0/import", handleImportFunc)
		m.HandleFunc("/rest/v0/export", handleExportFunc)
		m.HandleFunc("/rest/v0/store/{uuid}", handleRemoteStoreFunc)
	}

	// debugging
	m.Handle("/debug/metrics", metrics.Exporter())
	m.Handle("/debug/pprof-set/block", handleFractionOpt("BlockProfileRate", runtime.SetBlockProfileRate))
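Note: the /rest/v0/import route removed in the hunk above accepted an authenticated PUT whose body was the file to import and answered with a JSON object containing the imported root CID. Below is a hedged sketch of how a caller might have hit it; the node address, port, and token environment variable are placeholders, and the response is decoded as raw JSON rather than a cid.Cid.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

func main() {
	f, err := os.Open("payload.bin") // hypothetical file to import
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// PUT the raw file body to the (now removed) import endpoint.
	req, err := http.NewRequest(http.MethodPut, "http://127.0.0.1:1234/rest/v0/import", f)
	if err != nil {
		panic(err)
	}
	// The handler required a token carrying write permission.
	req.Header.Set("Authorization", "Bearer "+os.Getenv("FULLNODE_API_TOKEN"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The handler encoded struct{ Cid cid.Cid }; keep it as raw JSON here.
	var out struct{ Cid json.RawMessage }
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("HTTP %s, imported root: %s\n", resp.Status, out.Cid)
}
```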
@ -191,61 +157,6 @@ func MinerHandler(a api.StorageMiner, permissioned bool) (http.Handler, error) {
	return rootMux, nil
}

func handleImport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "PUT" {
			w.WriteHeader(404)
			return
		}
		if !auth.HasPerm(r.Context(), nil, api.PermWrite) {
			w.WriteHeader(401)
			_ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"})
			return
		}

		c, err := a.ClientImportLocal(r.Context(), r.Body)
		if err != nil {
			w.WriteHeader(500)
			_ = json.NewEncoder(w).Encode(struct{ Error string }{err.Error()})
			return
		}
		w.WriteHeader(200)
		err = json.NewEncoder(w).Encode(struct{ Cid cid.Cid }{c})
		if err != nil {
			rpclog.Errorf("/rest/v0/import: Writing response failed: %+v", err)
			return
		}
	}
}

func handleExport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "GET" {
			w.WriteHeader(404)
			return
		}
		if !auth.HasPerm(r.Context(), nil, api.PermWrite) {
			w.WriteHeader(401)
			_ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"})
			return
		}

		var eref api.ExportRef
		if err := json.Unmarshal([]byte(r.FormValue("export")), &eref); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}

		car := r.FormValue("car") == "true"

		err := a.ClientExportInto(r.Context(), eref, car, client.ExportDest{Writer: w})
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	}
}

func handleFractionOpt(name string, setter func(int)) http.HandlerFunc {
	return func(rw http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
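Note: the handleExport handler deleted in the hunk above read an api.ExportRef as JSON from the "export" form value, honoured a "car=true" flag, and streamed the exported data into the response. A rough sketch of a caller under those assumptions; the address, token, root CID, and CAR path are placeholders, and the exact JSON shape of ExportRef is whatever the Lotus API types marshal to.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
)

func main() {
	// Placeholder ExportRef: a root CID (dag-json style {"/": "..."}) plus a local CAR path.
	eref := map[string]interface{}{
		"Root":         map[string]string{"/": "bafy...placeholder"},
		"FromLocalCAR": "/path/to/imported.car",
	}
	refJSON, err := json.Marshal(eref)
	if err != nil {
		panic(err)
	}

	q := url.Values{}
	q.Set("export", string(refJSON))
	q.Set("car", "true") // ask for a CAR stream rather than a UnixFS file

	req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:1234/rest/v0/export?"+q.Encode(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+os.Getenv("FULLNODE_API_TOKEN"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, err := os.Create("export.car")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	n, err := io.Copy(out, resp.Body) // the handler wrote the export directly to the response body
	if err != nil {
		panic(err)
	}
	fmt.Printf("wrote %d bytes (HTTP %s)\n", n, resp.Status)
}
```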
@ -272,34 +183,3 @@ func handleFractionOpt(name string, setter func(int)) http.HandlerFunc {
		setter(fr)
	}
}

var upgrader = websocket.Upgrader{
	CheckOrigin: func(r *http.Request) bool {
		return true
	},
}

func handleRemoteStore(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		id, err := uuid.Parse(vars["uuid"])
		if err != nil {
			http.Error(w, fmt.Sprintf("parse uuid: %s", err), http.StatusBadRequest)
			return
		}

		c, err := upgrader.Upgrade(w, r, nil)
		if err != nil {
			log.Error(err)
			w.WriteHeader(500)
			return
		}

		nstore := bstore.NewNetworkStoreWS(c)
		if err := a.ApiBlockstoreAccessor.RegisterApiStore(id, nstore); err != nil {
			log.Errorw("registering api bstore", "error", err)
			_ = c.Close()
			return
		}
	}
}
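Note: the removed /rest/v0/store/{uuid} route upgraded the request to a websocket and registered the connection as a network-backed blockstore for that UUID. Below is only a rough sketch of dialing such an endpoint with gorilla/websocket; the UUID handshake, address, and token are placeholders, and the actual Lotus network-blockstore wire protocol is not shown.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/google/uuid"
	"github.com/gorilla/websocket"
)

func main() {
	id := uuid.New() // in practice this UUID is agreed with the node beforehand

	header := http.Header{}
	header.Set("Authorization", "Bearer PLACEHOLDER_TOKEN")

	// Dial the (now removed) remote-store endpoint; the node side used to wrap
	// the accepted connection with bstore.NewNetworkStoreWS.
	wsURL := fmt.Sprintf("ws://127.0.0.1:1234/rest/v0/store/%s", id)
	conn, _, err := websocket.DefaultDialer.Dial(wsURL, header)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	fmt.Println("connected to remote store endpoint:", wsURL)
}
```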