Merge pull request #7908 from filecoin-project/release/v1.13.2

build: release: v1.13.2
Jiaying Wang 2022-01-09 23:12:14 -05:00 committed by GitHub
commit f9db71ee12
134 changed files with 7281 additions and 2171 deletions

View File

@@ -805,6 +805,11 @@ workflows:
suite: itest-deals_padding
target: "./itests/deals_padding_test.go"
- test:
name: test-itest-deals_partial_retrieval_dm-level
suite: itest-deals_partial_retrieval_dm-level
target: "./itests/deals_partial_retrieval_dm-level_test.go"
- test:
name: test-itest-deals_partial_retrieval
suite: itest-deals_partial_retrieval
@@ -940,7 +945,7 @@ workflows:
codecov-upload: false
suite: conformance-bleeding-edge
target: "./conformance"
vectors-branch: master
vectors-branch: specs-actors-v7
- trigger-testplans:
filters:
branches:
@@ -971,19 +976,10 @@ workflows:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-appimage:
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- publish:
requires:
- build-all
- build-macos
- build-appimage
filters:
branches:
ignore:

View File

@@ -785,7 +785,7 @@ workflows:
codecov-upload: false
suite: conformance-bleeding-edge
target: "./conformance"
vectors-branch: master
vectors-branch: specs-actors-v7
- trigger-testplans:
filters:
branches:
@@ -816,19 +816,10 @@ workflows:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-appimage:
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- publish:
requires:
- build-all
- build-macos
- build-appimage
filters:
branches:
ignore:

View File

@@ -1,5 +1,107 @@
# Lotus changelog
# v1.13.2 / 2022-01-09
Lotus v1.13.2 is a *highly recommended* feature release with remarkable retrieval improvements and new features such as
worker management and scheduler enhancements.
## Highlights
- 🚀🚀🚀 Improve retrieval deal experience
- Testing with MinerX.3 shows that the retrieval deal success rate has increased dramatically, with faster transfer speeds. You can join or follow along with further performance testing [here](https://github.com/filecoin-project/lotus/discussions/7874). We recommend that application developers integrate with the new retrieval APIs to provide a better client experience.
- 🌟🌟🌟 Reduce retrieval Time-To-First-Byte by over 100x ([#7693](https://github.com/filecoin-project/lotus/pull/7693))
- This change makes most free, small retrievals sub-second
- 🌟🌟🌟 Partial retrieval UX improvements ([#7610](https://github.com/filecoin-project/lotus/pull/7610))
- New retrieval commands for clients:
- `lotus client ls`: retrieve and list desired object links
- `lotus client cat`: retrieve and print the data from the network
- 🌟🌟 The monolithic `ClientRetrieve` method was broken into three calls (see the sketch after this list):
- `ClientRetrieve`, which retrieves data into the local repo (or into an IPFS node if IPFS integration is enabled)
- `ClientRetrieveWait`, which waits for the retrieval to complete
- `ClientExport`, which exports data from the local node
- Note: this change only applies to the v1 API; the v0 API remains unchanged.
- 🌟 Support for full IPLD selectors was added (for example, making it possible to retrieve only the list of directories in a deal, without fetching any file data)
- To learn more, see [here](https://github.com/filecoin-project/lotus/blob/0523c946f984b22b3f5de8cc3003cc791389527e/api/types.go#L230-L264)
- 🚀🚀 Sealing scheduler enhancements ([#7703](https://github.com/filecoin-project/lotus/pull/7703),
[#7269](https://github.com/filecoin-project/lotus/pull/7269), [#7714](https://github.com/filecoin-project/lotus/pull/7714))
- Workers are now aware of cgroup memory limits
- Multiple tasks which use a GPU can be scheduled on a single worker
- Workers can override the default resource table through environment variables
- Default value list: https://gist.github.com/magik6k/c0e1c7cd73c1241a9acabc30bf469a43
- 🚀🚀 Sector storage groups ([#7453](https://github.com/filecoin-project/lotus/pull/7453))
- Storage groups allow for better control of data flow between workers; for example, they make it possible to require that data from PC1 on a given worker has its PC2 step executed on the same worker
- To set it up, follow the instructions under the `Sector Storage Group` section [here](https://lotus.filecoin.io/docs/storage-providers/seal-workers/#lotus-worker-co-location)
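
A minimal sketch of the new three-step retrieval flow for API consumers (illustrative only: the `node` handle, `order` value, and output path are assumptions, not part of this release):

```go
// Start the retrieval; the returned RestrievalRes carries the deal ID.
res, err := node.ClientRetrieve(ctx, order)
if err != nil {
	return err
}
// Block until the data has been fetched into the local blockstore.
if err := node.ClientRetrieveWait(ctx, res.DealID); err != nil {
	return err
}
// Export the retrieved data from the local node to a file on disk.
eref := api.ExportRef{Root: order.Root, DealID: res.DealID}
return node.ClientExport(ctx, eref, api.FileRef{Path: "/tmp/retrieved-data"})
```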
## New Features
- Add RLE dump code ([#7691](https://github.com/filecoin-project/lotus/pull/7691))
- Shed: Add a util to list miner faults ([#7605](https://github.com/filecoin-project/lotus/pull/7605))
- lotus-shed msg: Decode submessages/msig proposals ([#7639](https://github.com/filecoin-project/lotus/pull/7639))
- CLI: Add a lotus multisig cancel command ([#7645](https://github.com/filecoin-project/lotus/pull/7645))
- shed: simple wallet balancer util ([#7414](https://github.com/filecoin-project/lotus/pull/7414))
- balances tokens between multiple accounts
## Improvements
- Add verbose mode to `lotus-miner pieces list-cids` ([#7699](https://github.com/filecoin-project/lotus/pull/7699))
- retrieval: Only output matching nodes, MatchPath dagspec ([#7706](https://github.com/filecoin-project/lotus/pull/7706))
- Cleanup partial retrieval codepaths (zero functional changes) ([#7688](https://github.com/filecoin-project/lotus/pull/7688))
- storage: Use 1M buffers for Tar transfers ([#7681](https://github.com/filecoin-project/lotus/pull/7681))
- Chore/dm level tests plus merkle proof cars ([#7673](https://github.com/filecoin-project/lotus/pull/7673))
- Shed: Add a util to create miners more easily ([#7595](https://github.com/filecoin-project/lotus/pull/7595))
- add timeout flag to wait-api command ([#7592](https://github.com/filecoin-project/lotus/pull/7592))
- add log for restart windows post scheduler ([#7613](https://github.com/filecoin-project/lotus/pull/7613))
- remove jaeger envvars ([#7631](https://github.com/filecoin-project/lotus/pull/7631))
- remove api and jaeger env from docker file ([#7624](https://github.com/filecoin-project/lotus/pull/7624))
- Wdpost worker: Reduce challenge confidence to 1 epoch ([#7572](https://github.com/filecoin-project/lotus/pull/7572))
- add additional methods to lotus gateway ([#7644](https://github.com/filecoin-project/lotus/pull/7644))
- Add caches to lotus-stats and splitcode ([#7329](https://github.com/filecoin-project/lotus/pull/7329))
- remote store: Remove debug printf ([#7664](https://github.com/filecoin-project/lotus/pull/7664))
- docsgen-cli: Handle commands with no description correctly ([#7659](https://github.com/filecoin-project/lotus/pull/7659))
## Bug Fixes
- fix docker logic error ([#7709](https://github.com/filecoin-project/lotus/pull/7709))
- add missing NodeType tag ([#7559](https://github.com/filecoin-project/lotus/pull/7559))
- checkCommit should return SectorCommitFailed ([#7555](https://github.com/filecoin-project/lotus/pull/7555))
- ffiwrapper: Validate PC2 by calling C1 with random seeds ([#7710](https://github.com/filecoin-project/lotus/pull/7710))
## Dependency Updates
- Update go-graphsync v0.10.6 ([#7708](https://github.com/filecoin-project/lotus/pull/7708))
- update go-libp2p-pubsub to v0.5.6 ([#7581](https://github.com/filecoin-project/lotus/pull/7581))
- Update go-state-types ([#7591](https://github.com/filecoin-project/lotus/pull/7591))
- disable mplex stream muxer ([#7689](https://github.com/filecoin-project/lotus/pull/7689))
- Bump ws from 5.2.2 to 5.2.3 in /lotuspond/front ([#7660](https://github.com/filecoin-project/lotus/pull/7660))
- Bump color-string from 1.5.3 to 1.6.0 in /lotuspond/front ([#7658](https://github.com/filecoin-project/lotus/pull/7658))
- Bump postcss from 7.0.17 to 7.0.39 in /lotuspond/front ([#7657](https://github.com/filecoin-project/lotus/pull/7657))
- Bump path-parse from 1.0.6 to 1.0.7 in /lotuspond/front ([#7656](https://github.com/filecoin-project/lotus/pull/7656))
- Bump tmpl from 1.0.4 to 1.0.5 in /lotuspond/front ([#7655](https://github.com/filecoin-project/lotus/pull/7655))
- Bump url-parse from 1.4.7 to 1.5.3 in /lotuspond/front ([#7654](https://github.com/filecoin-project/lotus/pull/7654))
- github.com/filecoin-project/go-state-types (v0.1.1-0.20210915140513-d354ccf10379 -> v0.1.1):
## Others
- Update archive script ([#7690](https://github.com/filecoin-project/lotus/pull/7690))
## Contributors
| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| @magik6k | 89 | +5200/-1818 | 232 |
| Travis Person | 5 | +1473/-953 | 38 |
| @arajasek | 6 | +550/-38 | 19 |
| @clinta | 4 | +393/-123 | 26 |
| @ribasushi | 3 | +334/-68 | 7 |
| @jennijuju | 13 | +197/-120 | 67 |
| @Kubuxu | 10 | +153/-30 | 10 |
| @coryschwartz | 6 | +18/-26 | 6 |
| Marten Seemann | 2 | +6/-34 | 5 |
| @vyzo | 1 | +3/-3 | 2 |
| @hannahhoward | 1 | +3/-3 | 2 |
| @zenground0 | 2 | +2/-2 | 2 |
| @yaohcn | 2 | +2/-2 | 2 |
| @jennijuju | 1 | +1/-1 | 1 |
| @hunjixin | 1 | +1/-0 | 1 |
# v1.13.1 / 2021-11-26
This is an optional Lotus v1.13.1 release.

View File

@@ -36,7 +36,7 @@ WORKDIR /opt/filecoin
ARG RUSTFLAGS=""
ARG GOFLAGS=""
RUN make lotus lotus-miner lotus-worker lotus-shed lotus-wallet lotus-gateway
RUN make lotus lotus-miner lotus-worker lotus-shed lotus-wallet lotus-gateway lotus-stats
FROM ubuntu:20.04 AS base
@@ -66,8 +66,6 @@ COPY scripts/docker-lotus-entrypoint.sh /
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV LOTUS_PATH /var/lib/lotus
ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1
ENV LOTUS_JAEGER_AGENT_PORT 6831
ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car
ENV DOCKER_LOTUS_IMPORT_WALLET ""
@@ -92,8 +90,6 @@ MAINTAINER Lotus Development Team
COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/
ENV WALLET_PATH /var/lib/lotus-wallet
ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1
ENV LOTUS_JAEGER_AGENT_PORT 6831
RUN mkdir /var/lib/lotus-wallet
RUN chown fc: /var/lib/lotus-wallet
@@ -114,10 +110,6 @@ MAINTAINER Lotus Development Team
COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/
ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1
ENV LOTUS_JAEGER_AGENT_PORT 6831
ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http
USER fc
EXPOSE 1234
@@ -135,11 +127,7 @@ COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/
COPY scripts/docker-lotus-miner-entrypoint.sh /
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http
ENV LOTUS_MINER_PATH /var/lib/lotus-miner
ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1
ENV LOTUS_JAEGER_AGENT_PORT 6831
ENV DOCKER_LOTUS_MINER_INIT true
RUN mkdir /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters
RUN chown fc: /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters
@@ -163,10 +151,7 @@ MAINTAINER Lotus Development Team
COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV MINER_API_INFO /ip4/127.0.0.1/tcp/2345/http
ENV LOTUS_WORKER_PATH /var/lib/lotus-worker
ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1
ENV LOTUS_JAEGER_AGENT_PORT 6831
RUN mkdir /var/lib/lotus-worker
RUN chown fc: /var/lib/lotus-worker
@@ -186,16 +171,11 @@ CMD ["-help"]
from base as lotus-all-in-one
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http
ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1
ENV LOTUS_JAEGER_AGENT_PORT 6831
ENV LOTUS_MINER_PATH /var/lib/lotus-miner
ENV LOTUS_PATH /var/lib/lotus
ENV LOTUS_WORKER_PATH /var/lib/lotus-worker
ENV MINER_API_INFO /ip4/127.0.0.1/tcp/2345/http
ENV WALLET_PATH /var/lib/lotus-wallet
ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car
ENV DOCKER_LOTUS_MINER_INIT true
COPY --from=builder /opt/filecoin/lotus /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/
@@ -203,6 +183,7 @@ COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-stats /usr/local/bin/
RUN mkdir /var/tmp/filecoin-proof-parameters
RUN mkdir /var/lib/lotus

View File

@@ -7,7 +7,6 @@ import (
"time"
"github.com/ipfs/go-cid"
textselector "github.com/ipld/go-ipld-selector-text-lite"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/filecoin-project/go-address"
@@ -28,7 +27,6 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/types"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/repo/imports"
)
@@ -352,10 +350,11 @@ type FullNode interface {
// ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error) //perm:read
// ClientRetrieve initiates the retrieval of a file, as specified in the order.
ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error //perm:admin
// ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
// of status updates.
ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin
ClientRetrieve(ctx context.Context, params RetrievalOrder) (*RestrievalRes, error) //perm:admin
// ClientRetrieveWait waits for retrieval to be complete
ClientRetrieveWait(ctx context.Context, deal retrievalmarket.DealID) error //perm:admin
// ClientExport exports a file stored in the local filestore to a system file
ClientExport(ctx context.Context, exportRef ExportRef, fileRef FileRef) error //perm:admin
// ClientListRetrievals returns information about retrievals made by the local client
ClientListRetrievals(ctx context.Context) ([]RetrievalInfo, error) //perm:write
// ClientGetRetrievalUpdates returns status of updated retrieval deals
@@ -630,10 +629,14 @@ type FullNode interface {
// <sender address of the approve msg>, <method to call in the proposed message>, <params to include in the proposed message>
MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign
// MsigCancel cancels a previously-proposed multisig message
// It takes the following params: <multisig address>, <proposed transaction ID>, <signer address>
MsigCancel(context.Context, address.Address, uint64, address.Address) (*MessagePrototype, error) //perm:sign
// MsigCancel cancels a previously-proposed multisig message
// It takes the following params: <multisig address>, <proposed transaction ID>, <recipient address>, <value to transfer>,
// <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message>
MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign
MsigCancelTxnHash(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign
// MsigAddPropose proposes adding a signer in the multisig
// It takes the following params: <multisig address>, <sender address of the propose msg>,
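
A sketch of the resulting call shapes (hypothetical variable names; `node` is assumed to be a v1 FullNode client):

```go
// Cancel one of our own pending proposals with the simplified signature:
// <multisig address>, <proposed transaction ID>, <signer address>.
proto, err := node.MsigCancel(ctx, msig, txnID, signer)

// MsigCancelTxnHash keeps the former eight-parameter form, hash-checking
// the proposal against the supplied recipient, value, method, and params.
proto2, err := node.MsigCancelTxnHash(ctx, msig, txnID, to, amt, signer, method, params)
```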
@@ -930,15 +933,14 @@ type MarketDeal struct {
}
type RetrievalOrder struct {
// TODO: make this less unixfs specific
Root cid.Cid
Piece *cid.Cid
DatamodelPathSelector *textselector.Expression
Size uint64
Root cid.Cid
Piece *cid.Cid
DataSelector *Selector
// todo: Size/Total are only used for calculating price per byte; we should let users just pass that
Size uint64
Total types.BigInt
FromLocalCAR string // if specified, get data from a local CARv2 file.
// TODO: support offset
Total types.BigInt
UnsealPrice types.BigInt
PaymentInterval uint64
PaymentIntervalIncrease uint64

View File

@@ -31,6 +31,8 @@ import (
type Gateway interface {
ChainHasObj(context.Context, cid.Cid) (bool, error)
ChainHead(ctx context.Context) (*types.TipSet, error)
ChainGetParentMessages(context.Context, cid.Cid) ([]Message, error)
ChainGetParentReceipts(context.Context, cid.Cid) ([]*types.MessageReceipt, error)
ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error)
ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error)
ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*HeadChange, error)
@@ -39,6 +41,7 @@ type Gateway interface {
ChainGetTipSetAfterHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error)
ChainNotify(context.Context) (<-chan []*HeadChange, error)
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
ChainGetGenesis(context.Context) (*types.TipSet, error)
GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
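
These additions can be exercised through any Gateway client; a brief sketch (assuming `gw` is a connected api.Gateway and `blkCid` is a block CID):

```go
// Fetch the genesis tipset through the gateway.
gen, err := gw.ChainGetGenesis(ctx)
if err != nil {
	return err
}
// Look up the messages included in the given block's parents, and their receipts.
msgs, err := gw.ChainGetParentMessages(ctx, blkCid)
rcts, err := gw.ChainGetParentReceipts(ctx, blkCid)
```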

View File

@@ -91,6 +91,8 @@ func init() {
storeIDExample := imports.ID(50)
textSelExample := textselector.Expression("Links/21/Hash/Links/42/Hash")
apiSelExample := api.Selector("Links/21/Hash/Links/42/Hash")
clientEvent := retrievalmarket.ClientEventDealAccepted
addExample(bitfield.NewFromSet([]uint64{5}))
addExample(abi.RegisteredSealProof_StackedDrg32GiBV1_1)
@@ -122,9 +124,12 @@ func init() {
addExample(datatransfer.Ongoing)
addExample(storeIDExample)
addExample(&storeIDExample)
addExample(clientEvent)
addExample(&clientEvent)
addExample(retrievalmarket.ClientEventDealAccepted)
addExample(retrievalmarket.DealStatusNew)
addExample(&textSelExample)
addExample(&apiSelExample)
addExample(network.ReachabilityPublic)
addExample(build.NewestNetworkVersion)
addExample(map[string]int{"name": 42})
@@ -226,16 +231,18 @@ func init() {
Hostname: "host",
Resources: storiface.WorkerResources{
MemPhysical: 256 << 30,
MemUsed: 2 << 30,
MemSwap: 120 << 30,
MemReserved: 2 << 30,
MemSwapUsed: 2 << 30,
CPUs: 64,
GPUs: []string{"aGPU 1337"},
Resources: storiface.ResourceTable,
},
},
Enabled: true,
MemUsedMin: 0,
MemUsedMax: 0,
GpuUsed: false,
GpuUsed: 0,
CpuUse: 0,
},
})
@@ -281,6 +288,7 @@ func init() {
State: "ShardStateAvailable",
Error: "<error>",
})
addExample(storiface.ResourceTable)
}
func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) {

View File

@@ -25,7 +25,6 @@ import (
miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
types "github.com/filecoin-project/lotus/chain/types"
alerting "github.com/filecoin-project/lotus/journal/alerting"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
dtypes "github.com/filecoin-project/lotus/node/modules/dtypes"
imports "github.com/filecoin-project/lotus/node/repo/imports"
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
@@ -537,6 +536,20 @@ func (mr *MockFullNodeMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomo
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockFullNode)(nil).ClientDealSize), arg0, arg1)
}
// ClientExport mocks base method.
func (m *MockFullNode) ClientExport(arg0 context.Context, arg1 api.ExportRef, arg2 api.FileRef) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientExport", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// ClientExport indicates an expected call of ClientExport.
func (mr *MockFullNodeMockRecorder) ClientExport(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientExport", reflect.TypeOf((*MockFullNode)(nil).ClientExport), arg0, arg1, arg2)
}
// ClientFindData mocks base method.
func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]api.QueryOffer, error) {
m.ctrl.T.Helper()
@@ -775,17 +788,18 @@ func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2,
}
// ClientRetrieve mocks base method.
func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) error {
func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder) (*api.RestrievalRes, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1)
ret0, _ := ret[0].(*api.RestrievalRes)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ClientRetrieve indicates an expected call of ClientRetrieve.
func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1, arg2 interface{}) *gomock.Call {
func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1, arg2)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1)
}
// ClientRetrieveTryRestartInsufficientFunds mocks base method.
@@ -802,19 +816,18 @@ func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(ar
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1)
}
// ClientRetrieveWithEvents mocks base method.
func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
// ClientRetrieveWait mocks base method.
func (m *MockFullNode) ClientRetrieveWait(arg0 context.Context, arg1 retrievalmarket.DealID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2)
ret0, _ := ret[0].(<-chan marketevents.RetrievalEvent)
ret1, _ := ret[1].(error)
return ret0, ret1
ret := m.ctrl.Call(m, "ClientRetrieveWait", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// ClientRetrieveWithEvents indicates an expected call of ClientRetrieveWithEvents.
func (mr *MockFullNodeMockRecorder) ClientRetrieveWithEvents(arg0, arg1, arg2 interface{}) *gomock.Call {
// ClientRetrieveWait indicates an expected call of ClientRetrieveWait.
func (mr *MockFullNodeMockRecorder) ClientRetrieveWait(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWithEvents", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWithEvents), arg0, arg1, arg2)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWait", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWait), arg0, arg1)
}
// ClientStartDeal mocks base method.
@@ -1428,18 +1441,33 @@ func (mr *MockFullNodeMockRecorder) MsigApproveTxnHash(arg0, arg1, arg2, arg3, a
}
// MsigCancel mocks base method.
func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (*api.MessagePrototype, error) {
func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address) (*api.MessagePrototype, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*api.MessagePrototype)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// MsigCancel indicates an expected call of MsigCancel.
func (mr *MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call {
func (mr *MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3)
}
// MsigCancelTxnHash mocks base method.
func (m *MockFullNode) MsigCancelTxnHash(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (*api.MessagePrototype, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "MsigCancelTxnHash", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
ret0, _ := ret[0].(*api.MessagePrototype)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// MsigCancelTxnHash indicates an expected call of MsigCancelTxnHash.
func (mr *MockFullNodeMockRecorder) MsigCancelTxnHash(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancelTxnHash", reflect.TypeOf((*MockFullNode)(nil).MsigCancelTxnHash), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
}
// MsigCreate mocks base method.

View File

@@ -28,7 +28,6 @@ import (
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
"github.com/filecoin-project/lotus/journal/alerting"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/repo/imports"
"github.com/filecoin-project/specs-storage/storage"
@@ -162,6 +161,8 @@ type FullNodeStruct struct {
ClientDealSize func(p0 context.Context, p1 cid.Cid) (DataSize, error) `perm:"read"`
ClientExport func(p0 context.Context, p1 ExportRef, p2 FileRef) error `perm:"admin"`
ClientFindData func(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) `perm:"read"`
ClientGenCar func(p0 context.Context, p1 FileRef, p2 string) error `perm:"write"`
@@ -194,11 +195,11 @@ type FullNodeStruct struct {
ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
ClientRetrieve func(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error `perm:"admin"`
ClientRetrieve func(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) `perm:"admin"`
ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"`
ClientRetrieveWithEvents func(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"`
ClientRetrieveWait func(p0 context.Context, p1 retrievalmarket.DealID) error `perm:"admin"`
ClientStartDeal func(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) `perm:"admin"`
@@ -270,7 +271,9 @@ type FullNodeStruct struct {
MsigApproveTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) `perm:"sign"`
MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) `perm:"sign"`
MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) `perm:"sign"`
MsigCancelTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) `perm:"sign"`
MsigCreate func(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) `perm:"sign"`
@@ -478,8 +481,14 @@ type GatewayStruct struct {
Internal struct {
ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) ``
ChainGetGenesis func(p0 context.Context) (*types.TipSet, error) ``
ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) ``
ChainGetParentMessages func(p0 context.Context, p1 cid.Cid) ([]Message, error) ``
ChainGetParentReceipts func(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) ``
ChainGetPath func(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) ``
ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) ``
@@ -1349,6 +1358,17 @@ func (s *FullNodeStub) ClientDealSize(p0 context.Context, p1 cid.Cid) (DataSize,
return *new(DataSize), ErrNotSupported
}
func (s *FullNodeStruct) ClientExport(p0 context.Context, p1 ExportRef, p2 FileRef) error {
if s.Internal.ClientExport == nil {
return ErrNotSupported
}
return s.Internal.ClientExport(p0, p1, p2)
}
func (s *FullNodeStub) ClientExport(p0 context.Context, p1 ExportRef, p2 FileRef) error {
return ErrNotSupported
}
func (s *FullNodeStruct) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) {
if s.Internal.ClientFindData == nil {
return *new([]QueryOffer), ErrNotSupported
@@ -1525,15 +1545,15 @@ func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatran
return ErrNotSupported
}
func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error {
func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) {
if s.Internal.ClientRetrieve == nil {
return ErrNotSupported
return nil, ErrNotSupported
}
return s.Internal.ClientRetrieve(p0, p1, p2)
return s.Internal.ClientRetrieve(p0, p1)
}
func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error {
return ErrNotSupported
func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) {
return nil, ErrNotSupported
}
func (s *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error {
@@ -1547,15 +1567,15 @@ func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Cont
return ErrNotSupported
}
func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) {
if s.Internal.ClientRetrieveWithEvents == nil {
return nil, ErrNotSupported
func (s *FullNodeStruct) ClientRetrieveWait(p0 context.Context, p1 retrievalmarket.DealID) error {
if s.Internal.ClientRetrieveWait == nil {
return ErrNotSupported
}
return s.Internal.ClientRetrieveWithEvents(p0, p1, p2)
return s.Internal.ClientRetrieveWait(p0, p1)
}
func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) {
return nil, ErrNotSupported
func (s *FullNodeStub) ClientRetrieveWait(p0 context.Context, p1 retrievalmarket.DealID) error {
return ErrNotSupported
}
func (s *FullNodeStruct) ClientStartDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) {
@@ -1943,14 +1963,25 @@ func (s *FullNodeStub) MsigApproveTxnHash(p0 context.Context, p1 address.Address
return nil, ErrNotSupported
}
func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) {
func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) {
if s.Internal.MsigCancel == nil {
return nil, ErrNotSupported
}
return s.Internal.MsigCancel(p0, p1, p2, p3, p4, p5, p6, p7)
return s.Internal.MsigCancel(p0, p1, p2, p3)
}
func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) {
func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) {
return nil, ErrNotSupported
}
func (s *FullNodeStruct) MsigCancelTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) {
if s.Internal.MsigCancelTxnHash == nil {
return nil, ErrNotSupported
}
return s.Internal.MsigCancelTxnHash(p0, p1, p2, p3, p4, p5, p6, p7)
}
func (s *FullNodeStub) MsigCancelTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) {
return nil, ErrNotSupported
}
@@ -3032,6 +3063,17 @@ func (s *GatewayStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*Bl
return nil, ErrNotSupported
}
func (s *GatewayStruct) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) {
if s.Internal.ChainGetGenesis == nil {
return nil, ErrNotSupported
}
return s.Internal.ChainGetGenesis(p0)
}
func (s *GatewayStub) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) {
return nil, ErrNotSupported
}
func (s *GatewayStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) {
if s.Internal.ChainGetMessage == nil {
return nil, ErrNotSupported
@@ -3043,6 +3085,28 @@ func (s *GatewayStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Me
return nil, ErrNotSupported
}
func (s *GatewayStruct) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]Message, error) {
if s.Internal.ChainGetParentMessages == nil {
return *new([]Message), ErrNotSupported
}
return s.Internal.ChainGetParentMessages(p0, p1)
}
func (s *GatewayStub) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]Message, error) {
return *new([]Message), ErrNotSupported
}
func (s *GatewayStruct) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) {
if s.Internal.ChainGetParentReceipts == nil {
return *new([]*types.MessageReceipt), ErrNotSupported
}
return s.Internal.ChainGetParentReceipts(p0, p1)
}
func (s *GatewayStub) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) {
return *new([]*types.MessageReceipt), ErrNotSupported
}
func (s *GatewayStruct) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) {
if s.Internal.ChainGetPath == nil {
return *new([]*HeadChange), ErrNotSupported

View File

@@ -5,11 +5,10 @@ import (
"fmt"
"time"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/lotus/chain/types"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/types"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer"
@@ -194,4 +193,47 @@ type RetrievalInfo struct {
TransferChannelID *datatransfer.ChannelID
DataTransfer *DataTransferChannel
// optional event if part of ClientGetRetrievalUpdates
Event *retrievalmarket.ClientEvent
}
type RestrievalRes struct {
DealID retrievalmarket.DealID
}
// Selector specifies ipld selector string
// - if the string starts with '{', it's interpreted as json selector string
// see https://ipld.io/specs/selectors/ and https://ipld.io/specs/selectors/fixtures/selector-fixtures-1/
// - otherwise the string is interpreted as ipld-selector-text-lite (simple ipld path)
// see https://github.com/ipld/go-ipld-selector-text-lite
type Selector string
type DagSpec struct {
// DataSelector matches data to be retrieved
// - when using textselector, the path specifies subtree
// - the matched graph must have a single root
DataSelector *Selector
// ExportMerkleProof is applicable only when exporting to a CAR file via a path textselector
// When true, in addition to the selection target, the resulting CAR will contain every block along the
// path back to, and including the original root
// When false the resulting CAR contains only the blocks of the target subdag
ExportMerkleProof bool
}
type ExportRef struct {
Root cid.Cid
// DAGs array specifies a list of DAGs to export
// - If exporting into unixfs files, only one DAG is supported, DataSelector is only used to find the targeted root node
// - If exporting into a car file
// - When exactly one text-path DataSelector is specified exports the subgraph and its full merkle-path from the original root
// - Otherwise ( multiple paths and/or JSON selector specs) determines each individual subroot and exports the subtrees as a multi-root car
// - When not specified defaults to a single DAG:
// - Data - the entire DAG: `{"R":{"l":{"none":{}},":>":{"a":{">":{"@":{}}}}}}`
DAGs []DagSpec
FromLocalCAR string // if specified, get data from a local CARv2 file.
DealID retrievalmarket.DealID
}
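
A sketch of a partial export built from these types (illustrative values; `node`, `root`, and `dealID` are assumptions):

```go
// A plain path (no leading '{') is parsed as an ipld-selector-text-lite
// expression selecting a single sub-path of the DAG.
sel := api.Selector("Links/21/Hash")
eref := api.ExportRef{
	Root:   root,
	DealID: dealID,
	// Export the matched subtree plus the merkle path back to the root.
	DAGs: []api.DagSpec{{DataSelector: &sel, ExportMerkleProof: true}},
}
err := node.ClientExport(ctx, eref, api.FileRef{Path: "partial.car", IsCAR: true})
```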

View File

@@ -12,6 +12,7 @@ import (
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline"
"github.com/ipfs/go-cid"
textselector "github.com/ipld/go-ipld-selector-text-lite"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/filecoin-project/lotus/api"
@@ -325,10 +326,10 @@ type FullNode interface {
// ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) //perm:read
// ClientRetrieve initiates the retrieval of a file, as specified in the order.
ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error //perm:admin
ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef) error //perm:admin
// ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
// of status updates.
ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin
ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin
// ClientQueryAsk returns a signed StorageAsk from the specified miner.
// ClientListRetrievals returns information about retrievals made by the local client
ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) //perm:write
@@ -714,3 +715,37 @@ type FullNode interface {
// the path specified when calling CreateBackup is within the base path
CreateBackup(ctx context.Context, fpath string) error //perm:admin
}
func OfferOrder(o api.QueryOffer, client address.Address) RetrievalOrder {
return RetrievalOrder{
Root: o.Root,
Piece: o.Piece,
Size: o.Size,
Total: o.MinPrice,
UnsealPrice: o.UnsealPrice,
PaymentInterval: o.PaymentInterval,
PaymentIntervalIncrease: o.PaymentIntervalIncrease,
Client: client,
Miner: o.Miner,
MinerPeer: &o.MinerPeer,
}
}
type RetrievalOrder struct {
// TODO: make this less unixfs specific
Root cid.Cid
Piece *cid.Cid
DatamodelPathSelector *textselector.Expression
Size uint64
FromLocalCAR string // if specified, get data from a local CARv2 file.
// TODO: support offset
Total types.BigInt
UnsealPrice types.BigInt
PaymentInterval uint64
PaymentIntervalIncrease uint64
Client address.Address
Miner address.Address
MinerPeer *retrievalmarket.RetrievalPeer
}

View File

@@ -125,11 +125,11 @@ type FullNodeStruct struct {
ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
ClientRetrieve func(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error `perm:"admin"`
ClientRetrieve func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error `perm:"admin"`
ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"`
ClientRetrieveWithEvents func(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"`
ClientRetrieveWithEvents func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"`
ClientStartDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"admin"`
@@ -965,14 +965,14 @@ func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatran
return ErrNotSupported
}
func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error {
func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error {
if s.Internal.ClientRetrieve == nil {
return ErrNotSupported
}
return s.Internal.ClientRetrieve(p0, p1, p2)
}
func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error {
func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error {
return ErrNotSupported
}
@@ -987,14 +987,14 @@ func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Cont
return ErrNotSupported
}
func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
if s.Internal.ClientRetrieveWithEvents == nil {
return nil, ErrNotSupported
}
return s.Internal.ClientRetrieveWithEvents(p0, p1, p2)
}
func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
return nil, ErrNotSupported
}

View File

@@ -21,6 +21,7 @@ import (
network "github.com/filecoin-project/go-state-types/network"
api "github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
v0api "github.com/filecoin-project/lotus/api/v0api"
miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
types "github.com/filecoin-project/lotus/chain/types"
alerting "github.com/filecoin-project/lotus/journal/alerting"
@@ -760,7 +761,7 @@ func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2,
}
// ClientRetrieve mocks base method.
func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) error {
func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 v0api.RetrievalOrder, arg2 *api.FileRef) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
@@ -788,7 +789,7 @@ func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(ar
}
// ClientRetrieveWithEvents mocks base method.
func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 v0api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2)
ret0, _ := ret[0].(<-chan marketevents.RetrievalEvent)

View File

@@ -3,7 +3,10 @@ package v0api
import (
"context"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/types"
@@ -108,7 +111,7 @@ func (w *WrapperV1Full) MsigApproveTxnHash(ctx context.Context, msig address.Add
}
func (w *WrapperV1Full) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
p, err := w.FullNode.MsigCancel(ctx, msig, txID, to, amt, src, method, params)
p, err := w.FullNode.MsigCancelTxnHash(ctx, msig, txID, to, amt, src, method, params)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
@@ -194,4 +197,144 @@ func (w *WrapperV1Full) ChainGetRandomnessFromBeacon(ctx context.Context, tsk ty
return w.StateGetRandomnessFromBeacon(ctx, personalization, randEpoch, entropy, tsk)
}
func (w *WrapperV1Full) ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef) error {
events := make(chan marketevents.RetrievalEvent)
go w.clientRetrieve(ctx, order, ref, events)
for {
select {
case evt, ok := <-events:
if !ok { // done successfully
return nil
}
if evt.Err != "" {
return xerrors.Errorf("retrieval failed: %s", evt.Err)
}
case <-ctx.Done():
return xerrors.Errorf("retrieval timed out")
}
}
}
func (w *WrapperV1Full) ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
events := make(chan marketevents.RetrievalEvent)
go w.clientRetrieve(ctx, order, ref, events)
return events, nil
}
func readSubscribeEvents(ctx context.Context, dealID retrievalmarket.DealID, subscribeEvents <-chan api.RetrievalInfo, events chan marketevents.RetrievalEvent) error {
for {
var subscribeEvent api.RetrievalInfo
var evt retrievalmarket.ClientEvent
select {
case <-ctx.Done():
return xerrors.New("Retrieval Timed Out")
case subscribeEvent = <-subscribeEvents:
if subscribeEvent.ID != dealID {
// we can't check the deal ID ahead of time because:
// 1. We need to subscribe before retrieving.
// 2. We won't know the deal ID until after retrieving.
continue
}
if subscribeEvent.Event != nil {
evt = *subscribeEvent.Event
}
}
select {
case <-ctx.Done():
return xerrors.New("Retrieval Timed Out")
case events <- marketevents.RetrievalEvent{
Event: evt,
Status: subscribeEvent.Status,
BytesReceived: subscribeEvent.BytesReceived,
FundsSpent: subscribeEvent.TotalPaid,
}:
}
switch subscribeEvent.Status {
case retrievalmarket.DealStatusCompleted:
return nil
case retrievalmarket.DealStatusRejected:
return xerrors.Errorf("Retrieval Proposal Rejected: %s", subscribeEvent.Message)
case
retrievalmarket.DealStatusDealNotFound,
retrievalmarket.DealStatusErrored:
return xerrors.Errorf("Retrieval Error: %s", subscribeEvent.Message)
}
}
}
func (w *WrapperV1Full) clientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef, events chan marketevents.RetrievalEvent) {
defer close(events)
finish := func(e error) {
if e != nil {
events <- marketevents.RetrievalEvent{Err: e.Error(), FundsSpent: big.Zero()}
}
}
var dealID retrievalmarket.DealID
if order.FromLocalCAR == "" {
// Subscribe to events before retrieving to avoid losing events.
subscribeCtx, cancel := context.WithCancel(ctx)
defer cancel()
retrievalEvents, err := w.ClientGetRetrievalUpdates(subscribeCtx)
if err != nil {
finish(xerrors.Errorf("GetRetrievalUpdates failed: %w", err))
return
}
retrievalRes, err := w.FullNode.ClientRetrieve(ctx, api.RetrievalOrder{
Root: order.Root,
Piece: order.Piece,
Size: order.Size,
Total: order.Total,
UnsealPrice: order.UnsealPrice,
PaymentInterval: order.PaymentInterval,
PaymentIntervalIncrease: order.PaymentIntervalIncrease,
Client: order.Client,
Miner: order.Miner,
MinerPeer: order.MinerPeer,
})
if err != nil {
finish(xerrors.Errorf("Retrieve failed: %w", err))
return
}
dealID = retrievalRes.DealID
err = readSubscribeEvents(ctx, retrievalRes.DealID, retrievalEvents, events)
if err != nil {
finish(xerrors.Errorf("Retrieve: %w", err))
return
}
}
// If ref is nil, it only fetches the data into the configured blockstore.
if ref == nil {
finish(nil)
return
}
eref := api.ExportRef{
Root: order.Root,
FromLocalCAR: order.FromLocalCAR,
DealID: dealID,
}
if order.DatamodelPathSelector != nil {
s := api.Selector(*order.DatamodelPathSelector)
eref.DAGs = append(eref.DAGs, api.DagSpec{
DataSelector: &s,
ExportMerkleProof: true,
})
}
finish(w.ClientExport(ctx, eref, *ref))
}
var _ FullNode = &WrapperV1Full{}

View File

@@ -58,7 +58,7 @@ var (
FullAPIVersion1 = newVer(2, 1, 0)
MinerAPIVersion0 = newVer(1, 2, 0)
WorkerAPIVersion0 = newVer(1, 1, 0)
WorkerAPIVersion0 = newVer(1, 5, 0)
)
//nolint:varcheck,deadcode

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -37,7 +37,7 @@ func BuildTypeString() string {
}
// BuildVersion is the local build version
const BuildVersion = "1.13.1"
const BuildVersion = "1.13.2"
func UserVersion() string {
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {

View File

@@ -67,3 +67,22 @@ func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi.
return 0, xerrors.Errorf("unsupported network version")
}
// WindowPoStProofTypeFromSectorSize returns preferred post proof type for creating
// new miner actors and new sectors
func WindowPoStProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredPoStProof, error) {
switch ssize {
case 2 << 10:
return abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, nil
case 8 << 20:
return abi.RegisteredPoStProof_StackedDrgWindow8MiBV1, nil
case 512 << 20:
return abi.RegisteredPoStProof_StackedDrgWindow512MiBV1, nil
case 32 << 30:
return abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, nil
case 64 << 30:
return abi.RegisteredPoStProof_StackedDrgWindow64GiBV1, nil
default:
return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize)
}
}
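
A quick usage sketch for the new helper (caller variable names are illustrative):

```go
// Pick the preferred window PoSt proof type for a 32 GiB sector size.
pt, err := WindowPoStProofTypeFromSectorSize(abi.SectorSize(32 << 30))
if err != nil {
	return err
}
_ = pt // abi.RegisteredPoStProof_StackedDrgWindow32GiBV1
```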

View File

@@ -26,7 +26,6 @@ import (
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-cidutil/cidenc"
textselector "github.com/ipld/go-ipld-selector-text-lite"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/multiformats/go-multibase"
"github.com/urfave/cli/v2"
@@ -94,6 +93,8 @@ var clientCmd = &cli.Command{
WithCategory("data", clientStat),
WithCategory("retrieval", clientFindCmd),
WithCategory("retrieval", clientRetrieveCmd),
WithCategory("retrieval", clientRetrieveCatCmd),
WithCategory("retrieval", clientRetrieveLsCmd),
WithCategory("retrieval", clientCancelRetrievalDealCmd),
WithCategory("retrieval", clientListRetrievalsCmd),
WithCategory("util", clientCommPCmd),
@@ -1029,209 +1030,6 @@ var clientFindCmd = &cli.Command{
},
}
const DefaultMaxRetrievePrice = "0.01"
var clientRetrieveCmd = &cli.Command{
Name: "retrieve",
Usage: "Retrieve data from network",
ArgsUsage: "[dataCid outputPath]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "from",
Usage: "address to send transactions from",
},
&cli.BoolFlag{
Name: "car",
Usage: "export to a car file instead of a regular file",
},
&cli.StringFlag{
Name: "miner",
Usage: "miner address for retrieval, if not present it'll use local discovery",
},
&cli.StringFlag{
Name: "datamodel-path-selector",
Usage: "a rudimentary (DM-level-only) text-path selector, allowing for sub-selection within a deal",
},
&cli.StringFlag{
Name: "maxPrice",
Usage: fmt.Sprintf("maximum price the client is willing to consider (default: %s FIL)", DefaultMaxRetrievePrice),
},
&cli.StringFlag{
Name: "pieceCid",
Usage: "require data to be retrieved from a specific Piece CID",
},
&cli.BoolFlag{
Name: "allow-local",
},
},
Action: func(cctx *cli.Context) error {
if cctx.NArg() != 2 {
return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments"))
}
fapi, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
afmt := NewAppFmt(cctx.App)
var payer address.Address
if cctx.String("from") != "" {
payer, err = address.NewFromString(cctx.String("from"))
} else {
payer, err = fapi.WalletDefaultAddress(ctx)
}
if err != nil {
return err
}
file, err := cid.Parse(cctx.Args().Get(0))
if err != nil {
return err
}
var pieceCid *cid.Cid
if cctx.String("pieceCid") != "" {
parsed, err := cid.Parse(cctx.String("pieceCid"))
if err != nil {
return err
}
pieceCid = &parsed
}
var order *lapi.RetrievalOrder
if cctx.Bool("allow-local") {
imports, err := fapi.ClientListImports(ctx)
if err != nil {
return err
}
for _, i := range imports {
if i.Root != nil && i.Root.Equals(file) {
order = &lapi.RetrievalOrder{
Root: file,
FromLocalCAR: i.CARPath,
Total: big.Zero(),
UnsealPrice: big.Zero(),
}
break
}
}
}
if order == nil {
var offer api.QueryOffer
minerStrAddr := cctx.String("miner")
if minerStrAddr == "" { // Local discovery
offers, err := fapi.ClientFindData(ctx, file, pieceCid)
var cleaned []api.QueryOffer
// filter out offers that errored
for _, o := range offers {
if o.Err == "" {
cleaned = append(cleaned, o)
}
}
offers = cleaned
// sort by price low to high
sort.Slice(offers, func(i, j int) bool {
return offers[i].MinPrice.LessThan(offers[j].MinPrice)
})
if err != nil {
return err
}
// TODO: parse offer strings from `client find`, make this smarter
if len(offers) < 1 {
fmt.Println("Failed to find file")
return nil
}
offer = offers[0]
} else { // Directed retrieval
minerAddr, err := address.NewFromString(minerStrAddr)
if err != nil {
return err
}
offer, err = fapi.ClientMinerQueryOffer(ctx, minerAddr, file, pieceCid)
if err != nil {
return err
}
}
if offer.Err != "" {
return fmt.Errorf("The received offer errored: %s", offer.Err)
}
maxPrice := types.MustParseFIL(DefaultMaxRetrievePrice)
if cctx.String("maxPrice") != "" {
maxPrice, err = types.ParseFIL(cctx.String("maxPrice"))
if err != nil {
return xerrors.Errorf("parsing maxPrice: %w", err)
}
}
if offer.MinPrice.GreaterThan(big.Int(maxPrice)) {
return xerrors.Errorf("failed to find offer satisfying maxPrice: %s", maxPrice)
}
o := offer.Order(payer)
order = &o
}
ref := &lapi.FileRef{
Path: cctx.Args().Get(1),
IsCAR: cctx.Bool("car"),
}
if sel := textselector.Expression(cctx.String("datamodel-path-selector")); sel != "" {
order.DatamodelPathSelector = &sel
}
updates, err := fapi.ClientRetrieveWithEvents(ctx, *order, ref)
if err != nil {
return xerrors.Errorf("error setting up retrieval: %w", err)
}
var prevStatus retrievalmarket.DealStatus
for {
select {
case evt, ok := <-updates:
if ok {
afmt.Printf("> Recv: %s, Paid %s, %s (%s)\n",
types.SizeStr(types.NewInt(evt.BytesReceived)),
types.FIL(evt.FundsSpent),
retrievalmarket.ClientEvents[evt.Event],
retrievalmarket.DealStatuses[evt.Status],
)
prevStatus = evt.Status
}
if evt.Err != "" {
return xerrors.Errorf("retrieval failed: %s", evt.Err)
}
if !ok {
if prevStatus == retrievalmarket.DealStatusCompleted {
afmt.Println("Success")
} else {
afmt.Printf("saw final deal state %s instead of expected success state DealStatusCompleted\n",
retrievalmarket.DealStatuses[prevStatus])
}
return nil
}
case <-ctx.Done():
return xerrors.Errorf("retrieval timed out")
}
}
},
}
var clientListRetrievalsCmd = &cli.Command{
Name: "list-retrievals",
Usage: "List retrieval market deals",

cli/client_retr.go (new file, 602 lines)

View File

@@ -0,0 +1,602 @@
package cli
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"sort"
"strings"
"time"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
offline "github.com/ipfs/go-ipfs-exchange-offline"
"github.com/ipfs/go-merkledag"
carv2 "github.com/ipld/go-car/v2"
"github.com/ipld/go-car/v2/blockstore"
"github.com/ipld/go-ipld-prime"
"github.com/ipld/go-ipld-prime/codec/dagjson"
basicnode "github.com/ipld/go-ipld-prime/node/basic"
"github.com/ipld/go-ipld-prime/traversal"
"github.com/ipld/go-ipld-prime/traversal/selector"
"github.com/ipld/go-ipld-prime/traversal/selector/builder"
selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse"
textselector "github.com/ipld/go-ipld-selector-text-lite"
"github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-state-types/big"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/markets/utils"
"github.com/filecoin-project/lotus/node/repo"
)
const DefaultMaxRetrievePrice = "0"
func retrieve(ctx context.Context, cctx *cli.Context, fapi lapi.FullNode, sel *lapi.Selector, printf func(string, ...interface{})) (*lapi.ExportRef, error) {
var payer address.Address
var err error
if cctx.String("from") != "" {
payer, err = address.NewFromString(cctx.String("from"))
} else {
payer, err = fapi.WalletDefaultAddress(ctx)
}
if err != nil {
return nil, err
}
file, err := cid.Parse(cctx.Args().Get(0))
if err != nil {
return nil, err
}
var pieceCid *cid.Cid
if cctx.String("pieceCid") != "" {
parsed, err := cid.Parse(cctx.String("pieceCid"))
if err != nil {
return nil, err
}
pieceCid = &parsed
}
var eref *lapi.ExportRef
if cctx.Bool("allow-local") {
imports, err := fapi.ClientListImports(ctx)
if err != nil {
return nil, err
}
for _, i := range imports {
if i.Root != nil && i.Root.Equals(file) {
eref = &lapi.ExportRef{
Root: file,
FromLocalCAR: i.CARPath,
}
break
}
}
}
// no local found, so make a retrieval
if eref == nil {
var offer lapi.QueryOffer
minerStrAddr := cctx.String("provider")
if minerStrAddr == "" { // Local discovery
offers, err := fapi.ClientFindData(ctx, file, pieceCid)
if err != nil {
return nil, err
}
// filter out offers that errored
var cleaned []lapi.QueryOffer
for _, o := range offers {
if o.Err == "" {
cleaned = append(cleaned, o)
}
}
offers = cleaned
// sort by price low to high
sort.Slice(offers, func(i, j int) bool {
return offers[i].MinPrice.LessThan(offers[j].MinPrice)
})
// TODO: parse offer strings from `client find`, make this smarter
if len(offers) < 1 {
return nil, xerrors.New("failed to find any offers for the requested file")
}
offer = offers[0]
} else { // Directed retrieval
minerAddr, err := address.NewFromString(minerStrAddr)
if err != nil {
return nil, err
}
offer, err = fapi.ClientMinerQueryOffer(ctx, minerAddr, file, pieceCid)
if err != nil {
return nil, err
}
}
if offer.Err != "" {
return nil, fmt.Errorf("offer error: %s", offer.Err)
}
maxPrice := types.MustParseFIL(DefaultMaxRetrievePrice)
if cctx.String("maxPrice") != "" {
maxPrice, err = types.ParseFIL(cctx.String("maxPrice"))
if err != nil {
return nil, xerrors.Errorf("parsing maxPrice: %w", err)
}
}
if offer.MinPrice.GreaterThan(big.Int(maxPrice)) {
return nil, xerrors.Errorf("failed to find offer satisfying maxPrice: %s", maxPrice)
}
o := offer.Order(payer)
o.DataSelector = sel
subscribeEvents, err := fapi.ClientGetRetrievalUpdates(ctx)
if err != nil {
return nil, xerrors.Errorf("error setting up retrieval updates: %w", err)
}
retrievalRes, err := fapi.ClientRetrieve(ctx, o)
if err != nil {
return nil, xerrors.Errorf("error setting up retrieval: %w", err)
}
start := time.Now()
readEvents:
for {
var evt lapi.RetrievalInfo
select {
case <-ctx.Done():
return nil, xerrors.New("Retrieval Timed Out")
case evt = <-subscribeEvents:
if evt.ID != retrievalRes.DealID {
// we can't check the deal ID ahead of time because:
// 1. We need to subscribe before retrieving.
// 2. We won't know the deal ID until after retrieving.
continue
}
}
event := "New"
if evt.Event != nil {
event = retrievalmarket.ClientEvents[*evt.Event]
}
printf("Recv %s, Paid %s, %s (%s), %s\n",
types.SizeStr(types.NewInt(evt.BytesReceived)),
types.FIL(evt.TotalPaid),
strings.TrimPrefix(event, "ClientEvent"),
strings.TrimPrefix(retrievalmarket.DealStatuses[evt.Status], "DealStatus"),
time.Since(start).Truncate(time.Millisecond),
)
switch evt.Status {
case retrievalmarket.DealStatusCompleted:
break readEvents
case retrievalmarket.DealStatusRejected:
return nil, xerrors.Errorf("Retrieval Proposal Rejected: %s", evt.Message)
case
retrievalmarket.DealStatusDealNotFound,
retrievalmarket.DealStatusErrored:
return nil, xerrors.Errorf("Retrieval Error: %s", evt.Message)
}
}
eref = &lapi.ExportRef{
Root: file,
DealID: retrievalRes.DealID,
}
}
return eref, nil
}
var retrFlagsCommon = []cli.Flag{
&cli.StringFlag{
Name: "from",
Usage: "address to send transactions from",
},
&cli.StringFlag{
Name: "provider",
Usage: "provider to use for retrieval, if not present it'll use local discovery",
Aliases: []string{"miner"},
},
&cli.StringFlag{
Name: "maxPrice",
Usage: fmt.Sprintf("maximum price the client is willing to consider (default: %s FIL)", DefaultMaxRetrievePrice),
},
&cli.StringFlag{
Name: "pieceCid",
Usage: "require data to be retrieved from a specific Piece CID",
},
&cli.BoolFlag{
Name: "allow-local",
// todo: default to true?
},
}
var clientRetrieveCmd = &cli.Command{
Name: "retrieve",
Usage: "Retrieve data from network",
ArgsUsage: "[dataCid outputPath]",
Description: `Retrieve data from the Filecoin network.
The retrieve command will attempt to find a provider and make a retrieval deal
with them. In case a provider can't be found automatically, one can be specified
with the --provider flag.
By default the data will be interpreted as a DAG-PB UnixFSv1 file. Alternatively
a CAR file containing the raw IPLD graph can be exported by setting the --car
flag.
Partial Retrieval:
The --data-selector flag can be used to specify a sub-graph to fetch. The
selector can be specified as either an IPLD datamodel text-path selector or an
IPLD JSON selector.
In case of unixfs retrieval, the selector must point at a single root node, and
match the entire graph under that node.
In case of CAR retrieval, the selector must have one common "sub-root" node.
Examples:
- Retrieve a file by CID
$ lotus client retrieve Qm... my-file.txt
- Retrieve a file by CID from f0123
$ lotus client retrieve --provider f0123 Qm... my-file.txt
- Retrieve the first file from a specified directory
$ lotus client retrieve --data-selector /Links/0/Hash Qm... my-file.txt
`,
Flags: append([]cli.Flag{
&cli.BoolFlag{
Name: "car",
Usage: "Export to a car file instead of a regular file",
},
&cli.StringFlag{
Name: "data-selector",
Aliases: []string{"datamodel-path-selector"},
Usage: "IPLD datamodel text-path selector, or IPLD json selector",
},
&cli.BoolFlag{
Name: "car-export-merkle-proof",
Usage: "(requires --data-selector and --car) Export data-selector merkle proof",
},
}, retrFlagsCommon...),
Action: func(cctx *cli.Context) error {
if cctx.NArg() != 2 {
return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments"))
}
if cctx.Bool("car-export-merkle-proof") {
if !cctx.Bool("car") || !cctx.IsSet("data-selector") {
return ShowHelp(cctx, fmt.Errorf("--car-export-merkle-proof requires --car and --data-selector"))
}
}
fapi, closer, err := GetFullNodeAPIV1(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
afmt := NewAppFmt(cctx.App)
var s *lapi.Selector
if sel := lapi.Selector(cctx.String("data-selector")); sel != "" {
s = &sel
}
eref, err := retrieve(ctx, cctx, fapi, s, afmt.Printf)
if err != nil {
return err
}
if s != nil {
eref.DAGs = append(eref.DAGs, lapi.DagSpec{DataSelector: s, ExportMerkleProof: cctx.Bool("car-export-merkle-proof")})
}
err = fapi.ClientExport(ctx, *eref, lapi.FileRef{
Path: cctx.Args().Get(1),
IsCAR: cctx.Bool("car"),
})
if err != nil {
return err
}
afmt.Println("Success")
return nil
},
}
func ClientExportStream(apiAddr string, apiAuth http.Header, eref lapi.ExportRef, car bool) (io.ReadCloser, error) {
rj, err := json.Marshal(eref)
if err != nil {
return nil, xerrors.Errorf("marshaling export ref: %w", err)
}
ma, err := multiaddr.NewMultiaddr(apiAddr)
if err == nil {
_, addr, err := manet.DialArgs(ma)
if err != nil {
return nil, err
}
// todo: make cliutil helpers for this
apiAddr = "http://" + addr
}
aa, err := url.Parse(apiAddr)
if err != nil {
return nil, xerrors.Errorf("parsing api address: %w", err)
}
switch aa.Scheme {
case "ws":
aa.Scheme = "http"
case "wss":
aa.Scheme = "https"
}
aa.Path = path.Join(aa.Path, "rest/v0/export")
req, err := http.NewRequest("GET", fmt.Sprintf("%s?car=%t&export=%s", aa, car, url.QueryEscape(string(rj))), nil)
if err != nil {
return nil, err
}
req.Header = apiAuth
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
em, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, xerrors.Errorf("reading error body: %w", err)
}
resp.Body.Close() // nolint
return nil, xerrors.Errorf("getting root car: http %d: %s", resp.StatusCode, string(em))
}
return resp.Body, nil
}
var clientRetrieveCatCmd = &cli.Command{
Name: "cat",
Usage: "Show data from network",
ArgsUsage: "[dataCid]",
Flags: append([]cli.Flag{
&cli.BoolFlag{
Name: "ipld",
Usage: "list IPLD datamodel links",
},
&cli.StringFlag{
Name: "data-selector",
Usage: "IPLD datamodel text-path selector, or IPLD json selector",
},
}, retrFlagsCommon...),
Action: func(cctx *cli.Context) error {
if cctx.NArg() != 1 {
return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments"))
}
ainfo, err := GetAPIInfo(cctx, repo.FullNode)
if err != nil {
return xerrors.Errorf("could not get API info: %w", err)
}
fapi, closer, err := GetFullNodeAPIV1(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
afmt := NewAppFmt(cctx.App)
sel := lapi.Selector(cctx.String("data-selector"))
selp := &sel
if sel == "" {
selp = nil
}
eref, err := retrieve(ctx, cctx, fapi, selp, afmt.Printf)
if err != nil {
return err
}
fmt.Println() // separate retrieval events from results
if sel != "" {
eref.DAGs = append(eref.DAGs, lapi.DagSpec{DataSelector: &sel})
}
rc, err := ClientExportStream(ainfo.Addr, ainfo.AuthHeader(), *eref, false)
if err != nil {
return err
}
defer rc.Close() // nolint
_, err = io.Copy(os.Stdout, rc)
return err
},
}
func pathToSel(psel string, matchTraversal bool, sub builder.SelectorSpec) (lapi.Selector, error) {
rs, err := textselector.SelectorSpecFromPath(textselector.Expression(psel), matchTraversal, sub)
if err != nil {
return "", xerrors.Errorf("failed to parse path-selector: %w", err)
}
var b bytes.Buffer
if err := dagjson.Encode(rs.Node(), &b); err != nil {
return "", err
}
return lapi.Selector(b.String()), nil
}
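// Illustrative sketch (not part of the original change): the default selector
// used by the ls command below is a plain JSON selector document. The
// hypothetical helper depthSelector shows how the same document is built for
// an arbitrary depth: explore `depth` levels of links, matching every node
// visited along the way.
func depthSelector(depth int) lapi.Selector {
return lapi.Selector(fmt.Sprintf(`{"R":{"l":{"depth":%d},":>":{"a":{">":{"|":[{"@":{}},{".":{}}]}}}}}`, depth))
}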
var clientRetrieveLsCmd = &cli.Command{
Name: "ls",
Usage: "List object links",
ArgsUsage: "[dataCid]",
Flags: append([]cli.Flag{
&cli.BoolFlag{
Name: "ipld",
Usage: "list IPLD datamodel links",
},
&cli.IntFlag{
Name: "depth",
Usage: "list links recursively up to the specified depth",
Value: 1,
},
&cli.StringFlag{
Name: "data-selector",
Usage: "IPLD datamodel text-path selector, or IPLD json selector",
},
}, retrFlagsCommon...),
Action: func(cctx *cli.Context) error {
if cctx.NArg() != 1 {
return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments"))
}
ainfo, err := GetAPIInfo(cctx, repo.FullNode)
if err != nil {
return xerrors.Errorf("could not get API info: %w", err)
}
fapi, closer, err := GetFullNodeAPIV1(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
afmt := NewAppFmt(cctx.App)
dataSelector := lapi.Selector(fmt.Sprintf(`{"R":{"l":{"depth":%d},":>":{"a":{">":{"|":[{"@":{}},{".":{}}]}}}}}`, cctx.Int("depth")))
if cctx.IsSet("data-selector") {
ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any)
dataSelector, err = pathToSel(cctx.String("data-selector"), cctx.Bool("ipld"),
ssb.ExploreUnion(
ssb.Matcher(),
ssb.ExploreAll(
ssb.ExploreRecursive(selector.RecursionLimitDepth(int64(cctx.Int("depth"))), ssb.ExploreAll(ssb.ExploreUnion(ssb.Matcher(), ssb.ExploreRecursiveEdge()))),
)))
if err != nil {
return xerrors.Errorf("parsing datamodel path: %w", err)
}
}
eref, err := retrieve(ctx, cctx, fapi, &dataSelector, afmt.Printf)
if err != nil {
return xerrors.Errorf("retrieve: %w", err)
}
fmt.Println() // separate retrieval events from results
eref.DAGs = append(eref.DAGs, lapi.DagSpec{
DataSelector: &dataSelector,
})
rc, err := ClientExportStream(ainfo.Addr, ainfo.AuthHeader(), *eref, true)
if err != nil {
return xerrors.Errorf("export: %w", err)
}
defer rc.Close() // nolint
var memcar bytes.Buffer
_, err = io.Copy(&memcar, rc)
if err != nil {
return err
}
cbs, err := blockstore.NewReadOnly(&bytesReaderAt{bytes.NewReader(memcar.Bytes())}, nil,
carv2.ZeroLengthSectionAsEOF(true),
blockstore.UseWholeCIDs(true))
if err != nil {
return xerrors.Errorf("opening car blockstore: %w", err)
}
roots, err := cbs.Roots()
if err != nil {
return xerrors.Errorf("getting roots: %w", err)
}
if len(roots) != 1 {
return xerrors.Errorf("expected 1 car root, got %d", len(roots))
}
dserv := merkledag.NewDAGService(blockservice.New(cbs, offline.Exchange(cbs)))
if !cctx.Bool("ipld") {
links, err := dserv.GetLinks(ctx, roots[0])
if err != nil {
return xerrors.Errorf("getting links: %w", err)
}
for _, link := range links {
fmt.Printf("%s %s\t%d\n", link.Cid, link.Name, link.Size)
}
} else {
jsel := lapi.Selector(fmt.Sprintf(`{"R":{"l":{"depth":%d},":>":{"a":{">":{"|":[{"@":{}},{".":{}}]}}}}}`, cctx.Int("depth")))
if cctx.IsSet("data-selector") {
ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any)
jsel, err = pathToSel(cctx.String("data-selector"), false,
ssb.ExploreRecursive(selector.RecursionLimitDepth(int64(cctx.Int("depth"))), ssb.ExploreAll(ssb.ExploreUnion(ssb.Matcher(), ssb.ExploreRecursiveEdge()))),
)
if err != nil {
return xerrors.Errorf("parsing datamodel path: %w", err)
}
}
sel, err := selectorparse.ParseJSONSelector(string(jsel))
if err != nil {
return xerrors.Errorf("parsing json selector: %w", err)
}
if err := utils.TraverseDag(
ctx,
dserv,
roots[0],
sel,
func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error {
if r == traversal.VisitReason_SelectionMatch {
fmt.Println(p.Path)
}
return nil
},
); err != nil {
return err
}
}
return nil
},
}
// bytesReaderAt adapts a *bytes.Reader into an io.ReaderAt, as required by the
// read-only CARv2 blockstore used above.
type bytesReaderAt struct {
btr *bytes.Reader
}
func (b bytesReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
return b.btr.ReadAt(p, off)
}
var _ io.ReaderAt = &bytesReaderAt{}

View File

@ -51,6 +51,7 @@ var multisigCmd = &cli.Command{
msigProposeCmd,
msigRemoveProposeCmd,
msigApproveCmd,
msigCancelCmd,
msigAddProposeCmd,
msigAddApproveCmd,
msigAddCancelCmd,
@ -159,6 +160,8 @@ var msigCreateCmd = &cli.Command{
msgCid := sm.Cid()
fmt.Println("sent create in message: ", msgCid)
// wait for it to get mined into a block
wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
if err != nil {
@ -448,7 +451,7 @@ var msigProposeCmd = &cli.Command{
msgCid := sm.Cid()
fmt.Println("send proposal in message: ", msgCid)
fmt.Println("sent proposal in message: ", msgCid)
wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
if err != nil {
@ -612,6 +615,131 @@ var msigApproveCmd = &cli.Command{
},
}
var msigCancelCmd = &cli.Command{
Name: "cancel",
Usage: "Cancel a multisig message",
ArgsUsage: "<multisigAddress messageId> [destination value [methodId methodParams]]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "from",
Usage: "account to send the cancel message from",
},
},
Action: func(cctx *cli.Context) error {
if cctx.Args().Len() < 2 {
return ShowHelp(cctx, fmt.Errorf("must pass at least multisig address and message ID"))
}
if cctx.Args().Len() > 2 && cctx.Args().Len() < 4 {
return ShowHelp(cctx, fmt.Errorf("usage: msig cancel <msig addr> <message ID> <desination> <value>"))
}
if cctx.Args().Len() > 4 && cctx.Args().Len() != 6 {
return ShowHelp(cctx, fmt.Errorf("usage: msig cancel <msig addr> <message ID> <desination> <value> [ <method> <params> ]"))
}
srv, err := GetFullNodeServices(cctx)
if err != nil {
return err
}
defer srv.Close() //nolint:errcheck
api := srv.FullNodeAPI()
ctx := ReqContext(cctx)
msig, err := address.NewFromString(cctx.Args().Get(0))
if err != nil {
return err
}
txid, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
if err != nil {
return err
}
var from address.Address
if cctx.IsSet("from") {
f, err := address.NewFromString(cctx.String("from"))
if err != nil {
return err
}
from = f
} else {
defaddr, err := api.WalletDefaultAddress(ctx)
if err != nil {
return err
}
from = defaddr
}
var msgCid cid.Cid
if cctx.Args().Len() == 2 {
proto, err := api.MsigCancel(ctx, msig, txid, from)
if err != nil {
return err
}
sm, err := InteractiveSend(ctx, cctx, srv, proto)
if err != nil {
return err
}
msgCid = sm.Cid()
} else {
dest, err := address.NewFromString(cctx.Args().Get(2))
if err != nil {
return err
}
value, err := types.ParseFIL(cctx.Args().Get(3))
if err != nil {
return err
}
var method uint64
var params []byte
if cctx.Args().Len() == 6 {
m, err := strconv.ParseUint(cctx.Args().Get(4), 10, 64)
if err != nil {
return err
}
method = m
p, err := hex.DecodeString(cctx.Args().Get(5))
if err != nil {
return err
}
params = p
}
proto, err := api.MsigCancelTxnHash(ctx, msig, txid, dest, types.BigInt(value), from, method, params)
if err != nil {
return err
}
sm, err := InteractiveSend(ctx, cctx, srv, proto)
if err != nil {
return err
}
msgCid = sm.Cid()
}
fmt.Println("sent cancel in message: ", msgCid)
wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
if err != nil {
return err
}
if wait.Receipt.ExitCode != 0 {
return fmt.Errorf("cancel returned exit %d", wait.Receipt.ExitCode)
}
return nil
},
}
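// Example usage (hypothetical addresses and values, illustrative only):
//
//   # cancel pending multisig transaction 3, matching by ID only
//   lotus msig cancel f2msigaddr 3
//
//   # cancel transaction 3, additionally verifying the proposal hash by
//   # re-specifying its destination and value
//   lotus msig cancel f2msigaddr 3 f1destaddr 1.5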
var msigRemoveProposeCmd = &cli.Command{
Name: "propose-remove",
Usage: "Propose to remove a signer",
@ -1490,7 +1618,7 @@ var msigLockCancelCmd = &cli.Command{
return actErr
}
proto, err := api.MsigCancel(ctx, msig, txid, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params)
proto, err := api.MsigCancelTxnHash(ctx, msig, txid, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params)
if err != nil {
return err
}

View File

@ -1,6 +1,7 @@
package cli
import (
"context"
"fmt"
"time"
@ -10,8 +11,22 @@ import (
var WaitApiCmd = &cli.Command{
Name: "wait-api",
Usage: "Wait for lotus api to come online",
Flags: []cli.Flag{
&cli.DurationFlag{
Name: "timeout",
Usage: "duration to wait till fail",
Value: time.Second * 30,
},
},
Action: func(cctx *cli.Context) error {
for i := 0; i < 30; i++ {
ctx := ReqContext(cctx)
ctx, cancel := context.WithTimeout(ctx, cctx.Duration("timeout"))
defer cancel()
for {
if ctx.Err() != nil {
break
}
api, closer, err := GetAPI(cctx)
if err != nil {
fmt.Printf("Not online yet... (%s)\n", err)
@ -20,8 +35,6 @@ var WaitApiCmd = &cli.Command{
}
defer closer()
ctx := ReqContext(cctx)
_, err = api.Version(ctx)
if err != nil {
return err
@ -29,6 +42,11 @@ var WaitApiCmd = &cli.Command{
return nil
}
return fmt.Errorf("timed out waiting for api to come online")
if ctx.Err() == context.DeadlineExceeded {
return fmt.Errorf("timed out waiting for api to come online")
}
return ctx.Err()
},
}

View File

@ -13,6 +13,8 @@ import (
"path/filepath"
"strconv"
power6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/power"
"github.com/docker/go-units"
"github.com/google/uuid"
"github.com/ipfs/go-datastore"
@ -644,11 +646,26 @@ func createStorageMiner(ctx context.Context, api v1api.FullNode, peerid peer.ID,
return address.Address{}, err
}
sender := owner
if fromstr := cctx.String("from"); fromstr != "" {
faddr, err := address.NewFromString(fromstr)
if err != nil {
return address.Undef, fmt.Errorf("could not parse from address: %w", err)
}
sender = faddr
}
// make sure the sender account exists on chain
_, err = api.StateLookupID(ctx, sender, types.EmptyTSK)
if err != nil {
return address.Undef, xerrors.Errorf("sender must exist on chain: %w", err)
}
// make sure the worker account exists on chain
_, err = api.StateLookupID(ctx, worker, types.EmptyTSK)
if err != nil {
signed, err := api.MpoolPushMessage(ctx, &types.Message{
From: owner,
From: sender,
To: worker,
Value: types.NewInt(0),
}, nil)
@ -668,35 +685,46 @@ func createStorageMiner(ctx context.Context, api v1api.FullNode, peerid peer.ID,
}
}
nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK)
// make sure the owner account exists on chain
_, err = api.StateLookupID(ctx, owner, types.EmptyTSK)
if err != nil {
return address.Undef, xerrors.Errorf("getting network version: %w", err)
signed, err := api.MpoolPushMessage(ctx, &types.Message{
From: sender,
To: owner,
Value: types.NewInt(0),
}, nil)
if err != nil {
return address.Undef, xerrors.Errorf("push owner init: %w", err)
}
log.Infof("Initializing owner account %s, message: %s", worker, signed.Cid())
log.Infof("Waiting for confirmation")
mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true)
if err != nil {
return address.Undef, xerrors.Errorf("waiting for owner init: %w", err)
}
if mw.Receipt.ExitCode != 0 {
return address.Undef, xerrors.Errorf("initializing owner account failed: exit code %d", mw.Receipt.ExitCode)
}
}
spt, err := miner.SealProofTypeFromSectorSize(abi.SectorSize(ssize), nv)
// Note: the correct thing to do would be to call SealProofTypeFromSectorSize if actors version is v3 or later, but this still works
spt, err := miner.WindowPoStProofTypeFromSectorSize(abi.SectorSize(ssize))
if err != nil {
return address.Undef, xerrors.Errorf("getting seal proof type: %w", err)
return address.Undef, xerrors.Errorf("getting post proof type: %w", err)
}
params, err := actors.SerializeParams(&power2.CreateMinerParams{
Owner: owner,
Worker: worker,
SealProofType: spt,
Peer: abi.PeerID(peerid),
params, err := actors.SerializeParams(&power6.CreateMinerParams{
Owner: owner,
Worker: worker,
WindowPoStProofType: spt,
Peer: abi.PeerID(peerid),
})
if err != nil {
return address.Undef, err
}
sender := owner
if fromstr := cctx.String("from"); fromstr != "" {
faddr, err := address.NewFromString(fromstr)
if err != nil {
return address.Undef, fmt.Errorf("could not parse from address: %w", err)
}
sender = faddr
}
createStorageMinerMsg := &types.Message{
To: power.Address,
From: sender,

View File

@ -6,6 +6,7 @@ import (
"text/tabwriter"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/lib/tablewriter"
"github.com/ipfs/go-cid"
"github.com/urfave/cli/v2"
)
@ -48,6 +49,12 @@ var piecesListPiecesCmd = &cli.Command{
var piecesListCidInfosCmd = &cli.Command{
Name: "list-cids",
Usage: "list registered payload CIDs",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "verbose",
Aliases: []string{"v"},
},
},
Action: func(cctx *cli.Context) error {
nodeApi, closer, err := lcli.GetMarketsAPI(cctx)
if err != nil {
@ -61,9 +68,54 @@ var piecesListCidInfosCmd = &cli.Command{
return err
}
w := tablewriter.New(tablewriter.Col("CID"),
tablewriter.Col("Piece"),
tablewriter.Col("BlockOffset"),
tablewriter.Col("BlockLen"),
tablewriter.Col("Deal"),
tablewriter.Col("Sector"),
tablewriter.Col("DealOffset"),
tablewriter.Col("DealLen"),
)
for _, c := range cids {
fmt.Println(c)
if !cctx.Bool("verbose") {
fmt.Println(c)
continue
}
ci, err := nodeApi.PiecesGetCIDInfo(ctx, c)
if err != nil {
fmt.Printf("Error getting CID info: %s\n", err)
continue
}
for _, location := range ci.PieceBlockLocations {
pi, err := nodeApi.PiecesGetPieceInfo(ctx, location.PieceCID)
if err != nil {
fmt.Printf("Error getting piece info: %s\n", err)
continue
}
for _, deal := range pi.Deals {
w.Write(map[string]interface{}{
"CID": c,
"Piece": location.PieceCID,
"BlockOffset": location.RelOffset,
"BlockLen": location.BlockSize,
"Deal": deal.DealID,
"Sector": deal.SectorID,
"DealOffset": deal.Offset,
"DealLen": deal.Length,
})
}
}
}
if cctx.Bool("verbose") {
return w.Flush(os.Stdout)
}
return nil
},
}

View File

@ -4,6 +4,7 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
"math"
"os"
"sort"
"strings"
@ -32,6 +33,17 @@ var sealingCmd = &cli.Command{
},
}
var barCols = float64(64)
func barString(total, y, g float64) string {
yBars := int(math.Round(y / total * barCols))
gBars := int(math.Round(g / total * barCols))
eBars := int(barCols) - yBars - gBars
return color.YellowString(strings.Repeat("|", yBars)) +
color.GreenString(strings.Repeat("|", gBars)) +
strings.Repeat(" ", eBars)
}
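// Worked example (illustrative): with barCols = 64, barString(128, 32, 64)
// renders round(32/128*64) = 16 yellow bars (reserved), round(64/128*64) = 32
// green bars (in use), and 64-16-32 = 16 trailing blanks.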
var sealingWorkersCmd = &cli.Command{
Name: "workers",
Usage: "list workers",
@ -77,7 +89,7 @@ var sealingWorkersCmd = &cli.Command{
for _, stat := range st {
gpuUse := "not "
gpuCol := color.FgBlue
if stat.GpuUsed {
if stat.GpuUsed > 0 {
gpuCol = color.FgGreen
gpuUse = ""
}
@ -89,56 +101,43 @@ var sealingWorkersCmd = &cli.Command{
fmt.Printf("Worker %s, host %s%s\n", stat.id, color.MagentaString(stat.Info.Hostname), disabled)
var barCols = uint64(64)
cpuBars := int(stat.CpuUse * barCols / stat.Info.Resources.CPUs)
cpuBar := strings.Repeat("|", cpuBars)
if int(barCols)-cpuBars >= 0 {
cpuBar += strings.Repeat(" ", int(barCols)-cpuBars)
}
fmt.Printf("\tCPU: [%s] %d/%d core(s) in use\n",
color.GreenString(cpuBar), stat.CpuUse, stat.Info.Resources.CPUs)
barString(float64(stat.Info.Resources.CPUs), 0, float64(stat.CpuUse)), stat.CpuUse, stat.Info.Resources.CPUs)
ramBarsRes := int(stat.Info.Resources.MemReserved * barCols / stat.Info.Resources.MemPhysical)
ramBarsUsed := int(stat.MemUsedMin * barCols / stat.Info.Resources.MemPhysical)
ramRepeatSpace := int(barCols) - (ramBarsUsed + ramBarsRes)
colorFunc := color.YellowString
if ramRepeatSpace < 0 {
ramRepeatSpace = 0
colorFunc = color.RedString
ramTotal := stat.Info.Resources.MemPhysical
ramTasks := stat.MemUsedMin
ramUsed := stat.Info.Resources.MemUsed
var ramReserved uint64 = 0
if ramUsed > ramTasks {
ramReserved = ramUsed - ramTasks
}
ramBar := colorFunc(strings.Repeat("|", ramBarsRes)) +
color.GreenString(strings.Repeat("|", ramBarsUsed)) +
strings.Repeat(" ", ramRepeatSpace)
vmem := stat.Info.Resources.MemPhysical + stat.Info.Resources.MemSwap
vmemBarsRes := int(stat.Info.Resources.MemReserved * barCols / vmem)
vmemBarsUsed := int(stat.MemUsedMax * barCols / vmem)
vmemRepeatSpace := int(barCols) - (vmemBarsUsed + vmemBarsRes)
colorFunc = color.YellowString
if vmemRepeatSpace < 0 {
vmemRepeatSpace = 0
colorFunc = color.RedString
}
vmemBar := colorFunc(strings.Repeat("|", vmemBarsRes)) +
color.GreenString(strings.Repeat("|", vmemBarsUsed)) +
strings.Repeat(" ", vmemRepeatSpace)
ramBar := barString(float64(ramTotal), float64(ramReserved), float64(ramTasks))
fmt.Printf("\tRAM: [%s] %d%% %s/%s\n", ramBar,
(stat.Info.Resources.MemReserved+stat.MemUsedMin)*100/stat.Info.Resources.MemPhysical,
types.SizeStr(types.NewInt(stat.Info.Resources.MemReserved+stat.MemUsedMin)),
(ramTasks+ramReserved)*100/stat.Info.Resources.MemPhysical,
types.SizeStr(types.NewInt(ramTasks+ramReserved)),
types.SizeStr(types.NewInt(stat.Info.Resources.MemPhysical)))
fmt.Printf("\tVMEM: [%s] %d%% %s/%s\n", vmemBar,
(stat.Info.Resources.MemReserved+stat.MemUsedMax)*100/vmem,
types.SizeStr(types.NewInt(stat.Info.Resources.MemReserved+stat.MemUsedMax)),
types.SizeStr(types.NewInt(vmem)))
vmemTotal := stat.Info.Resources.MemPhysical + stat.Info.Resources.MemSwap
vmemTasks := stat.MemUsedMax
vmemUsed := stat.Info.Resources.MemUsed + stat.Info.Resources.MemSwapUsed
var vmemReserved uint64 = 0
if vmemUsed > vmemTasks {
vmemReserved = vmemUsed - vmemTasks
}
vmemBar := barString(float64(vmemTotal), float64(vmemReserved), float64(vmemTasks))
fmt.Printf("\tVMEM: [%s] %d%% %s/%s\n", vmemBar,
(vmemTasks+vmemReserved)*100/vmemTotal,
types.SizeStr(types.NewInt(vmemTasks+vmemReserved)),
types.SizeStr(types.NewInt(vmemTotal)))
if len(stat.Info.Resources.GPUs) > 0 {
gpuBar := barString(float64(len(stat.Info.Resources.GPUs)), 0, stat.GpuUsed)
fmt.Printf("\tGPU: [%s] %.f%% %.2f/%d gpu(s) in use\n", color.GreenString(gpuBar),
stat.GpuUsed*100/float64(len(stat.Info.Resources.GPUs)),
stat.GpuUsed, len(stat.Info.Resources.GPUs))
}
for _, gpu := range stat.Info.Resources.GPUs {
fmt.Printf("\tGPU: %s\n", color.New(gpuCol).Sprintf("%s, %sused", gpu, gpuUse))
}

View File

@ -95,6 +95,14 @@ over time
Name: "max-storage",
Usage: "(for init) limit storage space for sectors (expensive for very large paths!)",
},
&cli.StringSliceFlag{
Name: "groups",
Usage: "path group names",
},
&cli.StringSliceFlag{
Name: "allow-to",
Usage: "path groups allowed to pull data from this path (allow all if not specified)",
},
},
Action: func(cctx *cli.Context) error {
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
@ -142,6 +150,8 @@ over time
CanSeal: cctx.Bool("seal"),
CanStore: cctx.Bool("store"),
MaxStorage: uint64(maxStor),
Groups: cctx.StringSlice("groups"),
AllowTo: cctx.StringSlice("allow-to"),
}
if !(cfg.CanStore || cfg.CanSeal) {
@ -322,10 +332,17 @@ var storageListCmd = &cli.Command{
if si.CanStore {
fmt.Print(color.CyanString("Store"))
}
fmt.Println("")
} else {
fmt.Print(color.HiYellowString("Use: ReadOnly"))
}
fmt.Println()
if len(si.Groups) > 0 {
fmt.Printf("\tGroups: %s\n", strings.Join(si.Groups, ", "))
}
if len(si.AllowTo) > 0 {
fmt.Printf("\tAllowTo: %s\n", strings.Join(si.AllowTo, ", "))
}
if localPath, ok := local[s.ID]; ok {
fmt.Printf("\tLocal: %s\n", color.GreenString(localPath))

View File

@ -17,6 +17,7 @@ import (
"time"
"github.com/filecoin-project/lotus/chain/actors/builtin"
lcli "github.com/filecoin-project/lotus/cli"
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
@ -41,7 +42,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/tools/stats"
"github.com/filecoin-project/lotus/tools/stats/sync"
)
var log = logging.Logger("main")
@ -160,15 +161,15 @@ var findMinersCmd = &cli.Command{
},
Action: func(cctx *cli.Context) error {
ctx := context.Background()
api, closer, err := stats.GetFullNodeAPI(cctx.Context, cctx.String("lotus-path"))
api, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
log.Fatal(err)
return err
}
defer closer()
if !cctx.Bool("no-sync") {
if err := stats.WaitForSyncComplete(ctx, api); err != nil {
log.Fatal(err)
if err := sync.SyncWait(ctx, api); err != nil {
return err
}
}
@ -245,7 +246,7 @@ var recoverMinersCmd = &cli.Command{
},
Action: func(cctx *cli.Context) error {
ctx := context.Background()
api, closer, err := stats.GetFullNodeAPI(cctx.Context, cctx.String("lotus-path"))
api, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
log.Fatal(err)
}
@ -266,8 +267,8 @@ var recoverMinersCmd = &cli.Command{
}
if !cctx.Bool("no-sync") {
if err := stats.WaitForSyncComplete(ctx, api); err != nil {
log.Fatal(err)
if err := sync.SyncWait(ctx, api); err != nil {
return err
}
}
@ -427,7 +428,7 @@ var runCmd = &cli.Command{
}()
ctx := context.Background()
api, closer, err := stats.GetFullNodeAPI(cctx.Context, cctx.String("lotus-path"))
api, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
log.Fatal(err)
}
@ -448,12 +449,12 @@ var runCmd = &cli.Command{
}
if !cctx.Bool("no-sync") {
if err := stats.WaitForSyncComplete(ctx, api); err != nil {
log.Fatal(err)
if err := sync.SyncWait(ctx, api); err != nil {
return err
}
}
tipsetsCh, err := stats.GetTips(ctx, api, r.Height(), cctx.Int("head-delay"))
tipsetsCh, err := sync.BufferedTipsetChannel(ctx, api, r.Height(), cctx.Int("head-delay"))
if err != nil {
log.Fatal(err)
}

View File

@ -58,8 +58,11 @@ var infoCmd = &cli.Command{
fmt.Printf("Hostname: %s\n", info.Hostname)
fmt.Printf("CPUs: %d; GPUs: %v\n", info.Resources.CPUs, info.Resources.GPUs)
fmt.Printf("RAM: %s; Swap: %s\n", types.SizeStr(types.NewInt(info.Resources.MemPhysical)), types.SizeStr(types.NewInt(info.Resources.MemSwap)))
fmt.Printf("Reserved memory: %s\n", types.SizeStr(types.NewInt(info.Resources.MemReserved)))
fmt.Printf("RAM: %s/%s; Swap: %s/%s\n",
types.SizeStr(types.NewInt(info.Resources.MemUsed)),
types.SizeStr(types.NewInt(info.Resources.MemPhysical)),
types.SizeStr(types.NewInt(info.Resources.MemSwapUsed)),
types.SizeStr(types.NewInt(info.Resources.MemSwap)))
fmt.Printf("Task types: ")
for _, t := range ttList(tt) {

View File

@ -60,6 +60,7 @@ func main() {
storageCmd,
setCmd,
waitQuietCmd,
resourcesCmd,
tasksCmd,
}

View File

@ -0,0 +1,72 @@
package main
import (
"fmt"
"os"
"sort"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)
var resourcesCmd = &cli.Command{
Name: "resources",
Usage: "Manage resource table overrides",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "all",
Usage: "print all resource envvars",
},
&cli.BoolFlag{
Name: "default",
Usage: "print default resource envvars",
},
},
Action: func(cctx *cli.Context) error {
def := map[string]string{}
set := map[string]string{}
all := map[string]string{}
_, err := storiface.ParseResourceEnv(func(key, d string) (string, bool) {
if d != "" {
all[key] = d
def[key] = d
}
s, ok := os.LookupEnv(key)
if ok {
all[key] = s
set[key] = s
}
return s, ok
})
if err != nil {
return err
}
printMap := func(m map[string]string) {
var arr []string
for k, v := range m {
arr = append(arr, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(arr)
for _, s := range arr {
fmt.Println(s)
}
}
if cctx.Bool("default") {
printMap(def)
} else {
if cctx.Bool("all") {
printMap(all)
} else {
printMap(set)
}
}
return nil
},
}
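// Example usage (illustrative): inspect the resource table overrides read from
// the environment via storiface.ParseResourceEnv:
//
//   lotus-worker resources            # only env vars explicitly set
//   lotus-worker resources --default  # built-in defaults
//   lotus-worker resources --all      # defaults merged with any overrides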

View File

@ -51,6 +51,14 @@ var storageAttachCmd = &cli.Command{
Name: "max-storage",
Usage: "(for init) limit storage space for sectors (expensive for very large paths!)",
},
&cli.StringSliceFlag{
Name: "groups",
Usage: "path group names",
},
&cli.StringSliceFlag{
Name: "allow-to",
Usage: "path groups allowed to pull data from this path (allow all if not specified)",
},
},
Action: func(cctx *cli.Context) error {
nodeApi, closer, err := lcli.GetWorkerAPI(cctx)
@ -98,6 +106,8 @@ var storageAttachCmd = &cli.Command{
CanSeal: cctx.Bool("seal"),
CanStore: cctx.Bool("store"),
MaxStorage: uint64(maxStor),
Groups: cctx.StringSlice("groups"),
AllowTo: cctx.StringSlice("allow-to"),
}
if !(cfg.CanStore || cfg.CanSeal) {

222
cmd/lotus-shed/balancer.go Normal file
View File

@ -0,0 +1,222 @@
package main
import (
"fmt"
"strings"
"time"
"github.com/ipfs/go-cid"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
)
var balancerCmd = &cli.Command{
Name: "balancer",
Usage: "Utility for balancing tokens between multiple wallets",
Description: `Tokens are balanced based on the specification provided in arguments
Each argument specifies an address, role, and role parameters separated by ';'
Supported roles:
- request;[addr];[low];[high] - request tokens when balance drops to [low], topping up to [high]
- provide;[addr];[min] - provide tokens to other addresses as long as the balance is above [min]
`,
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetFullNodeAPIV1(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
type request struct {
addr address.Address
low, high abi.TokenAmount
}
type provide struct {
addr address.Address
min abi.TokenAmount
}
var requests []request
var provides []provide
for i, s := range cctx.Args().Slice() {
ss := strings.Split(s, ";")
switch ss[0] {
case "request":
if len(ss) != 4 {
return xerrors.Errorf("request role needs 4 parameters (arg %d)", i)
}
addr, err := address.NewFromString(ss[1])
if err != nil {
return xerrors.Errorf("parsing address in arg %d: %w", i, err)
}
low, err := types.ParseFIL(ss[2])
if err != nil {
return xerrors.Errorf("parsing low in arg %d: %w", i, err)
}
high, err := types.ParseFIL(ss[3])
if err != nil {
return xerrors.Errorf("parsing high in arg %d: %w", i, err)
}
if abi.TokenAmount(low).GreaterThanEqual(abi.TokenAmount(high)) {
return xerrors.Errorf("low must be less than high in arg %d", i)
}
requests = append(requests, request{
addr: addr,
low: abi.TokenAmount(low),
high: abi.TokenAmount(high),
})
case "provide":
if len(ss) != 3 {
return xerrors.Errorf("provide role needs 3 parameters (arg %d)", i)
}
addr, err := address.NewFromString(ss[1])
if err != nil {
return xerrors.Errorf("parsing address in arg %d: %w", i, err)
}
min, err := types.ParseFIL(ss[2])
if err != nil {
return xerrors.Errorf("parsing min in arg %d: %w", i, err)
}
provides = append(provides, provide{
addr: addr,
min: abi.TokenAmount(min),
})
default:
return xerrors.Errorf("unknown role '%s' in arg %d", ss[0], i)
}
}
if len(provides) == 0 {
return xerrors.Errorf("no provides specified")
}
if len(requests) == 0 {
return xerrors.Errorf("no requests specified")
}
const confidence = 16
var notifs <-chan []*lapi.HeadChange
for {
if notifs == nil {
notifs, err = api.ChainNotify(ctx)
if err != nil {
return xerrors.Errorf("chain notify error: %w", err)
}
}
var ts *types.TipSet
loop:
for {
time.Sleep(150 * time.Millisecond)
select {
case n := <-notifs:
for _, change := range n {
if change.Type != store.HCApply {
continue
}
ts = change.Val
}
case <-ctx.Done():
return nil
default:
break loop
}
}
type send struct {
to address.Address
amt abi.TokenAmount
filled bool
}
var toSend []*send
for _, req := range requests {
bal, err := api.StateGetActor(ctx, req.addr, ts.Key())
if err != nil {
return err
}
if bal.Balance.LessThan(req.low) {
toSend = append(toSend, &send{
to: req.addr,
amt: big.Sub(req.high, bal.Balance),
})
}
}
for _, s := range toSend {
fmt.Printf("REQUEST %s for %s\n", types.FIL(s.amt), s.to)
}
var msgs []cid.Cid
for _, prov := range provides {
bal, err := api.StateGetActor(ctx, prov.addr, ts.Key())
if err != nil {
return err
}
avail := big.Sub(bal.Balance, prov.min)
for _, s := range toSend {
if s.filled {
continue
}
if avail.LessThan(s.amt) {
continue
}
m, err := api.MpoolPushMessage(ctx, &types.Message{
From: prov.addr,
To: s.to,
Value: s.amt,
}, nil)
if err != nil {
fmt.Printf("SEND ERROR %s\n", err.Error())
continue
}
fmt.Printf("SEND %s; %s from %s TO %s\n", m.Cid(), types.FIL(s.amt), prov.addr, s.to)
msgs = append(msgs, m.Cid())
s.filled = true
avail = big.Sub(avail, s.amt)
}
}
if len(msgs) > 0 {
fmt.Printf("WAITING FOR %d MESSAGES\n", len(msgs))
}
for _, msg := range msgs {
ml, err := api.StateWaitMsg(ctx, msg, confidence, lapi.LookbackNoLimit, true)
if err != nil {
return err
}
if ml.Receipt.ExitCode != exitcode.Ok {
fmt.Printf("MSG %s NON-ZERO EXITCODE: %s\n", msg, ml.Receipt.ExitCode)
}
}
}
},
}
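// Example invocation (hypothetical addresses, illustrative only): keep a
// worker wallet topped up to 50 FIL whenever it drops below 10 FIL, funded
// from a treasury wallet that must retain at least 1000 FIL:
//
//   lotus-shed balancer \
//     "request;f1workeraddr;10;50" \
//     "provide;f3treasuryaddr;1000"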

View File

@ -64,6 +64,7 @@ func main() {
splitstoreCmd,
fr32Cmd,
chainCmd,
balancerCmd,
}
app := &cli.App{

View File

@ -2,11 +2,29 @@ package main
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
power6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/power"
"github.com/docker/go-units"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/mitchellh/go-homedir"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
@ -17,6 +35,231 @@ var minerCmd = &cli.Command{
Usage: "miner-related utilities",
Subcommands: []*cli.Command{
minerUnpackInfoCmd,
minerCreateCmd,
minerFaultsCmd,
},
}
var minerFaultsCmd = &cli.Command{
Name: "faults",
Usage: "Display a list of faulty sectors for a SP",
ArgsUsage: "[minerAddress]",
Flags: []cli.Flag{
&cli.Uint64Flag{
Name: "expiring-in",
Usage: "only list sectors that are expiring in the next <n> epochs",
Value: 0,
},
},
Action: func(cctx *cli.Context) error {
if !cctx.Args().Present() {
return fmt.Errorf("must pass miner address")
}
api, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
m, err := address.NewFromString(cctx.Args().First())
if err != nil {
return err
}
faultBf, err := api.StateMinerFaults(ctx, m, types.EmptyTSK)
if err != nil {
return err
}
faults, err := faultBf.All(miner2.SectorsMax)
if err != nil {
return err
}
if len(faults) == 0 {
fmt.Println("no faults")
return nil
}
expEpoch := abi.ChainEpoch(cctx.Uint64("expiring-in"))
if expEpoch == 0 {
fmt.Print("faulty sectors: ")
for _, v := range faults {
fmt.Printf("%d ", v)
}
return nil
}
h, err := api.ChainHead(ctx)
if err != nil {
return err
}
fmt.Printf("faulty sectors expiring in the next %d epochs: ", expEpoch)
for _, v := range faults {
ss, err := api.StateSectorExpiration(ctx, m, abi.SectorNumber(v), types.EmptyTSK)
if err != nil {
return err
}
if ss.Early < h.Height()+expEpoch {
fmt.Printf("%d ", v)
}
}
return nil
},
}
var minerCreateCmd = &cli.Command{
Name: "create",
Usage: "sends a create miner msg",
ArgsUsage: "[sender] [owner] [worker] [sector size]",
Action: func(cctx *cli.Context) error {
wapi, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
if cctx.Args().Len() != 4 {
return xerrors.Errorf("expected 4 args (sender owner worker sectorSize)")
}
sender, err := address.NewFromString(cctx.Args().First())
if err != nil {
return err
}
owner, err := address.NewFromString(cctx.Args().Get(1))
if err != nil {
return err
}
worker, err := address.NewFromString(cctx.Args().Get(2))
if err != nil {
return err
}
ssize, err := units.RAMInBytes(cctx.Args().Get(3))
if err != nil {
return fmt.Errorf("failed to parse sector size: %w", err)
}
// make sure the sender account exists on chain
_, err = wapi.StateLookupID(ctx, sender, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("sender must exist on chain: %w", err)
}
// make sure the worker account exists on chain
_, err = wapi.StateLookupID(ctx, worker, types.EmptyTSK)
if err != nil {
signed, err := wapi.MpoolPushMessage(ctx, &types.Message{
From: sender,
To: worker,
Value: types.NewInt(0),
}, nil)
if err != nil {
return xerrors.Errorf("push worker init: %w", err)
}
log.Infof("Initializing worker account %s, message: %s", worker, signed.Cid())
log.Infof("Waiting for confirmation")
mw, err := wapi.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence)
if err != nil {
return xerrors.Errorf("waiting for worker init: %w", err)
}
if mw.Receipt.ExitCode != 0 {
return xerrors.Errorf("initializing worker account failed: exit code %d", mw.Receipt.ExitCode)
}
}
// make sure the owner account exists on chain
_, err = wapi.StateLookupID(ctx, owner, types.EmptyTSK)
if err != nil {
signed, err := wapi.MpoolPushMessage(ctx, &types.Message{
From: sender,
To: owner,
Value: types.NewInt(0),
}, nil)
if err != nil {
return xerrors.Errorf("push owner init: %w", err)
}
log.Infof("Initializing owner account %s, message: %s", worker, signed.Cid())
log.Infof("Wating for confirmation")
mw, err := wapi.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence)
if err != nil {
return xerrors.Errorf("waiting for owner init: %w", err)
}
if mw.Receipt.ExitCode != 0 {
return xerrors.Errorf("initializing owner account failed: exit code %d", mw.Receipt.ExitCode)
}
}
// Note: the correct thing to do would be to call SealProofTypeFromSectorSize if actors version is v3 or later, but this still works
spt, err := miner.WindowPoStProofTypeFromSectorSize(abi.SectorSize(ssize))
if err != nil {
return xerrors.Errorf("getting post proof type: %w", err)
}
params, err := actors.SerializeParams(&power6.CreateMinerParams{
Owner: owner,
Worker: worker,
WindowPoStProofType: spt,
})
if err != nil {
return err
}
createStorageMinerMsg := &types.Message{
To: power.Address,
From: sender,
Value: big.Zero(),
Method: power.Methods.CreateMiner,
Params: params,
}
signed, err := wapi.MpoolPushMessage(ctx, createStorageMinerMsg, nil)
if err != nil {
return xerrors.Errorf("pushing createMiner message: %w", err)
}
log.Infof("Pushed CreateMiner message: %s", signed.Cid())
log.Infof("Waiting for confirmation")
mw, err := wapi.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence)
if err != nil {
return xerrors.Errorf("waiting for createMiner message: %w", err)
}
if mw.Receipt.ExitCode != 0 {
return xerrors.Errorf("create miner failed: exit code %d", mw.Receipt.ExitCode)
}
var retval power6.CreateMinerReturn
if err := retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return)); err != nil {
return err
}
log.Infof("New miners address is: %s (%s)", retval.IDAddress, retval.RobustAddress)
return nil
},
}

View File

@ -148,6 +148,15 @@ func printMessage(cctx *cli.Context, msg *types.Message) error {
fmt.Println("Params:", p)
if msg, err := messageFromBytes(cctx, msg.Params); err == nil {
fmt.Println("---")
color.Red("Params message:")
if err := printMessage(cctx, msg.VMMessage()); err != nil {
return err
}
}
return nil
}

View File

@ -2,11 +2,14 @@ package main
import (
"bytes"
"context"
"encoding/base64"
"encoding/binary"
"fmt"
"image"
"image/color"
"image/png"
"io"
"os"
"sort"
"strconv"
@ -23,6 +26,7 @@ import (
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
"github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
@ -38,6 +42,7 @@ var sectorsCmd = &cli.Command{
terminateSectorCmd,
terminateSectorPenaltyEstimationCmd,
visAllocatedSectorsCmd,
dumpRLESectorCmd,
},
}
@ -275,6 +280,113 @@ var terminateSectorPenaltyEstimationCmd = &cli.Command{
},
}
func activeMiners(ctx context.Context, api v0api.FullNode) ([]address.Address, error) {
miners, err := api.StateListMiners(ctx, types.EmptyTSK)
if err != nil {
return nil, err
}
powCache := make(map[address.Address]types.BigInt)
var lk sync.Mutex
parmap.Par(32, miners, func(a address.Address) {
pow, err := api.StateMinerPower(ctx, a, types.EmptyTSK)
lk.Lock()
if err == nil {
powCache[a] = pow.MinerPower.QualityAdjPower
} else {
powCache[a] = types.NewInt(0)
}
lk.Unlock()
})
sort.Slice(miners, func(i, j int) bool {
return powCache[miners[i]].GreaterThan(powCache[miners[j]])
})
n := sort.Search(len(miners), func(i int) bool {
pow := powCache[miners[i]]
return pow.IsZero()
})
return append(miners[0:0:0], miners[:n]...), nil
}
var dumpRLESectorCmd = &cli.Command{
Name: "dump-rles",
Usage: "Dump AllocatedSectors RLEs from miners passed as arguments as run lengths in uint64 LE format.\nIf no arguments are passed, dumps all active miners in the state tree.",
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
var miners []address.Address
if cctx.NArg() == 0 {
miners, err = activeMiners(ctx, api)
if err != nil {
return xerrors.Errorf("getting active miners: %w", err)
}
} else {
for _, mS := range cctx.Args().Slice() {
mA, err := address.NewFromString(mS)
if err != nil {
return xerrors.Errorf("parsing address '%s': %w", mS, err)
}
miners = append(miners, mA)
}
}
wbuf := make([]byte, 8)
buf := &bytes.Buffer{}
for i := 0; i < len(miners); i++ {
buf.Reset()
err := func() error {
state, err := api.StateReadState(ctx, miners[i], types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting state: %+v", err)
}
allocSString := state.State.(map[string]interface{})["AllocatedSectors"].(map[string]interface{})["/"].(string)
allocCid, err := cid.Decode(allocSString)
if err != nil {
return xerrors.Errorf("decoding cid: %+v", err)
}
rle, err := api.ChainReadObj(ctx, allocCid)
if err != nil {
return xerrors.Errorf("reading AllocatedSectors: %+v", err)
}
var bf bitfield.BitField
err = bf.UnmarshalCBOR(bytes.NewReader(rle))
if err != nil {
return xerrors.Errorf("decoding bitfield: %w", err)
}
ri, err := bf.RunIterator()
if err != nil {
return xerrors.Errorf("creating iterator: %w", err)
}
for ri.HasNext() {
run, err := ri.NextRun()
if err != nil {
return xerrors.Errorf("getting run: %w", err)
}
binary.LittleEndian.PutUint64(wbuf, run.Len)
buf.Write(wbuf)
}
_, err = io.Copy(os.Stdout, buf)
if err != nil {
return xerrors.Errorf("copy: %w", err)
}
return nil
}()
if err != nil {
log.Errorf("miner %d: %s: %+v", i, miners[i], err)
}
}
return nil
},
}
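// Illustrative sketch (not shipped with this change): reading the dump-rles
// output back. The stream is just consecutive little-endian uint64 run
// lengths; runs alternate in value, and the value of the first run is not
// recorded in the dump.
func readRunLengths(r io.Reader) ([]uint64, error) {
var runs []uint64
buf := make([]byte, 8)
for {
if _, err := io.ReadFull(r, buf); err != nil {
if err == io.EOF {
return runs, nil
}
return nil, err
}
runs = append(runs, binary.LittleEndian.Uint64(buf))
}
}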
var visAllocatedSectorsCmd = &cli.Command{
Name: "vis-allocated",
Usage: "Produces a html with visualisation of allocated sectors",
@ -287,32 +399,10 @@ var visAllocatedSectorsCmd = &cli.Command{
ctx := lcli.ReqContext(cctx)
var miners []address.Address
if cctx.NArg() == 0 {
miners, err = api.StateListMiners(ctx, types.EmptyTSK)
miners, err = activeMiners(ctx, api)
if err != nil {
return err
return xerrors.Errorf("getting active miners: %w", err)
}
powCache := make(map[address.Address]types.BigInt)
var lk sync.Mutex
parmap.Par(32, miners, func(a address.Address) {
pow, err := api.StateMinerPower(ctx, a, types.EmptyTSK)
lk.Lock()
if err == nil {
powCache[a] = pow.MinerPower.QualityAdjPower
} else {
powCache[a] = types.NewInt(0)
}
lk.Unlock()
})
sort.Slice(miners, func(i, j int) bool {
return powCache[miners[i]].GreaterThan(powCache[miners[j]])
})
n := sort.Search(len(miners), func(i int) bool {
pow := powCache[miners[i]]
log.Infof("pow @%d = %s", i, pow)
return pow.IsZero()
})
miners = miners[:n]
} else {
for _, mS := range cctx.Args().Slice() {
mA, err := address.NewFromString(mS)

View File

@ -2,7 +2,7 @@ version: '3'
services:
influxdb:
image: influxdb:latest
image: influxdb:1.8
container_name: influxdb
ports:
- "18086:8086"

View File

@ -2,18 +2,36 @@ package main
import (
"context"
"net/http"
_ "net/http/pprof"
"os"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/build"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/tools/stats"
"github.com/filecoin-project/lotus/tools/stats/influx"
"github.com/filecoin-project/lotus/tools/stats/ipldstore"
"github.com/filecoin-project/lotus/tools/stats/metrics"
"github.com/filecoin-project/lotus/tools/stats/points"
"github.com/filecoin-project/lotus/tools/stats/sync"
logging "github.com/ipfs/go-log/v2"
"github.com/urfave/cli/v2"
"contrib.go.opencensus.io/exporter/prometheus"
stats "go.opencensus.io/stats"
"go.opencensus.io/stats/view"
)
var log = logging.Logger("stats")
func init() {
if err := view.Register(metrics.DefaultViews...); err != nil {
log.Fatal(err)
}
}
func main() {
local := []*cli.Command{
runCmd,
@ -37,7 +55,7 @@ func main() {
},
},
Before: func(cctx *cli.Context) error {
return logging.SetLogLevel("stats", cctx.String("log-level"))
return logging.SetLogLevelRegex("stats/*", cctx.String("log-level"))
},
Commands: local,
}
@ -104,6 +122,12 @@ var runCmd = &cli.Command{
Usage: "do not wait for chain sync to complete",
Value: false,
},
&cli.IntFlag{
Name: "ipld-store-cache-size",
Usage: "size of lru cache for ChainReadObj",
EnvVars: []string{"LOTUS_STATS_IPLD_STORE_CACHE_SIZE"},
Value: 2 << 15,
},
},
Action: func(cctx *cli.Context) error {
ctx := context.Background()
@ -118,30 +142,35 @@ var runCmd = &cli.Command{
influxPasswordFlag := cctx.String("influx-password")
influxDatabaseFlag := cctx.String("influx-database")
ipldStoreCacheSizeFlag := cctx.Int("ipld-store-cache-size")
log.Infow("opening influx client", "hostname", influxHostnameFlag, "username", influxUsernameFlag, "database", influxDatabaseFlag)
influx, err := stats.InfluxClient(influxHostnameFlag, influxUsernameFlag, influxPasswordFlag)
influxClient, err := influx.NewClient(influxHostnameFlag, influxUsernameFlag, influxPasswordFlag)
if err != nil {
log.Fatal(err)
return err
}
exporter, err := prometheus.NewExporter(prometheus.Options{
Namespace: "lotus_stats",
})
if err != nil {
return err
}
go func() {
http.Handle("/metrics", exporter)
if err := http.ListenAndServe(":6688", nil); err != nil {
log.Errorw("failed to start http server", "err", err)
}
}()
if resetFlag {
if err := stats.ResetDatabase(influx, influxDatabaseFlag); err != nil {
log.Fatal(err)
if err := influx.ResetDatabase(influxClient, influxDatabaseFlag); err != nil {
return err
}
}
height := int64(heightFlag)
if !resetFlag && height == 0 {
h, err := stats.GetLastRecordedHeight(influx, influxDatabaseFlag)
if err != nil {
log.Info(err)
}
height = h
}
api, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
@ -149,12 +178,89 @@ var runCmd = &cli.Command{
defer closer()
if !noSyncFlag {
if err := stats.WaitForSyncComplete(ctx, api); err != nil {
log.Fatal(err)
if err := sync.SyncWait(ctx, api); err != nil {
return err
}
}
stats.Collect(ctx, api, influx, influxDatabaseFlag, height, headLagFlag)
gtp, err := api.ChainGetGenesis(ctx)
if err != nil {
return err
}
genesisTime := time.Unix(int64(gtp.MinTimestamp()), 0)
// When height is set to `0` we will resume from the best height we can.
// The goal is to ensure we have data in the last 60 tipsets
height := int64(heightFlag)
if !resetFlag && height == 0 {
lastHeight, err := influx.GetLastRecordedHeight(influxClient, influxDatabaseFlag)
if err != nil {
return err
}
sinceGenesis := build.Clock.Now().Sub(genesisTime)
expectedHeight := int64(sinceGenesis.Seconds()) / int64(build.BlockDelaySecs)
startOfWindowHeight := expectedHeight - 60
if lastHeight > startOfWindowHeight {
height = lastHeight
} else {
height = startOfWindowHeight
}
ts, err := api.ChainHead(ctx)
if err != nil {
return err
}
headHeight := int64(ts.Height())
if headHeight < height {
height = headHeight
}
}
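// Worked example (illustrative): with 30s blocks, a chain that is 90 days old
// has an expected height of 90*24*60*60/30 = 259200, so with no previously
// recorded height collection would resume from epoch 259200-60 = 259140.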
go func() {
t := time.NewTicker(time.Second)
for {
select {
case <-t.C:
sinceGenesis := build.Clock.Now().Sub(genesisTime)
expectedHeight := int64(sinceGenesis.Seconds()) / int64(build.BlockDelaySecs)
stats.Record(ctx, metrics.TipsetCollectionHeightExpected.M(expectedHeight))
}
}
}()
store, err := ipldstore.NewApiIpldStore(ctx, api, ipldStoreCacheSizeFlag)
if err != nil {
return err
}
collector, err := points.NewChainPointCollector(ctx, store, api)
if err != nil {
return err
}
tipsets, err := sync.BufferedTipsetChannel(ctx, api, abi.ChainEpoch(height), headLagFlag)
if err != nil {
return err
}
wq := influx.NewWriteQueue(ctx, influxClient)
defer wq.Close()
for tipset := range tipsets {
if nb, err := collector.Collect(ctx, tipset); err != nil {
log.Warnw("failed to collect points", "err", err)
} else {
nb.SetDatabase(influxDatabaseFlag)
wq.AddBatch(nb)
}
}
return nil
},

View File

@ -103,6 +103,7 @@ services:
# - FULLNODE_API_INFO=/dns/lotus/tcp/1234/http
# - LOTUS_JAEGER_AGENT_HOST=jaeger
# - LOTUS_JAEGER_AGENT_PORT=6831
# - DOCKER_LOTUS_MINER_INIT=true
# deploy:
# restart_policy:
# condition: on-failure

View File

@ -2148,7 +2148,9 @@ Inputs:
"Weight": 42,
"MaxStorage": 42,
"CanSeal": true,
"CanStore": true
"CanStore": true,
"Groups": null,
"AllowTo": null
},
{
"Capacity": 9,
@ -2258,7 +2260,9 @@ Response:
"Weight": 42,
"MaxStorage": 42,
"CanSeal": true,
"CanStore": true
"CanStore": true,
"Groups": null,
"AllowTo": null
}
```
@ -2449,18 +2453,595 @@ Response:
"IgnoreResources": false,
"Resources": {
"MemPhysical": 274877906944,
"MemUsed": 2147483648,
"MemSwap": 128849018880,
"MemReserved": 2147483648,
"MemSwapUsed": 2147483648,
"CPUs": 64,
"GPUs": [
"aGPU 1337"
]
],
"Resources": {
"seal/v0/addpiece": {
"0": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"1": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"2": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"3": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"4": {
"MinMemory": 8589934592,
"MaxMemory": 8589934592,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"5": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"6": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"7": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"8": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"9": {
"MinMemory": 8589934592,
"MaxMemory": 8589934592,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
}
},
"seal/v0/commit/1": {
"0": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"1": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"2": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"3": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"4": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"5": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"6": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"7": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"8": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"9": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
}
},
"seal/v0/commit/2": {
"0": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 1,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"1": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 1,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"2": {
"MinMemory": 1073741824,
"MaxMemory": 1610612736,
"GPUUtilization": 1,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10737418240
},
"3": {
"MinMemory": 32212254720,
"MaxMemory": 161061273600,
"GPUUtilization": 1,
"MaxParallelism": -1,
"MaxParallelismGPU": 6,
"BaseMinMemory": 34359738368
},
"4": {
"MinMemory": 64424509440,
"MaxMemory": 204010946560,
"GPUUtilization": 1,
"MaxParallelism": -1,
"MaxParallelismGPU": 6,
"BaseMinMemory": 68719476736
},
"5": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 1,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"6": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 1,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"7": {
"MinMemory": 1073741824,
"MaxMemory": 1610612736,
"GPUUtilization": 1,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10737418240
},
"8": {
"MinMemory": 32212254720,
"MaxMemory": 161061273600,
"GPUUtilization": 1,
"MaxParallelism": -1,
"MaxParallelismGPU": 6,
"BaseMinMemory": 34359738368
},
"9": {
"MinMemory": 64424509440,
"MaxMemory": 204010946560,
"GPUUtilization": 1,
"MaxParallelism": -1,
"MaxParallelismGPU": 6,
"BaseMinMemory": 68719476736
}
},
"seal/v0/fetch": {
"0": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
},
"1": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
},
"2": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
},
"3": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
},
"4": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
},
"5": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
},
"6": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
},
"7": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
},
"8": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
},
"9": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
}
},
"seal/v0/precommit/1": {
"0": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"1": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"2": {
"MinMemory": 805306368,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1048576
},
"3": {
"MinMemory": 60129542144,
"MaxMemory": 68719476736,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10485760
},
"4": {
"MinMemory": 120259084288,
"MaxMemory": 137438953472,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10485760
},
"5": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"6": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"7": {
"MinMemory": 805306368,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1048576
},
"8": {
"MinMemory": 60129542144,
"MaxMemory": 68719476736,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10485760
},
"9": {
"MinMemory": 120259084288,
"MaxMemory": 137438953472,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10485760
}
},
"seal/v0/precommit/2": {
"0": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": -1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"1": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": -1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"2": {
"MinMemory": 1073741824,
"MaxMemory": 1610612736,
"GPUUtilization": 0,
"MaxParallelism": -1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"3": {
"MinMemory": 16106127360,
"MaxMemory": 16106127360,
"GPUUtilization": 1,
"MaxParallelism": -1,
"MaxParallelismGPU": 6,
"BaseMinMemory": 1073741824
},
"4": {
"MinMemory": 32212254720,
"MaxMemory": 32212254720,
"GPUUtilization": 1,
"MaxParallelism": -1,
"MaxParallelismGPU": 6,
"BaseMinMemory": 1073741824
},
"5": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": -1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"6": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": -1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"7": {
"MinMemory": 1073741824,
"MaxMemory": 1610612736,
"GPUUtilization": 0,
"MaxParallelism": -1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"8": {
"MinMemory": 16106127360,
"MaxMemory": 16106127360,
"GPUUtilization": 1,
"MaxParallelism": -1,
"MaxParallelismGPU": 6,
"BaseMinMemory": 1073741824
},
"9": {
"MinMemory": 32212254720,
"MaxMemory": 32212254720,
"GPUUtilization": 1,
"MaxParallelism": -1,
"MaxParallelismGPU": 6,
"BaseMinMemory": 1073741824
}
},
"seal/v0/unseal": {
"0": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"1": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"2": {
"MinMemory": 805306368,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1048576
},
"3": {
"MinMemory": 60129542144,
"MaxMemory": 68719476736,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10485760
},
"4": {
"MinMemory": 120259084288,
"MaxMemory": 137438953472,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10485760
},
"5": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"6": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"7": {
"MinMemory": 805306368,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1048576
},
"8": {
"MinMemory": 60129542144,
"MaxMemory": 68719476736,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10485760
},
"9": {
"MinMemory": 120259084288,
"MaxMemory": 137438953472,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10485760
}
}
}
}
},
"Enabled": true,
"MemUsedMin": 0,
"MemUsedMax": 0,
"GpuUsed": false,
"GpuUsed": 0,
"CpuUse": 0
}
}
View File
@ -92,10 +92,587 @@ Response:
"IgnoreResources": true,
"Resources": {
"MemPhysical": 42,
"MemUsed": 42,
"MemSwap": 42,
"MemReserved": 42,
"MemSwapUsed": 42,
"CPUs": 42,
"GPUs": null
"GPUs": null,
"Resources": {
"seal/v0/addpiece": {
"0": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"1": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"2": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"3": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"4": {
"MinMemory": 8589934592,
"MaxMemory": 8589934592,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"5": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"6": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"7": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"8": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"9": {
"MinMemory": 8589934592,
"MaxMemory": 8589934592,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
}
},
"seal/v0/commit/1": {
"0": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"1": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"2": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"3": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"4": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"5": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"6": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"7": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"8": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"9": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
}
},
"seal/v0/commit/2": {
"0": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 1,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"1": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 1,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"2": {
"MinMemory": 1073741824,
"MaxMemory": 1610612736,
"GPUUtilization": 1,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10737418240
},
"3": {
"MinMemory": 32212254720,
"MaxMemory": 161061273600,
"GPUUtilization": 1,
"MaxParallelism": -1,
"MaxParallelismGPU": 6,
"BaseMinMemory": 34359738368
},
"4": {
"MinMemory": 64424509440,
"MaxMemory": 204010946560,
"GPUUtilization": 1,
"MaxParallelism": -1,
"MaxParallelismGPU": 6,
"BaseMinMemory": 68719476736
},
"5": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 1,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"6": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 1,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"7": {
"MinMemory": 1073741824,
"MaxMemory": 1610612736,
"GPUUtilization": 1,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10737418240
},
"8": {
"MinMemory": 32212254720,
"MaxMemory": 161061273600,
"GPUUtilization": 1,
"MaxParallelism": -1,
"MaxParallelismGPU": 6,
"BaseMinMemory": 34359738368
},
"9": {
"MinMemory": 64424509440,
"MaxMemory": 204010946560,
"GPUUtilization": 1,
"MaxParallelism": -1,
"MaxParallelismGPU": 6,
"BaseMinMemory": 68719476736
}
},
"seal/v0/fetch": {
"0": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
},
"1": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
},
"2": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
},
"3": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
},
"4": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
},
"5": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
},
"6": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
},
"7": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
},
"8": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
},
"9": {
"MinMemory": 1048576,
"MaxMemory": 1048576,
"GPUUtilization": 0,
"MaxParallelism": 0,
"MaxParallelismGPU": 0,
"BaseMinMemory": 0
}
},
"seal/v0/precommit/1": {
"0": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"1": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"2": {
"MinMemory": 805306368,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1048576
},
"3": {
"MinMemory": 60129542144,
"MaxMemory": 68719476736,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10485760
},
"4": {
"MinMemory": 120259084288,
"MaxMemory": 137438953472,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10485760
},
"5": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"6": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"7": {
"MinMemory": 805306368,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1048576
},
"8": {
"MinMemory": 60129542144,
"MaxMemory": 68719476736,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10485760
},
"9": {
"MinMemory": 120259084288,
"MaxMemory": 137438953472,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10485760
}
},
"seal/v0/precommit/2": {
"0": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": -1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"1": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": -1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"2": {
"MinMemory": 1073741824,
"MaxMemory": 1610612736,
"GPUUtilization": 0,
"MaxParallelism": -1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"3": {
"MinMemory": 16106127360,
"MaxMemory": 16106127360,
"GPUUtilization": 1,
"MaxParallelism": -1,
"MaxParallelismGPU": 6,
"BaseMinMemory": 1073741824
},
"4": {
"MinMemory": 32212254720,
"MaxMemory": 32212254720,
"GPUUtilization": 1,
"MaxParallelism": -1,
"MaxParallelismGPU": 6,
"BaseMinMemory": 1073741824
},
"5": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": -1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"6": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": -1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"7": {
"MinMemory": 1073741824,
"MaxMemory": 1610612736,
"GPUUtilization": 0,
"MaxParallelism": -1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"8": {
"MinMemory": 16106127360,
"MaxMemory": 16106127360,
"GPUUtilization": 1,
"MaxParallelism": -1,
"MaxParallelismGPU": 6,
"BaseMinMemory": 1073741824
},
"9": {
"MinMemory": 32212254720,
"MaxMemory": 32212254720,
"GPUUtilization": 1,
"MaxParallelism": -1,
"MaxParallelismGPU": 6,
"BaseMinMemory": 1073741824
}
},
"seal/v0/unseal": {
"0": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"1": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"2": {
"MinMemory": 805306368,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1048576
},
"3": {
"MinMemory": 60129542144,
"MaxMemory": 68719476736,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10485760
},
"4": {
"MinMemory": 120259084288,
"MaxMemory": 137438953472,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10485760
},
"5": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"6": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"7": {
"MinMemory": 805306368,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1048576
},
"8": {
"MinMemory": 60129542144,
"MaxMemory": 68719476736,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10485760
},
"9": {
"MinMemory": 120259084288,
"MaxMemory": 137438953472,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 10485760
}
}
}
}
}
```
View File
@ -1269,7 +1269,8 @@ Response:
"Stages": {
"Stages": null
}
}
},
"Event": 5
}
```
View File
@ -41,6 +41,7 @@
* [ClientDataTransferUpdates](#ClientDataTransferUpdates)
* [ClientDealPieceCID](#ClientDealPieceCID)
* [ClientDealSize](#ClientDealSize)
* [ClientExport](#ClientExport)
* [ClientFindData](#ClientFindData)
* [ClientGenCar](#ClientGenCar)
* [ClientGetDealInfo](#ClientGetDealInfo)
@ -59,7 +60,7 @@
* [ClientRestartDataTransfer](#ClientRestartDataTransfer)
* [ClientRetrieve](#ClientRetrieve)
* [ClientRetrieveTryRestartInsufficientFunds](#ClientRetrieveTryRestartInsufficientFunds)
* [ClientRetrieveWithEvents](#ClientRetrieveWithEvents)
* [ClientRetrieveWait](#ClientRetrieveWait)
* [ClientStartDeal](#ClientStartDeal)
* [ClientStatelessDeal](#ClientStatelessDeal)
* [Create](#Create)
@ -108,6 +109,7 @@
* [MsigApprove](#MsigApprove)
* [MsigApproveTxnHash](#MsigApproveTxnHash)
* [MsigCancel](#MsigCancel)
* [MsigCancelTxnHash](#MsigCancelTxnHash)
* [MsigCreate](#MsigCreate)
* [MsigGetAvailableBalance](#MsigGetAvailableBalance)
* [MsigGetPending](#MsigGetPending)
@ -1054,6 +1056,32 @@ Response:
}
```
### ClientExport
ClientExport exports a file stored in the local filestore to a system file
Perms: admin
Inputs:
```json
[
{
"Root": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"DAGs": null,
"FromLocalCAR": "string value",
"DealID": 5
},
{
"Path": "string value",
"IsCAR": true
}
]
```
Response: `{}`
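For orientation, a minimal Go sketch of calling this from client code (a sketch under assumptions: a v1 `api.FullNode` RPC client is already connected; the helper name and output path are illustrative, not part of this release):
```go
package clientdocs

import (
	"context"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
)

// exportToCAR sketches a ClientExport call: it writes previously retrieved
// data rooted at `root` out to a CAR file at `path`.
func exportToCAR(ctx context.Context, node api.FullNode, root cid.Cid, path string) error {
	return node.ClientExport(ctx,
		api.ExportRef{Root: root},
		api.FileRef{Path: path, IsCAR: true}, // IsCAR: false would write a UnixFS file instead
	)
}
```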
### ClientFindData
ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
@ -1281,7 +1309,8 @@ Response:
"Stages": {
"Stages": null
}
}
},
"Event": 5
}
```
@ -1481,9 +1510,8 @@ Inputs:
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"Piece": null,
"DatamodelPathSelector": "Links/21/Hash/Links/42/Hash",
"DataSelector": "Links/21/Hash/Links/42/Hash",
"Size": 42,
"FromLocalCAR": "string value",
"Total": "0",
"UnsealPrice": "0",
"PaymentInterval": 42,
@ -1495,15 +1523,16 @@ Inputs:
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"PieceCID": null
}
},
{
"Path": "string value",
"IsCAR": true
}
]
```
Response: `{}`
Response:
```json
{
"DealID": 5
}
```
### ClientRetrieveTryRestartInsufficientFunds
ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
@ -1521,9 +1550,8 @@ Inputs:
Response: `{}`
### ClientRetrieveWithEvents
ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
of status updates.
### ClientRetrieveWait
ClientRetrieveWait waits for retrieval to be complete
Perms: admin
@ -1531,43 +1559,11 @@ Perms: admin
Inputs:
```json
[
{
"Root": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"Piece": null,
"DatamodelPathSelector": "Links/21/Hash/Links/42/Hash",
"Size": 42,
"FromLocalCAR": "string value",
"Total": "0",
"UnsealPrice": "0",
"PaymentInterval": 42,
"PaymentIntervalIncrease": 42,
"Client": "f01234",
"Miner": "f01234",
"MinerPeer": {
"Address": "f01234",
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"PieceCID": null
}
},
{
"Path": "string value",
"IsCAR": true
}
5
]
```
Response:
```json
{
"Event": 5,
"Status": 0,
"BytesReceived": 42,
"FundsSpent": "0",
"Err": "string value"
}
```
Response: `{}`
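Taken together, the split API makes the new flow: start the retrieval, wait for it, then export. A minimal sketch, assuming a v1 `api.FullNode` client and an order built from a QueryOffer (e.g. via `ClientMinerQueryOffer(...).Order(...)`); names here are illustrative:
```go
package clientdocs

import (
	"context"

	"github.com/filecoin-project/lotus/api"
)

// retrieveToFile sketches the three-step v1 retrieval flow:
// ClientRetrieve starts the retrieval and returns a deal ID without blocking,
// ClientRetrieveWait blocks until that retrieval completes, and ClientExport
// writes the retrieved data out of the local store.
func retrieveToFile(ctx context.Context, node api.FullNode, order api.RetrievalOrder, out string) error {
	res, err := node.ClientRetrieve(ctx, order)
	if err != nil {
		return err
	}
	if err := node.ClientRetrieveWait(ctx, res.DealID); err != nil {
		return err
	}
	return node.ClientExport(ctx,
		api.ExportRef{Root: order.Root, DealID: res.DealID},
		api.FileRef{Path: out}, // plain file export; set IsCAR for a CAR file
	)
}
```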
### ClientStartDeal
ClientStartDeal proposes a deal with a miner.
@ -2702,6 +2698,44 @@ Response:
### MsigCancel
MsigCancel cancels a previously-proposed multisig message
It takes the following params: <multisig address>, <proposed transaction ID>, <signer address>
Perms: sign
Inputs:
```json
[
"f01234",
42,
"f01234"
]
```
Response:
```json
{
"Message": {
"Version": 42,
"To": "f01234",
"From": "f01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
"GasFeeCap": "0",
"GasPremium": "0",
"Method": 1,
"Params": "Ynl0ZSBhcnJheQ==",
"CID": {
"/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
}
},
"ValidNonce": true
}
```
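For illustration, the three JSON params above map onto a Go call like the following sketch (assuming a v1 `api.FullNode` client; pushing the returned message prototype to the mempool is left out and would normally go through the node's signing/push helpers):
```go
package clientdocs

import (
	"context"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
)

// cancelPending sketches MsigCancel: it builds (but does not send) the message
// cancelling pending transaction `txID` on multisig `msig`, sent from `src`.
func cancelPending(ctx context.Context, node api.FullNode, msig, src address.Address, txID uint64) (*api.MessagePrototype, error) {
	return node.MsigCancel(ctx, msig, txID, src)
}
```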
### MsigCancelTxnHash
MsigCancelTxnHash cancels a previously-proposed multisig message
It takes the following params: <multisig address>, <proposed transaction ID>, <recipient address>, <value to transfer>,
<sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message>
View File
@ -7,7 +7,7 @@ USAGE:
lotus-miner [global options] command [command options] [arguments...]
VERSION:
1.13.1
1.13.2
COMMANDS:
init Initialize a lotus miner repo
@ -590,7 +590,8 @@ CATEGORY:
DEVELOPER
OPTIONS:
--help, -h show help (default: false)
--timeout value duration to wait till fail (default: 30s)
--help, -h show help (default: false)
```
@ -1459,7 +1460,8 @@ USAGE:
lotus-miner pieces list-cids [command options] [arguments...]
OPTIONS:
--help, -h show help (default: false)
--verbose, -v (default: false)
--help, -h show help (default: false)
```
@ -1982,6 +1984,8 @@ OPTIONS:
--seal (for init) use path for sealing (default: false)
--store (for init) use path for long-term storage (default: false)
--max-storage value (for init) limit storage space for sectors (expensive for very large paths!)
--groups value path group names
--allow-to value path groups allowed to pull data from this path (allow all if not specified)
--help, -h show help (default: false)
```
View File
@ -7,7 +7,7 @@ USAGE:
lotus-worker [global options] command [command options] [arguments...]
VERSION:
1.13.1
1.13.2
COMMANDS:
run Start lotus worker
@ -15,6 +15,7 @@ COMMANDS:
storage manage sector storage
set Manage worker settings
wait-quiet Block until all running tasks exit
resources Manage resource table overrides
tasks Manage task processing
help, h Shows a list of commands or help for one command
@ -94,6 +95,8 @@ OPTIONS:
--seal (for init) use path for sealing (default: false)
--store (for init) use path for long-term storage (default: false)
--max-storage value (for init) limit storage space for sectors (expensive for very large paths!)
--groups value path group names
--allow-to value path groups allowed to pull data from this path (allow all if not specified)
--help, -h show help (default: false)
```
@ -125,6 +128,21 @@ OPTIONS:
```
## lotus-worker resources
```
NAME:
lotus-worker resources - Manage resource table overrides
USAGE:
lotus-worker resources [command options] [arguments...]
OPTIONS:
--all print all resource envvars (default: false)
--default print default resource envvars (default: false)
--help, -h show help (default: false)
```
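The overrides come from environment variables, read through the `os.LookupEnv` hook that worker construction now takes (see the scheduler tests later in this commit). Below is a minimal sketch, assuming the `AP_2K_MAX_MEMORY` name exercised by `TestResOverride` generalizes as `<TASK>_<SECTOR SIZE>_<FIELD>`; the per-proof rows in the Resources tables earlier in this commit are keyed by `abi.RegisteredSealProof` (0-4 are the V1 proofs from 2KiB to 64GiB, 5-9 their V1_1 variants).
```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Illustrative only: override a single cell of the worker resource table
	// via the environment before the worker builds it. AP_2K_MAX_MEMORY is
	// the exact variable exercised by TestResOverride in this commit
	// (AddPiece, 2KiB sectors, MaxMemory); other combinations are assumed to
	// follow the same pattern.
	os.Setenv("AP_2K_MAX_MEMORY", "2147483648") // 2 GiB

	// `lotus-worker resources --default` prints the authoritative list of
	// recognized variable names and their default values.
	fmt.Println(os.Getenv("AP_2K_MAX_MEMORY"))
}
```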
## lotus-worker tasks
```
NAME:
View File
@ -7,7 +7,7 @@ USAGE:
lotus [global options] command [command options] [arguments...]
VERSION:
1.13.1
1.13.2
COMMANDS:
daemon Start a lotus daemon process
@ -426,6 +426,8 @@ COMMANDS:
RETRIEVAL:
find Find data in the network
retrieve Retrieve data from network
cat Show data from network
ls List object links
cancel-retrieval Cancel a retrieval deal by deal ID; this also cancels the associated transfer
list-retrievals List retrieval market deals
STORAGE:
@ -544,12 +546,94 @@ USAGE:
CATEGORY:
RETRIEVAL
DESCRIPTION:
Retrieve data from the Filecoin network.
The retrieve command will attempt to find a provider and make a retrieval deal
with them. If a provider can't be found automatically, one can be specified
explicitly with the --provider flag.
By default the data will be interpreted as a DAG-PB UnixFSv1 file. Alternatively
a CAR file containing the raw IPLD graph can be exported by setting the --car
flag.
Partial Retrieval:
The --data-selector flag can be used to specify a sub-graph to fetch. The
selector can be specified as either an IPLD datamodel text-path selector or an
IPLD JSON selector.
In case of unixfs retrieval, the selector must point at a single root node, and
match the entire graph under that node.
In case of CAR retrieval, the selector must have one common "sub-root" node.
Examples:
- Retrieve a file by CID
$ lotus client retrieve Qm... my-file.txt
- Retrieve a file by CID from f0123
$ lotus client retrieve --provider f0123 Qm... my-file.txt
- Retrieve the first file from a specified directory
$ lotus client retrieve --data-selector /Links/0/Hash Qm... my-file.txt
OPTIONS:
--car Export to a car file instead of a regular file (default: false)
--data-selector value, --datamodel-path-selector value IPLD datamodel text-path selector, or IPLD json selector
--car-export-merkle-proof (requires --data-selector and --car) Export data-selector merkle proof (default: false)
--from value address to send transactions from
--provider value, --miner value provider to use for retrieval, if not present it'll use local discovery
--maxPrice value maximum price the client is willing to consider (default: 0 FIL)
--pieceCid value require data to be retrieved from a specific Piece CID
--allow-local (default: false)
--help, -h show help (default: false)
```
### lotus client cat
```
NAME:
lotus client cat - Show data from network
USAGE:
lotus client cat [command options] [dataCid]
CATEGORY:
RETRIEVAL
OPTIONS:
--ipld list IPLD datamodel links (default: false)
--data-selector value IPLD datamodel text-path selector, or IPLD json selector
--from value address to send transactions from
--car export to a car file instead of a regular file (default: false)
--miner value miner address for retrieval, if not present it'll use local discovery
--datamodel-path-selector value a rudimentary (DM-level-only) text-path selector, allowing for sub-selection within a deal
--maxPrice value maximum price the client is willing to consider (default: 0.01 FIL)
--provider value, --miner value provider to use for retrieval, if not present it'll use local discovery
--maxPrice value maximum price the client is willing to consider (default: 0 FIL)
--pieceCid value require data to be retrieved from a specific Piece CID
--allow-local (default: false)
--help, -h show help (default: false)
```
### lotus client ls
```
NAME:
lotus client ls - List object links
USAGE:
lotus client ls [command options] [dataCid]
CATEGORY:
RETRIEVAL
OPTIONS:
--ipld list IPLD datamodel links (default: false)
--depth value list links recursively up to the specified depth (default: 1)
--data-selector value IPLD datamodel text-path selector, or IPLD json selector
--from value address to send transactions from
--provider value, --miner value provider to use for retrieval, if not present it'll use local discovery
--maxPrice value maximum price the client is willing to consider (default: 0 FIL)
--pieceCid value require data to be retrieved from a specific Piece CID
--allow-local (default: false)
--help, -h show help (default: false)
@ -857,6 +941,7 @@ COMMANDS:
propose Propose a multisig transaction
propose-remove Propose to remove a signer
approve Approve a multisig message
cancel Cancel a multisig message
add-propose Propose to add a signer
add-approve Approve a message to add a signer
add-cancel Cancel a message to add a signer
@ -952,6 +1037,20 @@ OPTIONS:
```
### lotus msig cancel
```
NAME:
lotus msig cancel - Cancel a multisig message
USAGE:
lotus msig cancel [command options] <multisigAddress messageId> [destination value [methodId methodParams]]
OPTIONS:
--from value account to send the cancel message from
--help, -h show help (default: false)
```
### lotus msig add-propose
```
NAME:
@ -1580,8 +1679,18 @@ OPTIONS:
--help, -h show help (default: false)
```
### lotus mpool manage
```
NAME:
lotus mpool manage -
USAGE:
lotus mpool manage [command options] [arguments...]
OPTIONS:
--help, -h show help (default: false)
```
## lotus state
@ -2450,7 +2559,8 @@ CATEGORY:
DEVELOPER
OPTIONS:
--help, -h show help (default: false)
--timeout value duration to wait till fail (default: 30s)
--help, -h show help (default: false)
```
extern/sector-storage/cgroups.go vendored Normal file
View File
@ -0,0 +1,12 @@
//go:build !linux
// +build !linux
package sectorstorage
func cgroupV1Mem() (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) {
return 0, 0, 0, 0, nil
}
func cgroupV2Mem() (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) {
return 0, 0, 0, 0, nil
}
extern/sector-storage/cgroups_linux.go vendored Normal file
View File
@ -0,0 +1,117 @@
//go:build linux
// +build linux
package sectorstorage
import (
"bufio"
"bytes"
"math"
"os"
"path/filepath"
"github.com/containerd/cgroups"
cgroupv2 "github.com/containerd/cgroups/v2"
)
func cgroupV2MountPoint() (string, error) {
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return "", err
}
defer f.Close() //nolint
scanner := bufio.NewScanner(f)
for scanner.Scan() {
fields := bytes.Fields(scanner.Bytes())
if len(fields) >= 9 && bytes.Equal(fields[8], []byte("cgroup2")) {
return string(fields[4]), nil
}
}
return "", cgroups.ErrMountPointNotExist
}
func cgroupV1Mem() (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) {
path := cgroups.NestedPath("")
if pid := os.Getpid(); pid == 1 {
path = cgroups.RootPath
}
c, err := cgroups.Load(cgroups.SingleSubsystem(cgroups.V1, cgroups.Memory), path)
if err != nil {
return 0, 0, 0, 0, err
}
stats, err := c.Stat()
if err != nil {
return 0, 0, 0, 0, err
}
if stats.Memory == nil {
return 0, 0, 0, 0, nil
}
if stats.Memory.Usage != nil {
memoryMax = stats.Memory.Usage.Limit
// Exclude cached files
memoryUsed = stats.Memory.Usage.Usage - stats.Memory.InactiveFile - stats.Memory.ActiveFile
}
if stats.Memory.Swap != nil {
swapMax = stats.Memory.Swap.Limit
swapUsed = stats.Memory.Swap.Usage
}
return memoryMax, memoryUsed, swapMax, swapUsed, nil
}
func cgroupV2MemFromPath(mp, path string) (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) {
c, err := cgroupv2.LoadManager(mp, path)
if err != nil {
return 0, 0, 0, 0, err
}
stats, err := c.Stat()
if err != nil {
return 0, 0, 0, 0, err
}
if stats.Memory != nil {
memoryMax = stats.Memory.UsageLimit
// Exclude memory used caching files
memoryUsed = stats.Memory.Usage - stats.Memory.File
swapMax = stats.Memory.SwapLimit
swapUsed = stats.Memory.SwapUsage
}
return memoryMax, memoryUsed, swapMax, swapUsed, nil
}
func cgroupV2Mem() (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) {
memoryMax = math.MaxUint64
swapMax = math.MaxUint64
path, err := cgroupv2.PidGroupPath(os.Getpid())
if err != nil {
return 0, 0, 0, 0, err
}
mp, err := cgroupV2MountPoint()
if err != nil {
return 0, 0, 0, 0, err
}
for path != "/" {
cgMemoryMax, cgMemoryUsed, cgSwapMax, cgSwapUsed, err := cgroupV2MemFromPath(mp, path)
if err != nil {
return 0, 0, 0, 0, err
}
if cgMemoryMax != 0 && cgMemoryMax < memoryMax {
log.Debugf("memory limited by cgroup %s: %v", path, cgMemoryMax)
memoryMax = cgMemoryMax
memoryUsed = cgMemoryUsed
}
if cgSwapMax != 0 && cgSwapMax < swapMax {
log.Debugf("swap limited by cgroup %s: %v", path, cgSwapMax)
swapMax = cgSwapMax
swapUsed = cgSwapUsed
}
path = filepath.Dir(path)
}
return memoryMax, memoryUsed, swapMax, swapUsed, nil
}
View File
@ -7,6 +7,9 @@ import (
"bufio"
"bytes"
"context"
"crypto/rand"
"encoding/base64"
"encoding/json"
"io"
"math/bits"
"os"
@ -530,9 +533,19 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef,
if err != nil {
return nil, xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err)
}
return p1o, nil
p1odec := map[string]interface{}{}
if err := json.Unmarshal(p1o, &p1odec); err != nil {
return nil, xerrors.Errorf("unmarshaling pc1 output: %w", err)
}
p1odec["_lotus_SealRandomness"] = ticket
return json.Marshal(&p1odec)
}
var PC2CheckRounds = 3
func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) {
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing)
if err != nil {
@ -545,6 +558,50 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef,
return storage.SectorCids{}, xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err)
}
ssize, err := sector.ProofType.SectorSize()
if err != nil {
return storage.SectorCids{}, xerrors.Errorf("get ssize: %w", err)
}
p1odec := map[string]interface{}{}
if err := json.Unmarshal(phase1Out, &p1odec); err != nil {
return storage.SectorCids{}, xerrors.Errorf("unmarshaling pc1 output: %w", err)
}
var ticket abi.SealRandomness
ti, found := p1odec["_lotus_SealRandomness"]
if found {
ticket, err = base64.StdEncoding.DecodeString(ti.(string))
if err != nil {
return storage.SectorCids{}, xerrors.Errorf("decoding ticket: %w", err)
}
for i := 0; i < PC2CheckRounds; i++ {
var sd [32]byte
_, _ = rand.Read(sd[:])
_, err := ffi.SealCommitPhase1(
sector.ProofType,
sealedCID,
unsealedCID,
paths.Cache,
paths.Sealed,
sector.ID.Number,
sector.ID.Miner,
ticket,
sd[:],
[]abi.PieceInfo{{Size: abi.PaddedPieceSize(ssize), PieceCID: unsealedCID}},
)
if err != nil {
log.Warn("checking PreCommit failed: ", err)
log.Warnf("num:%d tkt:%v seed:%v sealedCID:%v, unsealedCID:%v", sector.ID.Number, ticket, sd[:], sealedCID, unsealedCID)
return storage.SectorCids{}, xerrors.Errorf("checking PreCommit failed: %w", err)
}
}
}
return storage.SectorCids{
Unsealed: unsealedCID,
Sealed: sealedCID,
View File
@ -8,7 +8,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
)
var MTTresh = uint64(32 << 20)
var MTTresh = uint64(512 << 10)
func mtChunkCount(usz abi.PaddedPieceSize) uint64 {
threads := (uint64(usz)) / MTTresh
View File
@ -16,13 +16,21 @@ type unpadReader struct {
work []byte
}
func BufSize(sz abi.PaddedPieceSize) int {
return int(MTTresh * mtChunkCount(sz))
}
func NewUnpadReader(src io.Reader, sz abi.PaddedPieceSize) (io.Reader, error) {
buf := make([]byte, BufSize(sz))
return NewUnpadReaderBuf(src, sz, buf)
}
func NewUnpadReaderBuf(src io.Reader, sz abi.PaddedPieceSize, buf []byte) (io.Reader, error) {
if err := sz.Validate(); err != nil {
return nil, xerrors.Errorf("bad piece size: %w", err)
}
buf := make([]byte, MTTresh*mtChunkCount(sz))
return &unpadReader{
src: src,
View File
@ -51,13 +51,8 @@ type SectorManager interface {
FaultTracker
}
type WorkerID uuid.UUID // worker session UUID
var ClosedWorkerID = uuid.UUID{}
func (w WorkerID) String() string {
return uuid.UUID(w).String()
}
type Manager struct {
ls stores.LocalStorage
storage *stores.Remote
View File
@ -322,7 +322,7 @@ func TestRestartWorker(t *testing.T) {
defer cleanup()
localTasks := []sealtasks.TaskType{
sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch,
sealtasks.TTAddPiece, sealtasks.TTFetch,
}
wds := datastore.NewMapDatastore()
@ -332,7 +332,7 @@ func TestRestartWorker(t *testing.T) {
return &testExec{apch: arch}, nil
}, WorkerConfig{
TaskTypes: localTasks,
}, stor, lstor, idx, m, statestore.New(wds))
}, os.LookupEnv, stor, lstor, idx, m, statestore.New(wds))
err := m.AddWorker(ctx, w)
require.NoError(t, err)
@ -368,7 +368,7 @@ func TestRestartWorker(t *testing.T) {
return &testExec{apch: arch}, nil
}, WorkerConfig{
TaskTypes: localTasks,
}, stor, lstor, idx, m, statestore.New(wds))
}, os.LookupEnv, stor, lstor, idx, m, statestore.New(wds))
err = m.AddWorker(ctx, w)
require.NoError(t, err)
@ -404,7 +404,7 @@ func TestReenableWorker(t *testing.T) {
return &testExec{apch: arch}, nil
}, WorkerConfig{
TaskTypes: localTasks,
}, stor, lstor, idx, m, statestore.New(wds))
}, os.LookupEnv, stor, lstor, idx, m, statestore.New(wds))
err := m.AddWorker(ctx, w)
require.NoError(t, err)
@ -453,3 +453,123 @@ func TestReenableWorker(t *testing.T) {
i, _ = m.sched.Info(ctx)
require.Len(t, i.(SchedDiagInfo).OpenWindows, 2)
}
func TestResUse(t *testing.T) {
logging.SetAllLoggers(logging.LevelDebug)
ctx, done := context.WithCancel(context.Background())
defer done()
ds := datastore.NewMapDatastore()
m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, ds)
defer cleanup()
localTasks := []sealtasks.TaskType{
sealtasks.TTAddPiece, sealtasks.TTFetch,
}
wds := datastore.NewMapDatastore()
arch := make(chan chan apres)
w := newLocalWorker(func() (ffiwrapper.Storage, error) {
return &testExec{apch: arch}, nil
}, WorkerConfig{
TaskTypes: localTasks,
}, func(s string) (string, bool) {
return "", false
}, stor, lstor, idx, m, statestore.New(wds))
err := m.AddWorker(ctx, w)
require.NoError(t, err)
sid := storage.SectorRef{
ID: abi.SectorID{Miner: 1000, Number: 1},
ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
}
go func() {
_, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127)))
require.Error(t, err)
}()
l:
for {
st := m.WorkerStats()
require.Len(t, st, 1)
for _, w := range st {
if w.MemUsedMax > 0 {
break l
}
time.Sleep(time.Millisecond)
}
}
st := m.WorkerStats()
require.Len(t, st, 1)
for _, w := range st {
require.Equal(t, storiface.ResourceTable[sealtasks.TTAddPiece][abi.RegisteredSealProof_StackedDrg2KiBV1].MaxMemory, w.MemUsedMax)
}
}
func TestResOverride(t *testing.T) {
logging.SetAllLoggers(logging.LevelDebug)
ctx, done := context.WithCancel(context.Background())
defer done()
ds := datastore.NewMapDatastore()
m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, ds)
defer cleanup()
localTasks := []sealtasks.TaskType{
sealtasks.TTAddPiece, sealtasks.TTFetch,
}
wds := datastore.NewMapDatastore()
arch := make(chan chan apres)
w := newLocalWorker(func() (ffiwrapper.Storage, error) {
return &testExec{apch: arch}, nil
}, WorkerConfig{
TaskTypes: localTasks,
}, func(s string) (string, bool) {
if s == "AP_2K_MAX_MEMORY" {
return "99999", true
}
return "", false
}, stor, lstor, idx, m, statestore.New(wds))
err := m.AddWorker(ctx, w)
require.NoError(t, err)
sid := storage.SectorRef{
ID: abi.SectorID{Miner: 1000, Number: 1},
ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
}
go func() {
_, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127)))
require.Error(t, err)
}()
l:
for {
st := m.WorkerStats()
require.Len(t, st, 1)
for _, w := range st {
if w.MemUsedMax > 0 {
break l
}
time.Sleep(time.Millisecond)
}
}
st := m.WorkerStats()
require.Len(t, st, 1)
for _, w := range st {
require.Equal(t, uint64(99999), w.MemUsedMax)
}
}
View File
@ -12,6 +12,7 @@ import (
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/dagstore/mount"
ffiwrapper2 "github.com/filecoin-project/go-commp-utils/ffiwrapper"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/go-state-types/abi"
@ -384,12 +385,22 @@ func generateFakePoSt(sectorInfo []proof5.SectorInfo, rpt func(abi.RegisteredSea
}
}
func (mgr *SectorMgr) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) {
if offset != 0 {
func (mgr *SectorMgr) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error) {
if uint64(offset) != 0 {
panic("implme")
}
return ioutil.NopCloser(bytes.NewReader(mgr.pieces[mgr.sectors[sector.ID].pieces[0]][:size])), false, nil
br := bytes.NewReader(mgr.pieces[mgr.sectors[sector.ID].pieces[0]][:size])
return struct {
io.ReadCloser
io.Seeker
io.ReaderAt
}{
ReadCloser: ioutil.NopCloser(br),
Seeker: br,
ReaderAt: br,
}, false, nil
}
func (mgr *SectorMgr) StageFakeData(mid abi.ActorID, spt abi.RegisteredSealProof) (storage.SectorRef, []abi.PieceInfo, error) {
View File
@ -8,6 +8,7 @@ import (
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
"github.com/filecoin-project/dagstore/mount"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-storage/storage"
@ -23,7 +24,11 @@ type Unsealer interface {
type PieceProvider interface {
// ReadPiece is used to read an Unsealed piece at the given offset and of the given size from a Sector
ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error)
// pieceOffset + pieceSize specify piece bounds for unsealing (note: with SDR the entire sector will be unsealed by
// default in most cases, but this might matter with future PoRep)
// startOffset is added to the pieceOffset to get the starting reader offset.
// The number of bytes that can be read is pieceSize-startOffset
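// (Illustrative example: for pieceSize=2032, seeking the returned reader to
// startOffset=100 leaves at most 2032-100=1932 bytes readable.)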
ReadPiece(ctx context.Context, sector storage.SectorRef, pieceOffset storiface.UnpaddedByteIndex, pieceSize abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error)
IsUnsealed(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error)
}
@ -67,50 +72,104 @@ func (p *pieceProvider) IsUnsealed(ctx context.Context, sector storage.SectorRef
// It will NOT try to schedule an Unseal of a sealed sector file for the read.
//
// Returns a nil reader if the piece does NOT exist in any unsealed file or there is no unsealed file for the given sector on any of the workers.
func (p *pieceProvider) tryReadUnsealedPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (io.ReadCloser, context.CancelFunc, error) {
func (p *pieceProvider) tryReadUnsealedPiece(ctx context.Context, pc cid.Cid, sector storage.SectorRef, pieceOffset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (mount.Reader, error) {
// acquire a lock purely for reading unsealed sectors
ctx, cancel := context.WithCancel(ctx)
if err := p.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil {
cancel()
return nil, nil, xerrors.Errorf("acquiring read sector lock: %w", err)
return nil, xerrors.Errorf("acquiring read sector lock: %w", err)
}
// Reader returns a reader for an unsealed piece at the given offset in the given sector.
// Reader returns a reader getter for an unsealed piece at the given offset in the given sector.
// The returned reader will be nil if none of the workers has an unsealed sector file containing
// the unsealed piece.
r, err := p.storage.Reader(ctx, sector, abi.PaddedPieceSize(offset.Padded()), size.Padded())
rg, err := p.storage.Reader(ctx, sector, abi.PaddedPieceSize(pieceOffset.Padded()), size.Padded())
if err != nil {
cancel()
log.Debugf("did not get storage reader;sector=%+v, err:%s", sector.ID, err)
cancel()
return nil, nil, err
return nil, err
}
if r == nil {
if rg == nil {
cancel()
return nil, nil
}
return r, cancel, nil
buf := make([]byte, fr32.BufSize(size.Padded()))
pr, err := (&pieceReader{
ctx: ctx,
getReader: func(ctx context.Context, startOffset uint64) (io.ReadCloser, error) {
startOffsetAligned := storiface.UnpaddedByteIndex(startOffset / 127 * 127) // floor to multiple of 127
r, err := rg(startOffsetAligned.Padded())
if err != nil {
return nil, xerrors.Errorf("getting reader at +%d: %w", startOffsetAligned, err)
}
upr, err := fr32.NewUnpadReaderBuf(r, size.Padded(), buf)
if err != nil {
r.Close() // nolint
return nil, xerrors.Errorf("creating unpadded reader: %w", err)
}
bir := bufio.NewReaderSize(upr, 127)
if startOffset > uint64(startOffsetAligned) {
if _, err := bir.Discard(int(startOffset - uint64(startOffsetAligned))); err != nil {
r.Close() // nolint
return nil, xerrors.Errorf("discarding bytes for startOffset: %w", err)
}
}
return struct {
io.Reader
io.Closer
}{
Reader: bir,
Closer: funcCloser(func() error {
return r.Close()
}),
}, nil
},
len: size,
onClose: cancel,
pieceCid: pc,
}).init()
if err != nil || pr == nil { // pr == nil to make sure we don't return typed nil
cancel()
return nil, err
}
return pr, err
}
type funcCloser func() error
func (f funcCloser) Close() error {
return f()
}
var _ io.Closer = funcCloser(nil)
// ReadPiece is used to read an Unsealed piece at the given offset and of the given size from a Sector
// If an Unsealed sector file exists with the Piece Unsealed in it, we'll use that for the read.
// Otherwise, we will Unseal a Sealed sector file for the given sector and read the Unsealed piece from it.
// If we do NOT have an existing unsealed file containing the given piece thus causing us to schedule an Unseal,
// the returned boolean parameter will be set to true.
// If we have an existing unsealed file containing the given piece, the returned boolean will be set to false.
func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) {
if err := offset.Valid(); err != nil {
return nil, false, xerrors.Errorf("offset is not valid: %w", err)
func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef, pieceOffset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error) {
if err := pieceOffset.Valid(); err != nil {
return nil, false, xerrors.Errorf("pieceOffset is not valid: %w", err)
}
if err := size.Validate(); err != nil {
return nil, false, xerrors.Errorf("size is not a valid piece size: %w", err)
}
r, unlock, err := p.tryReadUnsealedPiece(ctx, sector, offset, size)
r, err := p.tryReadUnsealedPiece(ctx, unsealed, sector, pieceOffset, size)
log.Debugf("result of first tryReadUnsealedPiece: r=%+v, err=%s", r, err)
if xerrors.Is(err, storiface.ErrSectorNotFound) {
log.Debugf("no unsealed sector file with unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size)
log.Debugf("no unsealed sector file with unsealed piece, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size)
err = nil
}
if err != nil {
@ -129,14 +188,14 @@ func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef,
if unsealed == cid.Undef {
commd = nil
}
if err := p.uns.SectorsUnsealPiece(ctx, sector, offset, size, ticket, commd); err != nil {
if err := p.uns.SectorsUnsealPiece(ctx, sector, pieceOffset, size, ticket, commd); err != nil {
log.Errorf("failed to SectorsUnsealPiece: %s", err)
return nil, false, xerrors.Errorf("unsealing piece: %w", err)
}
log.Debugf("unsealed a sector file to read the piece, sector=%+v, offset=%d, size=%d", sector, offset, size)
log.Debugf("unsealed a sector file to read the piece, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size)
r, unlock, err = p.tryReadUnsealedPiece(ctx, sector, offset, size)
r, err = p.tryReadUnsealedPiece(ctx, unsealed, sector, pieceOffset, size)
if err != nil {
log.Errorf("failed to tryReadUnsealedPiece after SectorsUnsealPiece: %s", err)
return nil, true, xerrors.Errorf("read after unsealing: %w", err)
@ -145,32 +204,12 @@ func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef,
log.Errorf("got no reader after unsealing piece")
return nil, true, xerrors.Errorf("got no reader after unsealing piece")
}
log.Debugf("got a reader to read unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size)
log.Debugf("got a reader to read unsealed piece, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size)
} else {
log.Debugf("unsealed piece already exists, no need to unseal, sector=%+v, offset=%d, size=%d", sector, offset, size)
log.Debugf("unsealed piece already exists, no need to unseal, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size)
}
upr, err := fr32.NewUnpadReader(r, size.Padded())
if err != nil {
unlock()
return nil, uns, xerrors.Errorf("creating unpadded reader: %w", err)
}
log.Debugf("returning reader to read unsealed piece, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size)
log.Debugf("returning reader to read unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size)
return &funcCloser{
Reader: bufio.NewReaderSize(upr, 127),
close: func() error {
err = r.Close()
unlock()
return err
},
}, uns, nil
return r, uns, nil
}
type funcCloser struct {
io.Reader
close func() error
}
func (fc *funcCloser) Close() error { return fc.close() }
View File
@ -7,6 +7,7 @@ import (
"math/rand"
"net"
"net/http"
"os"
"testing"
"github.com/filecoin-project/go-state-types/abi"
@ -286,7 +287,7 @@ func (p *pieceProviderTestHarness) addRemoteWorker(t *testing.T, tasks []sealtas
worker := newLocalWorker(nil, WorkerConfig{
TaskTypes: tasks,
}, remote, localStore, p.index, p.mgr, csts)
}, os.LookupEnv, remote, localStore, p.index, p.mgr, csts)
p.servers = append(p.servers, svc)
p.localStores = append(p.localStores, localStore)
extern/sector-storage/piece_reader.go vendored Normal file
View File
@ -0,0 +1,180 @@
package sectorstorage
import (
"bufio"
"context"
"io"
"github.com/ipfs/go-cid"
"go.opencensus.io/stats"
"golang.org/x/xerrors"
"github.com/filecoin-project/dagstore/mount"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/metrics"
)
// For small read skips, it's faster to "burn" some bytes than to setup new sector reader.
// Assuming 1ms stream seek latency, and 1G/s stream rate, we're willing to discard up to 1 MiB.
var MaxPieceReaderBurnBytes int64 = 1 << 20 // 1M
var ReadBuf = 128 * (127 * 8) // unpadded(128k)
type pieceGetter func(ctx context.Context, offset uint64) (io.ReadCloser, error)
type pieceReader struct {
ctx context.Context
getReader pieceGetter
pieceCid cid.Cid
len abi.UnpaddedPieceSize
onClose context.CancelFunc
closed bool
seqAt int64 // next byte to be read by io.Reader
r io.ReadCloser
br *bufio.Reader
rAt int64
}
func (p *pieceReader) init() (_ *pieceReader, err error) {
stats.Record(p.ctx, metrics.DagStorePRInitCount.M(1))
p.rAt = 0
p.r, err = p.getReader(p.ctx, uint64(p.rAt))
if err != nil {
return nil, err
}
if p.r == nil {
return nil, nil
}
p.br = bufio.NewReaderSize(p.r, ReadBuf)
return p, nil
}
func (p *pieceReader) check() error {
if p.closed {
return xerrors.Errorf("reader closed")
}
return nil
}
func (p *pieceReader) Close() error {
if err := p.check(); err != nil {
return err
}
if p.r != nil {
if err := p.r.Close(); err != nil {
return err
}
p.r = nil
}
p.onClose()
p.closed = true
return nil
}
func (p *pieceReader) Read(b []byte) (int, error) {
if err := p.check(); err != nil {
return 0, err
}
n, err := p.ReadAt(b, p.seqAt)
p.seqAt += int64(n)
return n, err
}
func (p *pieceReader) Seek(offset int64, whence int) (int64, error) {
if err := p.check(); err != nil {
return 0, err
}
switch whence {
case io.SeekStart:
p.seqAt = offset
case io.SeekCurrent:
p.seqAt += offset
case io.SeekEnd:
p.seqAt = int64(p.len) + offset
default:
return 0, xerrors.Errorf("bad whence")
}
return p.seqAt, nil
}
func (p *pieceReader) ReadAt(b []byte, off int64) (n int, err error) {
if err := p.check(); err != nil {
return 0, err
}
stats.Record(p.ctx, metrics.DagStorePRBytesRequested.M(int64(len(b))))
// 1. Get the backing reader into the correct position
// if the backing reader is ahead of the offset we want, or more than
// MaxPieceReaderBurnBytes behind, reset the reader
if p.r == nil || p.rAt > off || p.rAt+MaxPieceReaderBurnBytes < off {
if p.r != nil {
if err := p.r.Close(); err != nil {
return 0, xerrors.Errorf("closing backing reader: %w", err)
}
p.r = nil
p.br = nil
}
log.Debugw("pieceReader new stream", "piece", p.pieceCid, "at", p.rAt, "off", off-p.rAt, "n", len(b))
if off > p.rAt {
stats.Record(p.ctx, metrics.DagStorePRSeekForwardBytes.M(off-p.rAt), metrics.DagStorePRSeekForwardCount.M(1))
} else {
stats.Record(p.ctx, metrics.DagStorePRSeekBackBytes.M(p.rAt-off), metrics.DagStorePRSeekBackCount.M(1))
}
p.rAt = off
p.r, err = p.getReader(p.ctx, uint64(p.rAt))
if err != nil {
return 0, xerrors.Errorf("getting backing reader: %w", err)
}
p.br = bufio.NewReaderSize(p.r, ReadBuf)
}
// 2. Check if we need to burn some bytes
if off > p.rAt {
stats.Record(p.ctx, metrics.DagStorePRBytesDiscarded.M(off-p.rAt), metrics.DagStorePRDiscardCount.M(1))
n, err := io.CopyN(io.Discard, p.br, off-p.rAt)
p.rAt += n
if err != nil {
return 0, xerrors.Errorf("discarding read gap: %w", err)
}
}
// 3. Sanity check
if off != p.rAt {
return 0, xerrors.Errorf("bad reader offset; requested %d; at %d", off, p.rAt)
}
// 4. Read!
n, err = io.ReadFull(p.br, b)
if n < len(b) {
log.Debugw("pieceReader short read", "piece", p.pieceCid, "at", p.rAt, "toEnd", int64(p.len)-p.rAt, "n", len(b), "read", n, "err", err)
}
if err == io.ErrUnexpectedEOF {
err = io.EOF
}
p.rAt += int64(n)
return n, err
}
var _ mount.Reader = (*pieceReader)(nil)
View File
@ -53,7 +53,7 @@ type WorkerSelector interface {
type scheduler struct {
workersLk sync.RWMutex
workers map[WorkerID]*workerHandle
workers map[storiface.WorkerID]*workerHandle
schedule chan *workerRequest
windowRequests chan *schedWindowRequest
@ -95,7 +95,7 @@ type workerHandle struct {
}
type schedWindowRequest struct {
worker WorkerID
worker storiface.WorkerID
done chan *schedWindow
}
@ -107,14 +107,14 @@ type schedWindow struct {
type workerDisableReq struct {
activeWindows []*schedWindow
wid WorkerID
wid storiface.WorkerID
done func()
}
type activeResources struct {
memUsedMin uint64
memUsedMax uint64
gpuUsed bool
gpuUsed float64
cpuUse uint64
cond *sync.Cond
@ -145,7 +145,7 @@ type workerResponse struct {
func newScheduler() *scheduler {
return &scheduler{
workers: map[WorkerID]*workerHandle{},
workers: map[storiface.WorkerID]*workerHandle{},
schedule: make(chan *workerRequest),
windowRequests: make(chan *schedWindowRequest, 20),
@ -378,7 +378,6 @@ func (sh *scheduler) trySched() {
}()
task := (*sh.schedQueue)[sqi]
needRes := ResourceTable[task.taskType][task.sector.ProofType]
task.indexHeap = sqi
for wnd, windowRequest := range sh.openWindows {
@ -394,6 +393,8 @@ func (sh *scheduler) trySched() {
continue
}
needRes := worker.info.Resources.ResourceSpec(task.sector.ProofType, task.taskType)
// TODO: allow bigger windows
if !windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, "schedAcceptable", worker.info) {
continue
@ -457,7 +458,6 @@ func (sh *scheduler) trySched() {
for sqi := 0; sqi < queueLen; sqi++ {
task := (*sh.schedQueue)[sqi]
needRes := ResourceTable[task.taskType][task.sector.ProofType]
selectedWindow := -1
for _, wnd := range acceptableWindows[task.indexHeap] {
@ -466,6 +466,8 @@ func (sh *scheduler) trySched() {
log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.ID.Number, wnd)
needRes := info.Resources.ResourceSpec(task.sector.ProofType, task.taskType)
// TODO: allow bigger windows
if !windows[wnd].allocated.canHandleRequest(needRes, wid, "schedAssign", info) {
continue
View File
@ -6,7 +6,7 @@ import (
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)
func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerInfo, r Resources, locker sync.Locker, cb func() error) error {
func (a *activeResources) withResources(id storiface.WorkerID, wr storiface.WorkerInfo, r storiface.Resources, locker sync.Locker, cb func() error) error {
for !a.canHandleRequest(r, id, "withResources", wr) {
if a.cond == nil {
a.cond = sync.NewCond(locker)
@ -30,20 +30,20 @@ func (a *activeResources) hasWorkWaiting() bool {
return a.waiting > 0
}
func (a *activeResources) add(wr storiface.WorkerResources, r Resources) {
if r.CanGPU {
a.gpuUsed = true
func (a *activeResources) add(wr storiface.WorkerResources, r storiface.Resources) {
if r.GPUUtilization > 0 {
a.gpuUsed += r.GPUUtilization
}
a.cpuUse += r.Threads(wr.CPUs)
a.cpuUse += r.Threads(wr.CPUs, len(wr.GPUs))
a.memUsedMin += r.MinMemory
a.memUsedMax += r.MaxMemory
}
func (a *activeResources) free(wr storiface.WorkerResources, r Resources) {
if r.CanGPU {
a.gpuUsed = false
func (a *activeResources) free(wr storiface.WorkerResources, r storiface.Resources) {
if r.GPUUtilization > 0 {
a.gpuUsed -= r.GPUUtilization
}
a.cpuUse -= r.Threads(wr.CPUs)
a.cpuUse -= r.Threads(wr.CPUs, len(wr.GPUs))
a.memUsedMin -= r.MinMemory
a.memUsedMax -= r.MaxMemory
@ -54,35 +54,46 @@ func (a *activeResources) free(wr storiface.WorkerResources, r Resources) {
// canHandleRequest evaluates if the worker has enough available resources to
// handle the request.
func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, caller string, info storiface.WorkerInfo) bool {
func (a *activeResources) canHandleRequest(needRes storiface.Resources, wid storiface.WorkerID, caller string, info storiface.WorkerInfo) bool {
if info.IgnoreResources {
// short-circuit; if this worker is ignoring resources, it can always handle the request.
return true
}
res := info.Resources
// TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running)
minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory
if minNeedMem > res.MemPhysical {
log.Debugf("sched: not scheduling on worker %s for %s; not enough physical memory - need: %dM, have %dM", wid, caller, minNeedMem/mib, res.MemPhysical/mib)
memNeeded := needRes.MinMemory + needRes.BaseMinMemory
memUsed := a.memUsedMin
// assume that MemUsed can be swapped, so only check it in the vmem check
memAvail := res.MemPhysical - memUsed
if memNeeded > memAvail {
log.Debugf("sched: not scheduling on worker %s for %s; not enough physical memory - need: %dM, have %dM available", wid, caller, memNeeded/mib, memAvail/mib)
return false
}
maxNeedMem := res.MemReserved + a.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory
vmemNeeded := needRes.MaxMemory + needRes.BaseMinMemory
vmemUsed := a.memUsedMax
workerMemoryReserved := res.MemUsed + res.MemSwapUsed // memory used outside lotus-worker (used by the OS, etc.)
if maxNeedMem > res.MemSwap+res.MemPhysical {
log.Debugf("sched: not scheduling on worker %s for %s; not enough virtual memory - need: %dM, have %dM", wid, caller, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib)
if vmemUsed < workerMemoryReserved {
vmemUsed = workerMemoryReserved
}
vmemAvail := (res.MemPhysical + res.MemSwap) - vmemUsed
if vmemNeeded > vmemAvail {
log.Debugf("sched: not scheduling on worker %s for %s; not enough virtual memory - need: %dM, have %dM available", wid, caller, vmemNeeded/mib, vmemAvail/mib)
return false
}
if a.cpuUse+needRes.Threads(res.CPUs) > res.CPUs {
log.Debugf("sched: not scheduling on worker %s for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads(res.CPUs), a.cpuUse, res.CPUs)
if a.cpuUse+needRes.Threads(res.CPUs, len(res.GPUs)) > res.CPUs {
log.Debugf("sched: not scheduling on worker %s for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads(res.CPUs, len(res.GPUs)), a.cpuUse, res.CPUs)
return false
}
if len(res.GPUs) > 0 && needRes.CanGPU {
if a.gpuUsed {
log.Debugf("sched: not scheduling on worker %s for %s; GPU in use", wid, caller)
if len(res.GPUs) > 0 && needRes.GPUUtilization > 0 {
if a.gpuUsed+needRes.GPUUtilization > float64(len(res.GPUs)) {
log.Debugf("sched: not scheduling on worker %s for %s; GPU(s) in use", wid, caller)
return false
}
}
@ -96,12 +107,21 @@ func (a *activeResources) utilization(wr storiface.WorkerResources) float64 {
cpu := float64(a.cpuUse) / float64(wr.CPUs)
max = cpu
memMin := float64(a.memUsedMin+wr.MemReserved) / float64(wr.MemPhysical)
memUsed := a.memUsedMin
if memUsed < wr.MemUsed {
memUsed = wr.MemUsed
}
memMin := float64(memUsed) / float64(wr.MemPhysical)
if memMin > max {
max = memMin
}
memMax := float64(a.memUsedMax+wr.MemReserved) / float64(wr.MemPhysical+wr.MemSwap)
vmemUsed := a.memUsedMax
if a.memUsedMax < wr.MemUsed+wr.MemSwapUsed {
vmemUsed = wr.MemUsed + wr.MemSwapUsed
}
memMax := float64(vmemUsed) / float64(wr.MemPhysical+wr.MemSwap)
if memMax > max {
max = memMax
}
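
Replacing the boolean CanGPU with a float64 GPUUtilization is what lets several tasks share a worker's GPUs: admission now compares summed utilization against the GPU count instead of a single in-use flag. A minimal sketch of the check, assuming the fields above:

// gpuFits mirrors the GPU branch of canHandleRequest: the check only
// applies when the worker has GPUs and the task asks for GPU capacity.
func gpuFits(gpuUsed, needUtil float64, numGPUs int) bool {
	if numGPUs > 0 && needUtil > 0 {
		return gpuUsed+needUtil <= float64(numGPUs)
	}
	return true
}

For example, with one GPU, two tasks at GPUUtilization 0.5 fit (0.5+0.5 <= 1), while a third is deferred (1.0+0.5 > 1).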


@ -41,14 +41,16 @@ func TestWithPriority(t *testing.T) {
var decentWorkerResources = storiface.WorkerResources{
MemPhysical: 128 << 30,
MemSwap: 200 << 30,
MemReserved: 2 << 30,
MemUsed: 1 << 30,
MemSwapUsed: 1 << 30,
CPUs: 32,
GPUs: []string{"a GPU"},
GPUs: []string{},
}
var constrainedWorkerResources = storiface.WorkerResources{
MemPhysical: 1 << 30,
MemReserved: 2 << 30,
MemUsed: 1 << 30,
MemSwapUsed: 1 << 30,
CPUs: 1,
}
@ -188,6 +190,9 @@ func TestSchedStartStop(t *testing.T) {
}
func TestSched(t *testing.T) {
storiface.ParallelNum = 1
storiface.ParallelDenom = 1
ctx, done := context.WithTimeout(context.Background(), 30*time.Second)
defer done()
@ -254,7 +259,9 @@ func TestSched(t *testing.T) {
return nil
}, noopAction)
require.NoError(t, err, fmt.Sprint(l, l2))
if err != context.Canceled {
require.NoError(t, err, fmt.Sprint(l, l2))
}
}()
<-sched.testSync
@ -299,9 +306,6 @@ func TestSched(t *testing.T) {
}
testFunc := func(workers []workerSpec, tasks []task) func(t *testing.T) {
ParallelNum = 1
ParallelDenom = 1
return func(t *testing.T) {
index := stores.NewIndex()
@ -558,7 +562,7 @@ func BenchmarkTrySched(b *testing.B) {
b.StopTimer()
sched := newScheduler()
sched.workers[WorkerID{}] = &workerHandle{
sched.workers[storiface.WorkerID{}] = &workerHandle{
workerRpc: nil,
info: storiface.WorkerInfo{
Hostname: "t",
@ -570,7 +574,7 @@ func BenchmarkTrySched(b *testing.B) {
for i := 0; i < windows; i++ {
sched.openWindows = append(sched.openWindows, &schedWindowRequest{
worker: WorkerID{},
worker: storiface.WorkerID{},
done: make(chan *schedWindow, 1000),
})
}
@ -616,7 +620,7 @@ func TestWindowCompact(t *testing.T) {
taskType: task,
sector: storage.SectorRef{ProofType: spt},
})
window.allocated.add(wh.info.Resources, ResourceTable[task][spt])
window.allocated.add(wh.info.Resources, storiface.ResourceTable[task][spt])
}
wh.activeWindows = append(wh.activeWindows, window)
@ -635,7 +639,7 @@ func TestWindowCompact(t *testing.T) {
for ti, task := range tasks {
require.Equal(t, task, wh.activeWindows[wi].todo[ti].taskType, "%d, %d", wi, ti)
expectRes.add(wh.info.Resources, ResourceTable[task][spt])
expectRes.add(wh.info.Resources, storiface.ResourceTable[task][spt])
}
require.Equal(t, expectRes.cpuUse, wh.activeWindows[wi].allocated.cpuUse, "%d", wi)


@ -4,17 +4,18 @@ import (
"context"
"time"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)
type schedWorker struct {
sched *scheduler
worker *workerHandle
wid WorkerID
wid storiface.WorkerID
heartbeatTimer *time.Ticker
scheduledWindows chan *schedWindow
@ -50,7 +51,7 @@ func (sh *scheduler) runWorker(ctx context.Context, w Worker) error {
closedMgr: make(chan struct{}),
}
wid := WorkerID(sessID)
wid := storiface.WorkerID(sessID)
sh.workersLk.Lock()
_, exist := sh.workers[wid]
@ -237,7 +238,7 @@ func (sw *schedWorker) checkSession(ctx context.Context) bool {
continue
}
if WorkerID(curSes) != sw.wid {
if storiface.WorkerID(curSes) != sw.wid {
if curSes != ClosedWorkerID {
// worker restarted
log.Warnw("worker session changed (worker restarted?)", "initial", sw.wid, "current", curSes)
@ -296,7 +297,7 @@ func (sw *schedWorker) workerCompactWindows() {
var moved []int
for ti, todo := range window.todo {
needRes := ResourceTable[todo.taskType][todo.sector.ProofType]
needRes := worker.info.Resources.ResourceSpec(todo.sector.ProofType, todo.taskType)
if !lower.allocated.canHandleRequest(needRes, sw.wid, "compactWindows", worker.info) {
continue
}
@ -357,7 +358,7 @@ assignLoop:
worker.lk.Lock()
for t, todo := range firstWindow.todo {
needRes := ResourceTable[todo.taskType][todo.sector.ProofType]
needRes := worker.info.Resources.ResourceSpec(todo.sector.ProofType, todo.taskType)
if worker.preparing.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info) {
tidx = t
break
@ -418,7 +419,7 @@ assignLoop:
continue
}
needRes := ResourceTable[todo.taskType][todo.sector.ProofType]
needRes := storiface.ResourceTable[todo.taskType][todo.sector.ProofType]
if worker.active.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info) {
tidx = t
break
@ -456,7 +457,7 @@ assignLoop:
func (sw *schedWorker) startProcessingTask(req *workerRequest) error {
w, sh := sw.worker, sw.sched
needRes := ResourceTable[req.taskType][req.sector.ProofType]
needRes := w.info.Resources.ResourceSpec(req.sector.ProofType, req.taskType)
w.lk.Lock()
w.preparing.add(w.info.Resources, needRes)
@ -539,7 +540,7 @@ func (sw *schedWorker) startProcessingTask(req *workerRequest) error {
func (sw *schedWorker) startProcessingReadyTask(req *workerRequest) error {
w, sh := sw.worker, sw.sched
needRes := ResourceTable[req.taskType][req.sector.ProofType]
needRes := w.info.Resources.ResourceSpec(req.sector.ProofType, req.taskType)
w.active.add(w.info.Resources, needRes)
@ -579,7 +580,7 @@ func (sw *schedWorker) startProcessingReadyTask(req *workerRequest) error {
return nil
}
func (sh *scheduler) workerCleanup(wid WorkerID, w *workerHandle) {
func (sh *scheduler) workerCleanup(wid storiface.WorkerID, w *workerHandle) {
select {
case <-w.closingMgr:
default:


@ -2,7 +2,6 @@ package stores
import (
"encoding/json"
"io"
"net/http"
"os"
"strconv"
@ -85,7 +84,6 @@ func (handler *FetchHandler) remoteStatFs(w http.ResponseWriter, r *http.Request
// remoteGetSector returns the sector file/tarred directory byte stream for the sectorID and sector file type sent in the request.
// returns an error if it does NOT have the required sector file/dir.
func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Request) {
log.Infof("SERVE GET %s", r.URL)
vars := mux.Vars(r)
id, err := storiface.ParseSectorID(vars["id"])
@ -139,17 +137,12 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ
return
}
rd, err := tarutil.TarDirectory(path)
if err != nil {
log.Errorf("%+v", err)
w.WriteHeader(500)
return
}
w.Header().Set("Content-Type", "application/x-tar")
w.WriteHeader(200)
if _, err := io.CopyBuffer(w, rd, make([]byte, CopyBuf)); err != nil {
log.Errorf("%+v", err)
err := tarutil.TarDirectory(path, w, make([]byte, CopyBuf))
if err != nil {
log.Errorf("send tar: %+v", err)
return
}
} else {


@ -29,6 +29,8 @@ var SkippedHeartbeatThresh = HeartbeatInterval * 5
// filesystem, local or networked / shared by multiple machines
type ID string
type Group = string
type StorageInfo struct {
ID ID
URLs []string // TODO: Support non-http transports
@ -37,6 +39,9 @@ type StorageInfo struct {
CanSeal bool
CanStore bool
Groups []Group
AllowTo []Group
}
type HealthReport struct {
@ -168,6 +173,8 @@ func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st fsutil.FsS
i.stores[si.ID].info.MaxStorage = si.MaxStorage
i.stores[si.ID].info.CanSeal = si.CanSeal
i.stores[si.ID].info.CanStore = si.CanStore
i.stores[si.ID].info.Groups = si.Groups
i.stores[si.ID].info.AllowTo = si.AllowTo
return nil
}
@ -292,6 +299,8 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storif
storageIDs := map[ID]uint64{}
isprimary := map[ID]bool{}
allowTo := map[Group]struct{}{}
for _, pathType := range storiface.PathTypes {
if ft&pathType == 0 {
continue
@ -323,6 +332,14 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storif
urls[k] = rl.String()
}
if allowTo != nil && len(st.info.AllowTo) > 0 {
for _, group := range st.info.AllowTo {
allowTo[group] = struct{}{}
}
} else {
allowTo = nil // allow to any
}
out = append(out, SectorStorageInfo{
ID: id,
URLs: urls,
@ -365,6 +382,22 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storif
continue
}
if allowTo != nil {
allow := false
for _, group := range st.info.Groups {
if _, found := allowTo[group]; found {
log.Debugf("path %s in allowed group %s", st.info.ID, group)
allow = true
break
}
}
if !allow {
log.Debugf("not selecting on %s, not in allowed group, allow %+v; path has %+v", st.info.ID, allowTo, st.info.Groups)
continue
}
}
urls := make([]string, len(st.info.URLs))
for k, u := range st.info.URLs {
rl, err := url.Parse(u)


@ -0,0 +1,154 @@
package stores
import (
"context"
"testing"
"github.com/google/uuid"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)
func init() {
logging.SetLogLevel("stores", "DEBUG")
}
func newTestStorage() StorageInfo {
return StorageInfo{
ID: ID(uuid.New().String()),
CanSeal: true,
CanStore: true,
Groups: nil,
AllowTo: nil,
}
}
var bigFsStat = fsutil.FsStat{
Capacity: 1 << 40,
Available: 1 << 40,
FSAvailable: 1 << 40,
Reserved: 0,
Max: 0,
Used: 0,
}
const s32g = 32 << 30
func TestFindSimple(t *testing.T) {
ctx := context.Background()
i := NewIndex()
stor1 := newTestStorage()
stor2 := newTestStorage()
require.NoError(t, i.StorageAttach(ctx, stor1, bigFsStat))
require.NoError(t, i.StorageAttach(ctx, stor2, bigFsStat))
s1 := abi.SectorID{
Miner: 12,
Number: 34,
}
{
si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, true)
require.NoError(t, err)
require.Len(t, si, 0)
}
require.NoError(t, i.StorageDeclareSector(ctx, stor1.ID, s1, storiface.FTSealed, true))
{
si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, false)
require.NoError(t, err)
require.Len(t, si, 1)
require.Equal(t, stor1.ID, si[0].ID)
}
{
si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, true)
require.NoError(t, err)
require.Len(t, si, 2)
}
}
func TestFindNoAllow(t *testing.T) {
ctx := context.Background()
i := NewIndex()
stor1 := newTestStorage()
stor1.AllowTo = []Group{"grp1"}
stor2 := newTestStorage()
require.NoError(t, i.StorageAttach(ctx, stor1, bigFsStat))
require.NoError(t, i.StorageAttach(ctx, stor2, bigFsStat))
s1 := abi.SectorID{
Miner: 12,
Number: 34,
}
require.NoError(t, i.StorageDeclareSector(ctx, stor1.ID, s1, storiface.FTSealed, true))
{
si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, false)
require.NoError(t, err)
require.Len(t, si, 1)
require.Equal(t, stor1.ID, si[0].ID)
}
{
si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, true)
require.NoError(t, err)
require.Len(t, si, 1)
require.Equal(t, stor1.ID, si[0].ID)
}
}
func TestFindAllow(t *testing.T) {
ctx := context.Background()
i := NewIndex()
stor1 := newTestStorage()
stor1.AllowTo = []Group{"grp1"}
stor2 := newTestStorage()
stor2.Groups = []Group{"grp1"}
stor3 := newTestStorage()
stor3.Groups = []Group{"grp2"}
require.NoError(t, i.StorageAttach(ctx, stor1, bigFsStat))
require.NoError(t, i.StorageAttach(ctx, stor2, bigFsStat))
require.NoError(t, i.StorageAttach(ctx, stor3, bigFsStat))
s1 := abi.SectorID{
Miner: 12,
Number: 34,
}
require.NoError(t, i.StorageDeclareSector(ctx, stor1.ID, s1, storiface.FTSealed, true))
{
si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, false)
require.NoError(t, err)
require.Len(t, si, 1)
require.Equal(t, stor1.ID, si[0].ID)
}
{
si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, true)
require.NoError(t, err)
require.Len(t, si, 2)
if si[0].ID == stor1.ID {
require.Equal(t, stor1.ID, si[0].ID)
require.Equal(t, stor2.ID, si[1].ID)
} else {
require.Equal(t, stor1.ID, si[1].ID)
require.Equal(t, stor2.ID, si[0].ID)
}
}
}


@ -46,6 +46,13 @@ type LocalStorageMeta struct {
// MaxStorage specifies the maximum number of bytes to use for sector storage
// (0 = unlimited)
MaxStorage uint64
// List of storage groups this path belongs to
Groups []string
// List of storage groups to which data from this path can be moved. If none
// are specified, allow to all
AllowTo []string
}
// StorageConfig .lotusstorage/storage.json
@ -212,6 +219,8 @@ func (st *Local) OpenPath(ctx context.Context, p string) error {
MaxStorage: meta.MaxStorage,
CanSeal: meta.CanSeal,
CanStore: meta.CanStore,
Groups: meta.Groups,
AllowTo: meta.AllowTo,
}, fst)
if err != nil {
return xerrors.Errorf("declaring storage in index: %w", err)
@ -276,6 +285,8 @@ func (st *Local) Redeclare(ctx context.Context) error {
MaxStorage: meta.MaxStorage,
CanSeal: meta.CanSeal,
CanStore: meta.CanStore,
Groups: meta.Groups,
AllowTo: meta.AllowTo,
}, fst)
if err != nil {
return xerrors.Errorf("redeclaring storage in index: %w", err)


@ -281,7 +281,7 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error {
switch mediatype {
case "application/x-tar":
return tarutil.ExtractTar(resp.Body, outname)
return tarutil.ExtractTar(resp.Body, outname, make([]byte, CopyBuf))
case "application/octet-stream":
f, err := os.Create(outname)
if err != nil {
@ -305,7 +305,6 @@ func (r *Remote) checkAllocated(ctx context.Context, url string, spt abi.Registe
return false, xerrors.Errorf("request: %w", err)
}
req.Header = r.auth.Clone()
fmt.Printf("req using header: %#v \n", r.auth)
req = req.WithContext(ctx)
resp, err := http.DefaultClient.Do(req)
@ -586,7 +585,7 @@ func (r *Remote) CheckIsUnsealed(ctx context.Context, s storage.SectorRef, offse
// 1. no worker (local worker included) has an unsealed file for the given sector OR
// 2. no worker (local worker included) has the unsealed piece in their unsealed sector file.
// Will return a nil reader and a nil error in such a case.
func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size abi.PaddedPieceSize) (io.ReadCloser, error) {
func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size abi.PaddedPieceSize) (func(startOffsetAligned storiface.PaddedByteIndex) (io.ReadCloser, error), error) {
ft := storiface.FTUnsealed
// check if we have the unsealed sector file locally
@ -624,7 +623,52 @@ func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size a
if has {
log.Infof("returning piece reader for local unsealed piece sector=%+v, (offset=%d, size=%d)", s.ID, offset, size)
return r.pfHandler.Reader(pf, storiface.PaddedByteIndex(offset), size)
return func(startOffsetAligned storiface.PaddedByteIndex) (io.ReadCloser, error) {
// don't reuse between readers unless closed
f := pf
pf = nil
if f == nil {
f, err = r.pfHandler.OpenPartialFile(abi.PaddedPieceSize(ssize), path)
if err != nil {
return nil, xerrors.Errorf("opening partial file: %w", err)
}
log.Debugf("local partial file (re)opened %s (+%d,%d)", path, offset, size)
}
r, err := r.pfHandler.Reader(f, storiface.PaddedByteIndex(offset)+startOffsetAligned, size-abi.PaddedPieceSize(startOffsetAligned))
if err != nil {
return nil, err
}
return struct {
io.Reader
io.Closer
}{
Reader: r,
Closer: funcCloser(func() error {
// if we already have a reader cached, close this one
if pf != nil {
if f == nil {
return nil
}
if pf == f {
pf = nil
}
tmp := f
f = nil
return tmp.Close()
}
// otherwise stash it away for reuse
pf = f
return nil
}),
}, nil
}, nil
}
log.Debugf("miner has unsealed file but not unseal piece, %s (+%d,%d)", path, offset, size)
@ -667,16 +711,18 @@ func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size a
continue
}
// readRemote fetches a reader that we can use to read the unsealed piece from the remote worker.
// It uses a ranged HTTP query to ensure we ONLY read the unsealed piece and not the entire unsealed file.
rd, err := r.readRemote(ctx, url, offset, size)
if err != nil {
log.Warnw("reading from remote", "url", url, "error", err)
lastErr = err
continue
}
log.Infof("Read remote %s (+%d,%d)", url, offset, size)
return rd, nil
return func(startOffsetAligned storiface.PaddedByteIndex) (io.ReadCloser, error) {
// readRemote fetches a reader that we can use to read the unsealed piece from the remote worker.
// It uses a ranged HTTP query to ensure we ONLY read the unsealed piece and not the entire unsealed file.
rd, err := r.readRemote(ctx, url, offset+abi.PaddedPieceSize(startOffsetAligned), size)
if err != nil {
log.Warnw("reading from remote", "url", url, "error", err)
return nil, err
}
return rd, err
}, nil
}
}
@ -693,3 +739,11 @@ func (r *Remote) Reserve(ctx context.Context, sid storage.SectorRef, ft storifac
}
var _ Store = &Remote{}
type funcCloser func() error
func (f funcCloser) Close() error {
return f()
}
var _ io.Closer = funcCloser(nil)
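
Callers now receive a reader factory rather than an open stream: a nil factory with a nil error means no worker has the unsealed piece, and each invocation opens a reader at a padded offset inside the piece. A minimal usage sketch, assuming remote, ctx, sectorRef, offset, size and an io.Writer out are in scope:

readerGetter, err := remote.Reader(ctx, sectorRef, offset, size)
if err != nil {
	return err
}
if readerGetter == nil {
	return xerrors.Errorf("no unsealed copy of the piece found")
}
rd, err := readerGetter(0) // 0 = start of the piece; the offset must be padding-aligned
if err != nil {
	return err
}
defer rd.Close() // nolint
_, err = io.Copy(out, rd)
return err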


@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
@ -470,12 +471,20 @@ func TestReader(t *testing.T) {
remoteStore := stores.NewRemote(lstore, index, nil, 6000, pfhandler)
rd, err := remoteStore.Reader(ctx, sectorRef, offset, size)
rdg, err := remoteStore.Reader(ctx, sectorRef, offset, size)
var rd io.ReadCloser
if tc.errStr != "" {
require.Error(t, err)
require.Nil(t, rd)
require.Contains(t, err.Error(), tc.errStr)
if rdg == nil {
require.Error(t, err)
require.Nil(t, rdg)
require.Contains(t, err.Error(), tc.errStr)
} else {
rd, err = rdg(0)
require.Error(t, err)
require.Nil(t, rd)
require.Contains(t, err.Error(), tc.errStr)
}
} else {
require.NoError(t, err)
}
@ -483,7 +492,10 @@ func TestReader(t *testing.T) {
if !tc.expectedNonNilReader {
require.Nil(t, rd)
} else {
require.NotNil(t, rd)
require.NotNil(t, rdg)
rd, err := rdg(0)
require.NoError(t, err)
defer func() {
require.NoError(t, rd.Close())
}()


@ -1,19 +1,31 @@
package sectorstorage
package storiface
import (
"github.com/filecoin-project/go-state-types/abi"
"fmt"
"reflect"
"strconv"
"strings"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
)
type Resources struct {
MinMemory uint64 // What Must be in RAM for decent perf
MaxMemory uint64 // Memory required (swap + ram)
MinMemory uint64 `envname:"MIN_MEMORY"` // What Must be in RAM for decent perf
MaxMemory uint64 `envname:"MAX_MEMORY"` // Memory required (swap + ram; peak memory usage during task execution)
MaxParallelism int // -1 = multithread
CanGPU bool
// GPUUtilization specifies the number of GPUs a task can use
GPUUtilization float64 `envname:"GPU_UTILIZATION"`
BaseMinMemory uint64 // What Must be in RAM for decent perf (shared between threads)
// MaxParallelism specifies the number of CPU cores when GPU is NOT in use
MaxParallelism int `envname:"MAX_PARALLELISM"` // -1 = multithread
// MaxParallelismGPU specifies the number of CPU cores when GPU is in use
MaxParallelismGPU int `envname:"MAX_PARALLELISM_GPU"` // when 0, inherits MaxParallelism
BaseMinMemory uint64 `envname:"BASE_MIN_MEMORY"` // What Must be in RAM for decent perf (shared between threads)
}
/*
@ -32,8 +44,14 @@ var ParallelNum uint64 = 92
var ParallelDenom uint64 = 100
// TODO: Take NUMA into account
func (r Resources) Threads(wcpus uint64) uint64 {
if r.MaxParallelism == -1 {
func (r Resources) Threads(wcpus uint64, gpus int) uint64 {
mp := r.MaxParallelism
if r.GPUUtilization > 0 && gpus > 0 && r.MaxParallelismGPU != 0 { // task can use GPUs and worker has some
mp = r.MaxParallelismGPU
}
if mp == -1 {
n := (wcpus * ParallelNum) / ParallelDenom
if n == 0 {
return wcpus
@ -41,7 +59,7 @@ func (r Resources) Threads(wcpus uint64) uint64 {
return n
}
return uint64(r.MaxParallelism)
return uint64(mp)
}
var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources{
@ -134,8 +152,9 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 30 << 30,
MinMemory: 30 << 30,
MaxParallelism: -1,
CanGPU: true,
MaxParallelism: -1,
MaxParallelismGPU: 6,
GPUUtilization: 1.0,
BaseMinMemory: 1 << 30,
},
@ -143,8 +162,9 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 15 << 30,
MinMemory: 15 << 30,
MaxParallelism: -1,
CanGPU: true,
MaxParallelism: -1,
MaxParallelismGPU: 6,
GPUUtilization: 1.0,
BaseMinMemory: 1 << 30,
},
@ -220,8 +240,9 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 190 << 30, // TODO: Confirm
MinMemory: 60 << 30,
MaxParallelism: -1,
CanGPU: true,
MaxParallelism: -1,
MaxParallelismGPU: 6,
GPUUtilization: 1.0,
BaseMinMemory: 64 << 30, // params
},
@ -229,8 +250,9 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 150 << 30, // TODO: ~30G of this should really be BaseMaxMemory
MinMemory: 30 << 30,
MaxParallelism: -1,
CanGPU: true,
MaxParallelism: -1,
MaxParallelismGPU: 6,
GPUUtilization: 1.0,
BaseMinMemory: 32 << 30, // params
},
@ -239,7 +261,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MinMemory: 1 << 30,
MaxParallelism: 1, // This is fine
CanGPU: true,
GPUUtilization: 1.0,
BaseMinMemory: 10 << 30,
},
@ -248,7 +270,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MinMemory: 2 << 10,
MaxParallelism: 1,
CanGPU: true,
GPUUtilization: 1.0,
BaseMinMemory: 2 << 10,
},
@ -257,7 +279,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MinMemory: 8 << 20,
MaxParallelism: 1,
CanGPU: true,
GPUUtilization: 1.0,
BaseMinMemory: 8 << 20,
},
@ -268,7 +290,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MinMemory: 1 << 20,
MaxParallelism: 0,
CanGPU: false,
GPUUtilization: 0,
BaseMinMemory: 0,
},
@ -277,7 +299,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MinMemory: 1 << 20,
MaxParallelism: 0,
CanGPU: false,
GPUUtilization: 0,
BaseMinMemory: 0,
},
@ -286,7 +308,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MinMemory: 1 << 20,
MaxParallelism: 0,
CanGPU: false,
GPUUtilization: 0,
BaseMinMemory: 0,
},
@ -295,7 +317,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MinMemory: 1 << 20,
MaxParallelism: 0,
CanGPU: false,
GPUUtilization: 0,
BaseMinMemory: 0,
},
@ -304,7 +326,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MinMemory: 1 << 20,
MaxParallelism: 0,
CanGPU: false,
GPUUtilization: 0,
BaseMinMemory: 0,
},
@ -323,3 +345,83 @@ func init() {
m[abi.RegisteredSealProof_StackedDrg64GiBV1_1] = m[abi.RegisteredSealProof_StackedDrg64GiBV1]
}
}
func ParseResourceEnv(lookup func(key, def string) (string, bool)) (map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources, error) {
out := map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources{}
for taskType, defTT := range ResourceTable {
out[taskType] = map[abi.RegisteredSealProof]Resources{}
for spt, defRes := range defTT {
r := defRes // copy
spsz, err := spt.SectorSize()
if err != nil {
return nil, xerrors.Errorf("getting sector size: %w", err)
}
shortSize := strings.TrimSuffix(spsz.ShortString(), "iB")
rr := reflect.ValueOf(&r)
for i := 0; i < rr.Elem().Type().NumField(); i++ {
f := rr.Elem().Type().Field(i)
envname := f.Tag.Get("envname")
if envname == "" {
return nil, xerrors.Errorf("no envname for field '%s'", f.Name)
}
envval, found := lookup(taskType.Short()+"_"+shortSize+"_"+envname, fmt.Sprint(rr.Elem().Field(i).Interface()))
if !found {
// special multicore SDR handling
if (taskType == sealtasks.TTPreCommit1 || taskType == sealtasks.TTUnseal) && envname == "MAX_PARALLELISM" {
v, ok := rr.Elem().Field(i).Addr().Interface().(*int)
if !ok {
// can't happen, but let's not panic
return nil, xerrors.Errorf("res.MAX_PARALLELISM is not int (!?): %w", err)
}
*v, err = getSDRThreads(lookup)
if err != nil {
return nil, err
}
}
continue
}
v := rr.Elem().Field(i).Addr().Interface()
switch fv := v.(type) {
case *uint64:
*fv, err = strconv.ParseUint(envval, 10, 64)
case *int:
*fv, err = strconv.Atoi(envval)
case *float64:
*fv, err = strconv.ParseFloat(envval, 64)
default:
return nil, xerrors.Errorf("unknown resource field type")
}
}
out[taskType][spt] = r
}
}
return out, nil
}
func getSDRThreads(lookup func(key, def string) (string, bool)) (_ int, err error) {
producers := 0
if v, _ := lookup("FIL_PROOFS_USE_MULTICORE_SDR", ""); v == "1" {
producers = 3
if penv, found := lookup("FIL_PROOFS_MULTICORE_SDR_PRODUCERS", ""); found {
producers, err = strconv.Atoi(penv)
if err != nil {
return 0, xerrors.Errorf("parsing (atoi) FIL_PROOFS_MULTICORE_SDR_PRODUCERS: %w", err)
}
}
}
// producers + the one core actually doing the work
return producers + 1, nil
}
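
Note the interplay in Threads: a task only switches to MaxParallelismGPU when it wants GPU capacity and the worker actually has GPUs; with the default MaxParallelism of -1 the count falls back to wcpus*ParallelNum/ParallelDenom. A worked sketch, assuming an entry shaped like the PC2 rows above (MaxParallelism -1, MaxParallelismGPU 6, GPUUtilization 1.0):

r := Resources{MaxParallelism: -1, MaxParallelismGPU: 6, GPUUtilization: 1.0}
_ = r.Threads(32, 0) // no GPUs on the worker: (32*92)/100 = 29 threads
_ = r.Threads(32, 1) // GPU present and usable by the task: 6 threads

Overrides follow the TASKTYPE_SIZE_FIELD naming that ParseResourceEnv builds from taskType.Short() and the sector size, which is where keys like UNS_2K_MAX_PARALLELISM and PC2_2K_GPU_UTILIZATION in the tests below come from.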


@ -0,0 +1,75 @@
package storiface
import (
"fmt"
"testing"
stabi "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
"github.com/stretchr/testify/require"
)
func TestListResourceVars(t *testing.T) {
_, err := ParseResourceEnv(func(key, def string) (string, bool) {
if def != "" {
fmt.Printf("%s=%s\n", key, def)
}
return "", false
})
require.NoError(t, err)
}
func TestListResourceOverride(t *testing.T) {
rt, err := ParseResourceEnv(func(key, def string) (string, bool) {
if key == "UNS_2K_MAX_PARALLELISM" {
return "2", true
}
if key == "PC2_2K_GPU_UTILIZATION" {
return "0.4", true
}
if key == "PC2_2K_MAX_MEMORY" {
return "2222", true
}
return "", false
})
require.NoError(t, err)
require.Equal(t, 2, rt[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism)
require.Equal(t, 0.4, rt[sealtasks.TTPreCommit2][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].GPUUtilization)
require.Equal(t, uint64(2222), rt[sealtasks.TTPreCommit2][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxMemory)
// check that defaults don't get mutated
require.Equal(t, 1, ResourceTable[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism)
}
func TestListResourceSDRMulticoreOverride(t *testing.T) {
rt, err := ParseResourceEnv(func(key, def string) (string, bool) {
if key == "FIL_PROOFS_USE_MULTICORE_SDR" {
return "1", true
}
return "", false
})
require.NoError(t, err)
require.Equal(t, 4, rt[sealtasks.TTPreCommit1][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism)
require.Equal(t, 4, rt[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism)
rt, err = ParseResourceEnv(func(key, def string) (string, bool) {
if key == "FIL_PROOFS_USE_MULTICORE_SDR" {
return "1", true
}
if key == "FIL_PROOFS_MULTICORE_SDR_PRODUCERS" {
return "9000", true
}
return "", false
})
require.NoError(t, err)
require.Equal(t, 9001, rt[sealtasks.TTPreCommit1][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism)
require.Equal(t, 9001, rt[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism)
}


@ -15,6 +15,12 @@ import (
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
)
type WorkerID uuid.UUID // worker session UUID
func (w WorkerID) String() string {
return uuid.UUID(w).String()
}
type WorkerInfo struct {
Hostname string
@ -28,12 +34,35 @@ type WorkerInfo struct {
type WorkerResources struct {
MemPhysical uint64
MemUsed uint64
MemSwap uint64
MemReserved uint64 // Used by system / other processes
MemSwapUsed uint64
CPUs uint64 // Logical cores
GPUs []string
// if nil use the default resource table
Resources map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
}
func (wr WorkerResources) ResourceSpec(spt abi.RegisteredSealProof, tt sealtasks.TaskType) Resources {
res := ResourceTable[tt][spt]
// if the worker specifies custom resource table, prefer that
if wr.Resources != nil {
tr, ok := wr.Resources[tt]
if !ok {
return res
}
r, ok := tr[spt]
if ok {
return r
}
}
// otherwise, use the default resource table
return res
}
type WorkerStats struct {
@ -42,8 +71,8 @@ type WorkerStats struct {
MemUsedMin uint64
MemUsedMax uint64
GpuUsed bool // nolint
CpuUse uint64 // nolint
GpuUsed float64 // nolint
CpuUse uint64 // nolint
}
const (


@ -14,7 +14,7 @@ import (
var log = logging.Logger("tarutil") // nolint
func ExtractTar(body io.Reader, dir string) error {
func ExtractTar(body io.Reader, dir string, buf []byte) error {
if err := os.MkdirAll(dir, 0755); err != nil { // nolint
return xerrors.Errorf("mkdir: %w", err)
}
@ -38,7 +38,7 @@ func ExtractTar(body io.Reader, dir string) error {
// This data is coming from a trusted source, no need to check the size.
//nolint:gosec
if _, err := io.Copy(f, tr); err != nil {
if _, err := io.CopyBuffer(f, tr, buf); err != nil {
return err
}
@ -48,17 +48,7 @@ func ExtractTar(body io.Reader, dir string) error {
}
}
func TarDirectory(dir string) (io.ReadCloser, error) {
r, w := io.Pipe()
go func() {
_ = w.CloseWithError(writeTarDirectory(dir, w))
}()
return r, nil
}
func writeTarDirectory(dir string, w io.Writer) error {
func TarDirectory(dir string, w io.Writer, buf []byte) error {
tw := tar.NewWriter(w)
files, err := ioutil.ReadDir(dir)
@ -81,7 +71,7 @@ func writeTarDirectory(dir string, w io.Writer) error {
return xerrors.Errorf("opening %s for reading: %w", file.Name(), err)
}
if _, err := io.Copy(tw, f); err != nil {
if _, err := io.CopyBuffer(tw, f, buf); err != nil {
return xerrors.Errorf("copy data for file %s: %w", file.Name(), err)
}
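
TarDirectory now writes straight into the supplied writer with a caller-provided copy buffer instead of going through an io.Pipe and a goroutine. A minimal sketch of the call as the fetch handler above uses it, assuming an http.ResponseWriter w and the stores package's CopyBuf size:

w.Header().Set("Content-Type", "application/x-tar")
w.WriteHeader(200)
if err := tarutil.TarDirectory(path, w, make([]byte, CopyBuf)); err != nil {
	log.Errorf("send tar: %+v", err)
	return
}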


@ -102,14 +102,15 @@ func (t *testWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) {
}
func (t *testWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) {
res := ResourceTable[sealtasks.TTPreCommit2][abi.RegisteredSealProof_StackedDrg2KiBV1]
res := storiface.ResourceTable[sealtasks.TTPreCommit2][abi.RegisteredSealProof_StackedDrg2KiBV1]
return storiface.WorkerInfo{
Hostname: "testworkerer",
Resources: storiface.WorkerResources{
MemPhysical: res.MinMemory * 3,
MemUsed: res.MinMemory,
MemSwapUsed: 0,
MemSwap: 0,
MemReserved: res.MinMemory,
CPUs: 32,
GPUs: nil,
},


@ -42,6 +42,7 @@ type WorkerConfig struct {
// used to provide a custom proofs impl (mostly used in testing)
type ExecutorFunc func() (ffiwrapper.Storage, error)
type EnvFunc func(string) (string, bool)
type LocalWorker struct {
storage stores.Store
@ -50,6 +51,7 @@ type LocalWorker struct {
ret storiface.WorkerReturn
executor ExecutorFunc
noSwap bool
envLookup EnvFunc
// see equivalent field on WorkerConfig.
ignoreResources bool
@ -64,7 +66,7 @@ type LocalWorker struct {
closing chan struct{}
}
func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex, ret storiface.WorkerReturn, cst *statestore.StateStore) *LocalWorker {
func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, envLookup EnvFunc, store stores.Store, local *stores.Local, sindex stores.SectorIndex, ret storiface.WorkerReturn, cst *statestore.StateStore) *LocalWorker {
acceptTasks := map[sealtasks.TaskType]struct{}{}
for _, taskType := range wcfg.TaskTypes {
acceptTasks[taskType] = struct{}{}
@ -82,6 +84,7 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store
acceptTasks: acceptTasks,
executor: executor,
noSwap: wcfg.NoSwap,
envLookup: envLookup,
ignoreResources: wcfg.IgnoreResourceFiltering,
session: uuid.New(),
closing: make(chan struct{}),
@ -115,7 +118,7 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store
}
func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex, ret storiface.WorkerReturn, cst *statestore.StateStore) *LocalWorker {
return newLocalWorker(nil, wcfg, store, local, sindex, ret, cst)
return newLocalWorker(nil, wcfg, os.LookupEnv, store, local, sindex, ret, cst)
}
type localWorkerPathProvider struct {
@ -482,6 +485,52 @@ func (l *LocalWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) {
return l.localStore.Local(ctx)
}
func (l *LocalWorker) memInfo() (memPhysical, memUsed, memSwap, memSwapUsed uint64, err error) {
h, err := sysinfo.Host()
if err != nil {
return 0, 0, 0, 0, err
}
mem, err := h.Memory()
if err != nil {
return 0, 0, 0, 0, err
}
memPhysical = mem.Total
// mem.Available is memory available without swapping, it is more relevant for this calculation
memUsed = mem.Total - mem.Available
memSwap = mem.VirtualTotal
memSwapUsed = mem.VirtualUsed
if cgMemMax, cgMemUsed, cgSwapMax, cgSwapUsed, err := cgroupV1Mem(); err == nil {
if cgMemMax > 0 && cgMemMax < memPhysical {
memPhysical = cgMemMax
memUsed = cgMemUsed
}
if cgSwapMax > 0 && cgSwapMax < memSwap {
memSwap = cgSwapMax
memSwapUsed = cgSwapUsed
}
}
if cgMemMax, cgMemUsed, cgSwapMax, cgSwapUsed, err := cgroupV2Mem(); err == nil {
if cgMemMax > 0 && cgMemMax < memPhysical {
memPhysical = cgMemMax
memUsed = cgMemUsed
}
if cgSwapMax > 0 && cgSwapMax < memSwap {
memSwap = cgSwapMax
memSwapUsed = cgSwapUsed
}
}
if l.noSwap {
memSwap = 0
memSwapUsed = 0
}
return memPhysical, memUsed, memSwap, memSwapUsed, nil
}
func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) {
hostname, err := os.Hostname() // TODO: allow overriding from config
if err != nil {
@ -493,30 +542,29 @@ func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) {
log.Errorf("getting gpu devices failed: %+v", err)
}
h, err := sysinfo.Host()
if err != nil {
return storiface.WorkerInfo{}, xerrors.Errorf("getting host info: %w", err)
}
mem, err := h.Memory()
memPhysical, memUsed, memSwap, memSwapUsed, err := l.memInfo()
if err != nil {
return storiface.WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err)
}
memSwap := mem.VirtualTotal
if l.noSwap {
memSwap = 0
resEnv, err := storiface.ParseResourceEnv(func(key, def string) (string, bool) {
return l.envLookup(key)
})
if err != nil {
return storiface.WorkerInfo{}, xerrors.Errorf("interpreting resource env vars: %w", err)
}
return storiface.WorkerInfo{
Hostname: hostname,
IgnoreResources: l.ignoreResources,
Resources: storiface.WorkerResources{
MemPhysical: mem.Total,
MemPhysical: memPhysical,
MemUsed: memUsed,
MemSwap: memSwap,
MemReserved: mem.VirtualUsed + mem.Total - mem.Available, // TODO: sub this process
MemSwapUsed: memSwapUsed,
CPUs: uint64(runtime.NumCPU()),
GPUs: gpus,
Resources: resEnv,
},
}, nil
}
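
memInfo clamps the host-level numbers to cgroup limits so a containerized worker does not advertise memory it cannot actually use; cgroupV1Mem and cgroupV2Mem live elsewhere in this package (lotus pulls in the containerd cgroups library, per go.mod below). As a rough, hypothetical sketch of what the v2 limit read amounts to, not the lotus implementation:

package sketch

import (
	"os"
	"strconv"
	"strings"
)

// readCgroupV2MemMax reads the unified-hierarchy memory limit; cgroup v2
// reports either a byte count or the literal "max" (no limit).
func readCgroupV2MemMax() (uint64, error) {
	b, err := os.ReadFile("/sys/fs/cgroup/memory.max")
	if err != nil {
		return 0, err
	}
	s := strings.TrimSpace(string(b))
	if s == "max" {
		return 0, nil // 0 meaning "unlimited", matching the cgMemMax > 0 guard above
	}
	return strconv.ParseUint(s, 10, 64)
}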


@ -20,7 +20,7 @@ import (
type trackedWork struct {
job storiface.WorkerJob
worker WorkerID
worker storiface.WorkerID
workerHostname string
}
@ -58,7 +58,7 @@ func (wt *workTracker) onDone(ctx context.Context, callID storiface.CallID) {
delete(wt.running, callID)
}
func (wt *workTracker) track(ctx context.Context, ready chan struct{}, wid WorkerID, wi storiface.WorkerInfo, sid storage.SectorRef, task sealtasks.TaskType, cb func() (storiface.CallID, error)) (storiface.CallID, error) {
func (wt *workTracker) track(ctx context.Context, ready chan struct{}, wid storiface.WorkerID, wi storiface.WorkerInfo, sid storage.SectorRef, task sealtasks.TaskType, cb func() (storiface.CallID, error)) (storiface.CallID, error) {
tracked := func(rw int, callID storiface.CallID) trackedWork {
return trackedWork{
job: storiface.WorkerJob{
@ -122,7 +122,7 @@ func (wt *workTracker) track(ctx context.Context, ready chan struct{}, wid Worke
return callID, err
}
func (wt *workTracker) worker(wid WorkerID, wi storiface.WorkerInfo, w Worker) *trackedWorker {
func (wt *workTracker) worker(wid storiface.WorkerID, wi storiface.WorkerInfo, w Worker) *trackedWorker {
return &trackedWorker{
Worker: w,
wid: wid,
@ -152,7 +152,7 @@ func (wt *workTracker) Running() ([]trackedWork, []trackedWork) {
type trackedWorker struct {
Worker
wid WorkerID
wid storiface.WorkerID
workerInfo storiface.WorkerInfo
execute chan struct{} // channel blocking execution in case we're waiting for resources but the task is ready to execute


@ -595,7 +595,7 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo)
}
if err := m.checkCommit(ctx.Context(), sector, proof, tok); err != nil {
return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("commit check error: %w", err)})
return ctx.Send(SectorCommitFailed{xerrors.Errorf("commit check error: %w", err)})
}
}


@ -33,6 +33,8 @@ const (
// (to make it easy to mock for tests)
type TargetAPI interface {
Version(context.Context) (api.APIVersion, error)
ChainGetParentMessages(context.Context, cid.Cid) ([]api.Message, error)
ChainGetParentReceipts(context.Context, cid.Cid) ([]*types.MessageReceipt, error)
ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error)
ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error)
ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error)
@ -44,6 +46,7 @@ type TargetAPI interface {
ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*api.HeadChange, error)
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
ChainGetGenesis(context.Context) (*types.TipSet, error)
GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
MpoolPushUntrusted(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
@ -144,6 +147,14 @@ func (gw *Node) Version(ctx context.Context) (api.APIVersion, error) {
return gw.target.Version(ctx)
}
func (gw *Node) ChainGetParentMessages(ctx context.Context, c cid.Cid) ([]api.Message, error) {
return gw.target.ChainGetParentMessages(ctx, c)
}
func (gw *Node) ChainGetParentReceipts(ctx context.Context, c cid.Cid) ([]*types.MessageReceipt, error) {
return gw.target.ChainGetParentReceipts(ctx, c)
}
func (gw *Node) ChainGetBlockMessages(ctx context.Context, c cid.Cid) (*api.BlockMessages, error) {
return gw.target.ChainGetBlockMessages(ctx, c)
}
@ -231,6 +242,10 @@ func (gw *Node) ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*
return gw.target.ChainGetPath(ctx, from, to)
}
func (gw *Node) ChainGetGenesis(ctx context.Context) (*types.TipSet, error) {
return gw.target.ChainGetGenesis(ctx)
}
func (gw *Node) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) {
return gw.target.ChainReadObj(ctx, c)
}


@ -10,7 +10,6 @@ import (
"path/filepath"
"strings"
"text/template"
"unicode"
"golang.org/x/xerrors"
)
@ -71,9 +70,6 @@ func typeName(e ast.Expr, pkg string) (string, error) {
return t.X.(*ast.Ident).Name + "." + t.Sel.Name, nil
case *ast.Ident:
pstr := t.Name
if !unicode.IsLower(rune(pstr[0])) && pkg != "api" {
pstr = "api." + pstr // todo src pkg name
}
return pstr, nil
case *ast.ArrayType:
subt, err := typeName(t.Elt, pkg)

go.mod

@ -15,6 +15,7 @@ require (
github.com/buger/goterm v1.0.3
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e
github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327
github.com/coreos/go-systemd/v22 v22.3.2
github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e
github.com/dgraph-io/badger/v2 v2.2007.2
@ -36,11 +37,11 @@ require (
github.com/filecoin-project/go-data-transfer v1.11.4
github.com/filecoin-project/go-fil-commcid v0.1.0
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0
github.com/filecoin-project/go-fil-markets v1.13.3
github.com/filecoin-project/go-fil-markets v1.13.4
github.com/filecoin-project/go-jsonrpc v0.1.5
github.com/filecoin-project/go-padreader v0.0.1
github.com/filecoin-project/go-paramfetch v0.0.2
github.com/filecoin-project/go-state-types v0.1.1-0.20210915140513-d354ccf10379
github.com/filecoin-project/go-state-types v0.1.1
github.com/filecoin-project/go-statemachine v1.0.1
github.com/filecoin-project/go-statestore v0.1.1
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b
@ -77,7 +78,7 @@ require (
github.com/ipfs/go-ds-measure v0.1.0
github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459
github.com/ipfs/go-fs-lock v0.0.6
github.com/ipfs/go-graphsync v0.10.4
github.com/ipfs/go-graphsync v0.10.6
github.com/ipfs/go-ipfs-blockstore v1.0.4
github.com/ipfs/go-ipfs-blocksutil v0.0.1
github.com/ipfs/go-ipfs-chunker v0.0.5
@ -90,6 +91,7 @@ require (
github.com/ipfs/go-ipfs-util v0.0.2
github.com/ipfs/go-ipld-cbor v0.0.5
github.com/ipfs/go-ipld-format v0.2.0
github.com/ipfs/go-ipld-legacy v0.1.1 // indirect
github.com/ipfs/go-log/v2 v2.3.0
github.com/ipfs/go-merkledag v0.4.1
github.com/ipfs/go-metrics-interface v0.0.1
@ -98,10 +100,10 @@ require (
github.com/ipfs/go-unixfs v0.2.6
github.com/ipfs/interface-go-ipfs-core v0.4.0
github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823
github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7
github.com/ipld/go-car/v2 v2.1.0
github.com/ipld/go-codec-dagpb v1.3.0
github.com/ipld/go-ipld-prime v0.12.3
github.com/ipld/go-ipld-selector-text-lite v0.0.0
github.com/ipld/go-ipld-prime v0.14.2
github.com/ipld/go-ipld-selector-text-lite v0.0.1
github.com/kelseyhightower/envconfig v1.4.0
github.com/libp2p/go-buffer-pool v0.0.2
github.com/libp2p/go-eventbus v0.2.1
@ -110,7 +112,6 @@ require (
github.com/libp2p/go-libp2p-core v0.9.0
github.com/libp2p/go-libp2p-discovery v0.5.1
github.com/libp2p/go-libp2p-kad-dht v0.13.0
github.com/libp2p/go-libp2p-mplex v0.4.1
github.com/libp2p/go-libp2p-noise v0.2.2
github.com/libp2p/go-libp2p-peerstore v0.3.0
github.com/libp2p/go-libp2p-pubsub v0.5.6
@ -128,7 +129,7 @@ require (
github.com/multiformats/go-multiaddr v0.4.1
github.com/multiformats/go-multiaddr-dns v0.3.1
github.com/multiformats/go-multibase v0.0.3
github.com/multiformats/go-multihash v0.0.16
github.com/multiformats/go-multihash v0.1.0
github.com/multiformats/go-varint v0.0.6
github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333
github.com/opentracing/opentracing-go v1.2.0
@ -151,14 +152,16 @@ require (
go.uber.org/fx v1.9.0
go.uber.org/multierr v1.7.0
go.uber.org/zap v1.19.1
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf
golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b // indirect
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678
golang.org/x/sys v0.0.0-20211209171907-798191bca915
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
golang.org/x/tools v0.1.5
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
gopkg.in/cheggaaa/pb.v1 v1.0.28
gotest.tools v2.2.0+incompatible
lukechampine.com/blake3 v1.1.7 // indirect
)
replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi

go.sum

@ -174,6 +174,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5O
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.2.0 h1:Fv93L3KKckEcEHR3oApXVzyBTDA8WAm6VXhPE00N3f8=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
@ -338,8 +339,8 @@ github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo=
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0/go.mod h1:73S8WSEWh9vr0fDJVnKADhfIv/d6dCbAGaAGWbdJEI8=
github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c=
github.com/filecoin-project/go-fil-markets v1.13.3 h1:iMCpG7I4fb+YLcgDnMaqZiZiyFZWNvrwHqiFPHB0/tQ=
github.com/filecoin-project/go-fil-markets v1.13.3/go.mod h1:38zuj8AgDvOfdakFLpC/syYIYgXTzkq7xqBJ6T1AuG4=
github.com/filecoin-project/go-fil-markets v1.13.4 h1:NAu+ACelR2mYsj+yJ4iLu8FGqWK50OnU5VF8axkLsSc=
github.com/filecoin-project/go-fil-markets v1.13.4/go.mod h1:aANjXD2XMHWnT2zWpyGWLsWLC24C4mHm0gRm85OpPWE=
github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM=
github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24=
github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM=
@ -363,8 +364,8 @@ github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go
github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.1-0.20210810190654-139e0e79e69e/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.1-0.20210915140513-d354ccf10379 h1:UmKkt13NrtulubqfNXhG7SQ7Pjza8BeKdNBxngqAo64=
github.com/filecoin-project/go-state-types v0.1.1-0.20210915140513-d354ccf10379/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.1 h1:LR260vya4p++atgf256W6yV3Lxl5mKrBFcEZePWQrdg=
github.com/filecoin-project/go-state-types v0.1.1/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
github.com/filecoin-project/go-statemachine v1.0.1 h1:LQ60+JDVjMdLxXmVFM2jjontzOYnfVE7u02CXV3WKSw=
github.com/filecoin-project/go-statemachine v1.0.1/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54=
@ -407,8 +408,9 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss=
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
@ -743,8 +745,8 @@ github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CE
github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0=
github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY=
github.com/ipfs/go-graphsync v0.10.0/go.mod h1:cKIshzTaa5rCZjryH5xmSKZVGX9uk1wvwGvz2WEha5Y=
github.com/ipfs/go-graphsync v0.10.4 h1:1WZhyOPxgxLvHTIC2GoLltaBrjZ+JuXC2oKAEiX8f3Y=
github.com/ipfs/go-graphsync v0.10.4/go.mod h1:oei4tnWAKnZ6LPnapZGPYVVbyiKV1UP3f8BeLU7Z4JQ=
github.com/ipfs/go-graphsync v0.10.6 h1:GkYan4EoDslceHaqYo/hxktWtuZ7VmsyRXLdSmoCcBQ=
github.com/ipfs/go-graphsync v0.10.6/go.mod h1:tQMjWNDD/vSz80YLT/VvzrUmy58aF9lR1uCwSLzjWzI=
github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk=
github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08=
github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw=
@ -803,8 +805,9 @@ github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dC
github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k=
github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA=
github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs=
github.com/ipfs/go-ipld-legacy v0.1.0 h1:wxkkc4k8cnvIGIjPO0waJCe7SHEyFgl+yQdafdjGrpA=
github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI=
github.com/ipfs/go-ipld-legacy v0.1.1 h1:BvD8PEuqwBHLTKqlGFTHSwrwFOMkVESEvwIYwR2cdcc=
github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg=
github.com/ipfs/go-ipns v0.1.2 h1:O/s/0ht+4Jl9+VoxoUo0zaHjnZUS+aBQIKTuzdZ/ucI=
github.com/ipfs/go-ipns v0.1.2/go.mod h1:ioQ0j02o6jdIVW+bmi18f4k2gRf0AV3kZ9KeHYHICnQ=
github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
@ -865,8 +868,8 @@ github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823 h1:8JMSJ0k71fU9lIUrp
github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823/go.mod h1:jSlTph+i/q1jLFoiKKeN69KGG0fXpwrcD0izu5C1Tpo=
github.com/ipld/go-car/v2 v2.0.0-beta1.0.20210721090610-5a9d1b217d25/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU=
github.com/ipld/go-car/v2 v2.0.2/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU=
github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7 h1:6Z0beJSZNsRY+7udoqUl4gQ/tqtrPuRvDySrlsvbqZA=
github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU=
github.com/ipld/go-car/v2 v2.1.0 h1:t8R/WXUSkfu1K1gpPk76mytCxsEdMjGcMIgpOq3/Cnw=
github.com/ipld/go-car/v2 v2.1.0/go.mod h1:Xr6GwkDhv8dtOtgHzOynAkIOg0t0YiPc5DxBPppWqZA=
github.com/ipld/go-codec-dagpb v1.2.0/go.mod h1:6nBN7X7h8EOsEejZGqC7tej5drsdBAXbMHyBT+Fne5s=
github.com/ipld/go-codec-dagpb v1.3.0 h1:czTcaoAuNNyIYWs6Qe01DJ+sEX7B+1Z0LcXjSatMGe8=
github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA=
@ -879,14 +882,15 @@ github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvB
github.com/ipld/go-ipld-prime v0.10.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8=
github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8=
github.com/ipld/go-ipld-prime v0.12.3-0.20210930132912-0b3aef3ca569/go.mod h1:PaeLYq8k6dJLmDUSLrzkEpoGV4PEfe/1OtFN/eALOc8=
github.com/ipld/go-ipld-prime v0.12.3 h1:furVobw7UBLQZwlEwfE26tYORy3PAK8VYSgZOSr3JMQ=
github.com/ipld/go-ipld-prime v0.12.3/go.mod h1:PaeLYq8k6dJLmDUSLrzkEpoGV4PEfe/1OtFN/eALOc8=
github.com/ipld/go-ipld-prime v0.14.2 h1:P5fO2usnisXwrN/1sR5exCgEvINg/w/27EuYPKB/zx8=
github.com/ipld/go-ipld-prime v0.14.2/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0=
github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU=
github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs=
github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0=
github.com/ipld/go-ipld-prime-proto v0.1.0/go.mod h1:11zp8f3sHVgIqtb/c9Kr5ZGqpnCLF1IVTNOez9TopzE=
github.com/ipld/go-ipld-selector-text-lite v0.0.0 h1:MLU1YUAgd3Z+RfVCXUbvxH1RQjEe+larJ9jmlW1aMgA=
github.com/ipld/go-ipld-selector-text-lite v0.0.0/go.mod h1:U2CQmFb+uWzfIEF3I1arrDa5rwtj00PrpiwwCO+k1RM=
github.com/ipld/go-ipld-selector-text-lite v0.0.1 h1:lNqFsQpBHc3p5xHob2KvEg/iM5dIFn6iw4L/Hh+kS1Y=
github.com/ipld/go-ipld-selector-text-lite v0.0.1/go.mod h1:U2CQmFb+uWzfIEF3I1arrDa5rwtj00PrpiwwCO+k1RM=
github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c=
github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4=
github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA=
@ -972,8 +976,9 @@ github.com/koron/go-ssdp v0.0.2/go.mod h1:XoLfkAiA2KeZsYh4DbHxD7h3nR2AZNqVQOa+LJ
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@ -1482,8 +1487,9 @@ github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPw
github.com/multiformats/go-multicodec v0.2.0/go.mod h1:/y4YVwkfMyry5kFbMTbLJKErhycTIftytRV+llXdyS4=
github.com/multiformats/go-multicodec v0.2.1-0.20210713081508-b421db6850ae/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ=
github.com/multiformats/go-multicodec v0.2.1-0.20210714093213-b2b5bd6fe68b/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ=
github.com/multiformats/go-multicodec v0.3.0 h1:tstDwfIjiHbnIjeM5Lp+pMrSeN+LCMsEwOrkPmWm03A=
github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ=
github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61 h1:ZrUuMKNgJ52qHPoQ+bx0h0uBfcWmN7Px+4uKSZeesiI=
github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ=
github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po=
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
@ -1492,8 +1498,8 @@ github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpK
github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg=
github.com/multiformats/go-multihash v0.0.16 h1:D2qsyy1WVculJbGv69pWmQ36ehxFoA5NiIUr1OEs6qI=
github.com/multiformats/go-multihash v0.0.16/go.mod h1:zhfEIgVnB/rPMfxgFw15ZmGoNaKyNUIE4IWHG/kC+Ag=
github.com/multiformats/go-multihash v0.1.0 h1:CgAgwqk3//SVEw3T+6DqI4mWMyRuDwZtOWcJT0q9+EA=
github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84=
github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
@ -1670,6 +1676,8 @@ github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
@ -1987,8 +1995,9 @@ golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210915214749-c084706c2272 h1:3erb+vDS8lU1sxfDHF4/hhWyaXnhIaO+7RgL4fDZORA=
golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b h1:QAqMVf3pSa6eeTsuklijukjXBlj7Es2QQplab+/RbQ4=
golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -2102,8 +2111,9 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf h1:R150MpwJIv1MpS0N/pc+NhTM8ajzvlmxlY5OYsrevXQ=
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -2225,8 +2235,9 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 h1:J27LZFQBFoihqXoegpscI10HpjZ7B5WQLLKL2FZXQKw=
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211209171907-798191bca915 h1:P+8mCzuEpyszAT6T42q0sxU+eveBAF/cJ2Kp0x6/8+0=
golang.org/x/sys v0.0.0-20211209171907-798191bca915/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M=
@ -2484,6 +2495,9 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0=
lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8=

View File

@ -0,0 +1,252 @@
package itests
import (
"context"
"fmt"
"io"
"io/ioutil"
"testing"
"time"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
api0 "github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/itests/kit"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/ipld/go-car"
textselector "github.com/ipld/go-ipld-selector-text-lite"
"github.com/stretchr/testify/require"
)
// please talk to @ribasushi or @mikeal before modifying these tests: there are
// downstream dependencies on ADL-less operation
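// Fixture notes (editorial, inferred from the values below): the CAR holds a
// 1 KiB (padded) piece whose root is a plain dag-pb node; "Links/0/Hash"
// selects the first child block purely at the data-model level, i.e. with no
// ADL reification, which is why the expected payload reads "NO ADL".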
var (
adlFixtureCar = "fixtures/adl_test.car"
adlFixtureRoot, _ = cid.Parse("bafybeiaigxwanoxyeuzyiknhrg6io6kobfbm37ozcips6qdwumub2gaomy")
adlFixtureCommp, _ = cid.Parse("baga6ea4seaqjnmnrv4qsfz2rnda54mvo5al22dwpguhn2pmep63gl7bbqqqraai")
adlFixturePieceSize = abi.PaddedPieceSize(1024)
dmSelector = api.Selector("Links/0/Hash")
dmTextSelector = textselector.Expression(dmSelector)
dmExpectedResult = "NO ADL"
dmExpectedCarBlockCount = 4
dmDagSpec = []api.DagSpec{{DataSelector: &dmSelector, ExportMerkleProof: true}}
)
func TestDMLevelPartialRetrieval(t *testing.T) {
ctx := context.Background()
policy.SetPreCommitChallengeDelay(2)
kit.QuietMiningLogs()
client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.MockProofs())
dh := kit.NewDealHarness(t, client, miner, miner)
ens.InterconnectAll().BeginMining(50 * time.Millisecond)
_, err := client.ClientImport(ctx, api.FileRef{Path: adlFixtureCar, IsCAR: true})
require.NoError(t, err)
caddr, err := client.WalletDefaultAddress(ctx)
require.NoError(t, err)
//
// test retrieval from local car 1st
require.NoError(t, testDMExportAsCar(
ctx, client, api.ExportRef{
FromLocalCAR: adlFixtureCar,
Root: adlFixtureRoot,
DAGs: dmDagSpec,
}, t.TempDir(),
))
require.NoError(t, testDMExportAsFile(
ctx, client, api.ExportRef{
FromLocalCAR: adlFixtureCar,
Root: adlFixtureRoot,
DAGs: dmDagSpec,
}, t.TempDir(),
))
//
// ensure V0 continues functioning as expected
require.NoError(t, testV0RetrievalAsCar(
ctx, client, api0.RetrievalOrder{
FromLocalCAR: adlFixtureCar,
Root: adlFixtureRoot,
DatamodelPathSelector: &dmTextSelector,
}, t.TempDir(),
))
require.NoError(t, testV0RetrievalAsFile(
ctx, client, api0.RetrievalOrder{
FromLocalCAR: adlFixtureCar,
Root: adlFixtureRoot,
DatamodelPathSelector: &dmTextSelector,
}, t.TempDir(),
))
//
// now perform a storage/retrieval deal as well, and retest
dp := dh.DefaultStartDealParams()
dp.Data = &storagemarket.DataRef{
Root: adlFixtureRoot,
PieceCid: &adlFixtureCommp,
PieceSize: adlFixturePieceSize.Unpadded(),
}
proposalCid := dh.StartDeal(ctx, dp)
// Wait for the deal to reach StorageDealCheckForAcceptance on the client
cd, err := client.ClientGetDealInfo(ctx, *proposalCid)
require.NoError(t, err)
require.Eventually(t, func() bool {
cd, _ := client.ClientGetDealInfo(ctx, *proposalCid)
return cd.State == storagemarket.StorageDealCheckForAcceptance
}, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State])
dh.WaitDealSealed(ctx, proposalCid, false, false, nil)
offers, err := client.ClientFindData(ctx, adlFixtureRoot, nil)
require.NoError(t, err)
require.NotEmpty(t, offers, "no offers")
retOrder := offers[0].Order(caddr)
retOrder.DataSelector = &dmSelector
rr, err := client.ClientRetrieve(ctx, retOrder)
require.NoError(t, err)
err = client.ClientRetrieveWait(ctx, rr.DealID)
require.NoError(t, err)
require.NoError(t, testDMExportAsCar(
ctx, client, api.ExportRef{
DealID: rr.DealID,
Root: adlFixtureRoot,
DAGs: dmDagSpec,
}, t.TempDir(),
))
require.NoError(t, testDMExportAsFile(
ctx, client, api.ExportRef{
DealID: rr.DealID,
Root: adlFixtureRoot,
DAGs: dmDagSpec,
}, t.TempDir(),
))
}
func testDMExportAsFile(ctx context.Context, client *kit.TestFullNode, expDirective api.ExportRef, tempDir string) error {
out, err := ioutil.TempFile(tempDir, "exp-test")
if err != nil {
return err
}
defer out.Close() //nolint:errcheck
fileDest := api.FileRef{
Path: out.Name(),
}
err = client.ClientExport(ctx, expDirective, fileDest)
if err != nil {
return err
}
return validateDMUnixFile(out)
}
func testV0RetrievalAsFile(ctx context.Context, client *kit.TestFullNode, retOrder api0.RetrievalOrder, tempDir string) error {
out, err := ioutil.TempFile(tempDir, "exp-test")
if err != nil {
return err
}
defer out.Close() //nolint:errcheck
cv0 := &api0.WrapperV1Full{client.FullNode} //nolint:govet
err = cv0.ClientRetrieve(ctx, retOrder, &api.FileRef{
Path: out.Name(),
})
if err != nil {
return err
}
return validateDMUnixFile(out)
}
func validateDMUnixFile(r io.Reader) error {
data, err := io.ReadAll(r)
if err != nil {
return err
}
if string(data) != dmExpectedResult {
return fmt.Errorf("retrieved data mismatch: expected '%s' got '%s'", dmExpectedResult, data)
}
return nil
}
func testDMExportAsCar(ctx context.Context, client *kit.TestFullNode, expDirective api.ExportRef, tempDir string) error {
out, err := ioutil.TempFile(tempDir, "exp-test")
if err != nil {
return err
}
defer out.Close() //nolint:errcheck
carDest := api.FileRef{
IsCAR: true,
Path: out.Name(),
}
err = client.ClientExport(ctx, expDirective, carDest)
if err != nil {
return err
}
return validateDMCar(out)
}
func testV0RetrievalAsCar(ctx context.Context, client *kit.TestFullNode, retOrder api0.RetrievalOrder, tempDir string) error {
out, err := ioutil.TempFile(tempDir, "exp-test")
if err != nil {
return err
}
defer out.Close() //nolint:errcheck
cv0 := &api0.WrapperV1Full{client.FullNode} //nolint:govet
err = cv0.ClientRetrieve(ctx, retOrder, &api.FileRef{
Path: out.Name(),
IsCAR: true,
})
if err != nil {
return err
}
return validateDMCar(out)
}
func validateDMCar(r io.Reader) error {
cr, err := car.NewCarReader(r)
if err != nil {
return err
}
if len(cr.Header.Roots) != 1 {
return fmt.Errorf("expected a single root in result car, got %d", len(cr.Header.Roots))
} else if cr.Header.Roots[0].String() != adlFixtureRoot.String() {
return fmt.Errorf("expected root cid '%s', got '%s'", adlFixtureRoot.String(), cr.Header.Roots[0].String())
}
blks := make([]blocks.Block, 0)
for {
b, err := cr.Next()
if err == io.EOF {
break
} else if err != nil {
return err
}
blks = append(blks, b)
}
if len(blks) != dmExpectedCarBlockCount {
return fmt.Errorf("expected a car file with %d blocks, got one with %d instead", dmExpectedCarBlockCount, len(blks))
}
data := fmt.Sprintf("%s%s", blks[2].RawData(), blks[3].RawData())
if data != dmExpectedResult {
return fmt.Errorf("retrieved data mismatch: expected '%s' got '%s'", dmExpectedResult, data)
}
return nil
}
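The helpers above spell out the split v1 client flow end to end. As a minimal
sketch (assuming only what the test itself uses: a connected v1 FullNode, a
prepared retrieval order, and the imports already present above), the three
calls compose like this:

func retrieveToCar(ctx context.Context, c api.FullNode, order api.RetrievalOrder, root cid.Cid, outPath string) error {
	// Start the retrieval deal; the returned DealID identifies it from here on.
	rr, err := c.ClientRetrieve(ctx, order)
	if err != nil {
		return err
	}
	// Block until the transfer has fully landed in the local blockstore.
	if err := c.ClientRetrieveWait(ctx, rr.DealID); err != nil {
		return err
	}
	// Export the retrieved DAG from the local node as a CAR file.
	return c.ClientExport(ctx,
		api.ExportRef{Root: root, DealID: rr.DealID},
		api.FileRef{Path: outPath, IsCAR: true},
	)
}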

View File

@ -9,6 +9,8 @@ import (
"testing"
"time"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
@ -18,7 +20,6 @@ import (
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/ipld/go-car"
textselector "github.com/ipld/go-ipld-selector-text-lite"
"github.com/stretchr/testify/require"
)
@ -28,10 +29,11 @@ var (
sourceCar = "../build/genesis/mainnet.car"
carRoot, _ = cid.Parse("bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2")
carCommp, _ = cid.Parse("baga6ea4seaqmrivgzei3fmx5qxtppwankmtou6zvigyjaveu3z2zzwhysgzuina")
selectedCid, _ = cid.Parse("bafkqaetgnfwc6mjpon2g64tbm5sxa33xmvza")
carPieceSize = abi.PaddedPieceSize(2097152)
textSelector = textselector.Expression("8/1/8/1/0/1/0")
textSelectorNonLink = textselector.Expression("8/1/8/1/0/1")
textSelectorNonexistent = textselector.Expression("42")
textSelector = api.Selector("8/1/8/1/0/1/0")
textSelectorNonLink = api.Selector("8/1/8/1/0/1")
textSelectorNonexistent = api.Selector("42")
expectedResult = "fil/1/storagepower"
)
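// Path note (inferred from the assertions below): the integer segments address
// successive node positions inside the genesis state DAG, bottoming out at the
// inline block that carries "fil/1/storagepower". textSelectorNonLink stops one
// step short of a block boundary, which the API is expected to reject.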
@ -53,74 +55,79 @@ func TestPartialRetrieval(t *testing.T) {
require.NoError(t, err)
// first test retrieval from local car, then do an actual deal
for _, fullCycle := range []bool{false, true} {
for _, exportMerkleProof := range []bool{false, true} {
for _, fullCycle := range []bool{false, true} {
var retOrder api.RetrievalOrder
var retOrder api.RetrievalOrder
var eref api.ExportRef
if !fullCycle {
if !fullCycle {
eref.FromLocalCAR = sourceCar
} else {
dp := dh.DefaultStartDealParams()
dp.Data = &storagemarket.DataRef{
// FIXME: figure out how to do this with an online partial transfer
TransferType: storagemarket.TTManual,
Root: carRoot,
PieceCid: &carCommp,
PieceSize: carPieceSize.Unpadded(),
}
proposalCid := dh.StartDeal(ctx, dp)
retOrder.FromLocalCAR = sourceCar
retOrder.Root = carRoot
// Wait for the deal to reach StorageDealCheckForAcceptance on the client
cd, err := client.ClientGetDealInfo(ctx, *proposalCid)
require.NoError(t, err)
require.Eventually(t, func() bool {
cd, _ := client.ClientGetDealInfo(ctx, *proposalCid)
return cd.State == storagemarket.StorageDealCheckForAcceptance
}, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State])
} else {
err = miner.DealsImportData(ctx, *proposalCid, sourceCar)
require.NoError(t, err)
dp := dh.DefaultStartDealParams()
dp.Data = &storagemarket.DataRef{
// FIXME: figure out how to do this with an online partial transfer
TransferType: storagemarket.TTManual,
Root: carRoot,
PieceCid: &carCommp,
PieceSize: carPieceSize.Unpadded(),
// Wait for the deal to be published, we should be able to start retrieval right away
dh.WaitDealPublished(ctx, proposalCid)
offers, err := client.ClientFindData(ctx, carRoot, nil)
require.NoError(t, err)
require.NotEmpty(t, offers, "no offers")
retOrder = offers[0].Order(caddr)
}
proposalCid := dh.StartDeal(ctx, dp)
// Wait for the deal to reach StorageDealCheckForAcceptance on the client
cd, err := client.ClientGetDealInfo(ctx, *proposalCid)
require.NoError(t, err)
require.Eventually(t, func() bool {
cd, _ := client.ClientGetDealInfo(ctx, *proposalCid)
return cd.State == storagemarket.StorageDealCheckForAcceptance
}, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State])
retOrder.DataSelector = &textSelector
eref.DAGs = append(eref.DAGs, api.DagSpec{
DataSelector: &textSelector,
ExportMerkleProof: exportMerkleProof,
})
eref.Root = carRoot
err = miner.DealsImportData(ctx, *proposalCid, sourceCar)
require.NoError(t, err)
// test retrieval of either data or constructing a partial selective-car
for _, retrieveAsCar := range []bool{false, true} {
outFile, err := ioutil.TempFile(t.TempDir(), "ret-file")
require.NoError(t, err)
defer outFile.Close() //nolint:errcheck
// Wait for the deal to be published, we should be able to start retrieval right away
dh.WaitDealPublished(ctx, proposalCid)
require.NoError(t, testGenesisRetrieval(
ctx,
client,
retOrder,
eref,
&api.FileRef{
Path: outFile.Name(),
IsCAR: retrieveAsCar,
},
outFile,
))
offers, err := client.ClientFindData(ctx, carRoot, nil)
require.NoError(t, err)
require.NotEmpty(t, offers, "no offers")
retOrder = offers[0].Order(caddr)
}
retOrder.DatamodelPathSelector = &textSelector
// test retrieval of either data or constructing a partial selective-car
for _, retrieveAsCar := range []bool{false, true} {
outFile, err := ioutil.TempFile(t.TempDir(), "ret-file")
require.NoError(t, err)
defer outFile.Close() //nolint:errcheck
require.NoError(t, testGenesisRetrieval(
ctx,
client,
retOrder,
&api.FileRef{
Path: outFile.Name(),
IsCAR: retrieveAsCar,
},
outFile,
))
// UGH if I do not sleep here, I get things like:
/*
retrieval failed: Retrieve failed: there is an active retrieval deal with peer 12D3KooWK9fB9a3HZ4PQLVmEQ6pweMMn5CAyKtumB71CPTnuBDi6 for payload CID bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2 (retrieval deal ID 1631259332180384709, state DealStatusFinalizingBlockstore) - existing deal must be cancelled before starting a new retrieval deal:
github.com/filecoin-project/lotus/node/impl/client.(*API).ClientRetrieve
/home/circleci/project/node/impl/client/client.go:774
*/
time.Sleep(time.Second)
// UGH if I do not sleep here, I get things like:
/*
retrieval failed: Retrieve failed: there is an active retrieval deal with peer 12D3KooWK9fB9a3HZ4PQLVmEQ6pweMMn5CAyKtumB71CPTnuBDi6 for payload CID bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2 (retrieval deal ID 1631259332180384709, state DealStatusFinalizingBlockstore) - existing deal must be cancelled before starting a new retrieval deal:
github.com/filecoin-project/lotus/node/impl/client.(*API).ClientRetrieve
/home/circleci/project/node/impl/client/client.go:774
*/
time.Sleep(time.Second)
}
}
}
@ -131,14 +138,18 @@ func TestPartialRetrieval(t *testing.T) {
ctx,
client,
api.RetrievalOrder{
FromLocalCAR: sourceCar,
Root: carRoot,
DatamodelPathSelector: &textSelectorNonexistent,
Root: carRoot,
DataSelector: &textSelectorNonexistent,
},
api.ExportRef{
Root: carRoot,
FromLocalCAR: sourceCar,
DAGs: []api.DagSpec{{DataSelector: &textSelectorNonexistent}},
},
&api.FileRef{},
nil,
),
fmt.Sprintf("retrieval failed: path selection '%s' does not match a node within %s", textSelectorNonexistent, carRoot),
fmt.Sprintf("parsing dag spec: path selection does not match a node within %s", carRoot),
)
// ensure non-boundary retrievals fail
@ -148,18 +159,22 @@ func TestPartialRetrieval(t *testing.T) {
ctx,
client,
api.RetrievalOrder{
FromLocalCAR: sourceCar,
Root: carRoot,
DatamodelPathSelector: &textSelectorNonLink,
Root: carRoot,
DataSelector: &textSelectorNonLink,
},
api.ExportRef{
Root: carRoot,
FromLocalCAR: sourceCar,
DAGs: []api.DagSpec{{DataSelector: &textSelectorNonLink}},
},
&api.FileRef{},
nil,
),
fmt.Sprintf("retrieval failed: error while locating partial retrieval sub-root: unsupported selection path '%s' does not correspond to a block boundary (a.k.a. CID link)", textSelectorNonLink),
fmt.Sprintf("parsing dag spec: error while locating partial retrieval sub-root: unsupported selection path '%s' does not correspond to a block boundary (a.k.a. CID link)", textSelectorNonLink),
)
}
func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrder api.RetrievalOrder, retRef *api.FileRef, outFile *os.File) error {
func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrder api.RetrievalOrder, eref api.ExportRef, retRef *api.FileRef, outFile *os.File) error {
if retOrder.Total.Nil() {
retOrder.Total = big.Zero()
@ -168,7 +183,19 @@ func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrde
retOrder.UnsealPrice = big.Zero()
}
err := client.ClientRetrieve(ctx, retOrder, retRef)
if eref.FromLocalCAR == "" {
rr, err := client.ClientRetrieve(ctx, retOrder)
if err != nil {
return err
}
eref.DealID = rr.DealID
if err := client.ClientRetrieveWait(ctx, rr.DealID); err != nil {
return xerrors.Errorf("retrieval wait: %w", err)
}
}
err := client.ClientExport(ctx, eref, *retRef)
if err != nil {
return err
}
@ -190,8 +217,10 @@ func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrde
if len(cr.Header.Roots) != 1 {
return fmt.Errorf("expected a single root in result car, got %d", len(cr.Header.Roots))
} else if cr.Header.Roots[0].String() != carRoot.String() {
} else if eref.DAGs[0].ExportMerkleProof && cr.Header.Roots[0].String() != carRoot.String() {
return fmt.Errorf("expected root cid '%s', got '%s'", carRoot.String(), cr.Header.Roots[0].String())
} else if !eref.DAGs[0].ExportMerkleProof && cr.Header.Roots[0].String() != selectedCid.String() {
return fmt.Errorf("expected root cid '%s', got '%s'", selectedCid.String(), cr.Header.Roots[0].String())
}
blks := make([]blocks.Block, 0)
@ -206,11 +235,11 @@ func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrde
blks = append(blks, b)
}
if len(blks) != 3 {
return fmt.Errorf("expected a car file with 3 blocks, got one with %d instead", len(blks))
if (eref.DAGs[0].ExportMerkleProof && len(blks) != 3) || (!eref.DAGs[0].ExportMerkleProof && len(blks) != 1) {
return fmt.Errorf("expected a car file with 3/1 blocks, got one with %d instead", len(blks))
}
data = blks[2].RawData()
data = blks[len(blks)-1].RawData()
}
if string(data) != expectedResult {

Binary file not shown.

View File

@ -10,6 +10,7 @@ import (
"testing"
"time"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
@ -320,17 +321,45 @@ func (dh *DealHarness) PerformRetrieval(ctx context.Context, deal *cid.Cid, root
caddr, err := dh.client.WalletDefaultAddress(ctx)
require.NoError(dh.t, err)
ref := &api.FileRef{
Path: carFile.Name(),
IsCAR: carExport,
}
updates, err := dh.client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref)
updatesCtx, cancel := context.WithCancel(ctx)
updates, err := dh.client.ClientGetRetrievalUpdates(updatesCtx)
require.NoError(dh.t, err)
for update := range updates {
require.Emptyf(dh.t, update.Err, "retrieval failed: %s", update.Err)
retrievalRes, err := dh.client.ClientRetrieve(ctx, offers[0].Order(caddr))
require.NoError(dh.t, err)
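// Drain retrieval updates until our deal completes or fails; the label lets
// the success case break out of the select and the loop in one go.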
consumeEvents:
for {
var evt api.RetrievalInfo
select {
case <-updatesCtx.Done():
dh.t.Fatal("Retrieval Timed Out")
case evt = <-updates:
if evt.ID != retrievalRes.DealID {
continue
}
}
switch evt.Status {
case retrievalmarket.DealStatusCompleted:
break consumeEvents
case retrievalmarket.DealStatusRejected:
dh.t.Fatalf("Retrieval Proposal Rejected: %s", evt.Message)
case
retrievalmarket.DealStatusDealNotFound,
retrievalmarket.DealStatusErrored:
dh.t.Fatalf("Retrieval Error: %s", evt.Message)
}
}
cancel()
require.NoError(dh.t, dh.client.ClientExport(ctx,
api.ExportRef{
Root: root,
DealID: retrievalRes.DealID,
},
api.FileRef{
Path: carFile.Name(),
IsCAR: carExport,
}))
ret := carFile.Name()
if carExport {

View File

@ -3569,9 +3569,9 @@
"integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU="
},
"color-string": {
"version": "1.5.3",
"resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.3.tgz",
"integrity": "sha512-dC2C5qeWoYkxki5UAXapdjqO672AM4vZuPGRQfO8b5HKuKGBbKWpITyDYN7TOFKvRW7kOgAn3746clDBMDJyQw==",
"version": "1.6.0",
"resolved": "https://registry.npmjs.org/color-string/-/color-string-1.6.0.tgz",
"integrity": "sha512-c/hGS+kRWJutUBEngKKmk4iH3sD59MBkoxVapS/0wgpCz2u7XsNloxknyvBhzwEs1IbV36D9PwqLPJ2DTu3vMA==",
"requires": {
"color-name": "^1.0.0",
"simple-swizzle": "^0.2.2"
@ -7814,9 +7814,9 @@
"integrity": "sha512-T/zvzYRfbVojPWahDsE5evJdHb3oJoQfFbsrKM7w5Zcs++Tr257tia3BmMP8XYVjp1S9RZXQMh7gao96BlqZOw=="
},
"ws": {
"version": "5.2.2",
"resolved": "https://registry.npmjs.org/ws/-/ws-5.2.2.tgz",
"integrity": "sha512-jaHFD6PFv6UgoIVda6qZllptQsMlDEJkTQcybzzXDYM1XO9Y8em691FGMPmM46WGyLU4z9KMgQN+qrux/nhlHA==",
"version": "5.2.3",
"resolved": "https://registry.npmjs.org/ws/-/ws-5.2.3.tgz",
"integrity": "sha512-jZArVERrMsKUatIdnLzqvcfydI85dvd/Fp1u/VOpfdDWQ4c9qWXe+VIeAbQ5FrDwciAkr+lzofXLz3Kuf26AOA==",
"requires": {
"async-limiter": "~1.0.0"
}
@ -9194,9 +9194,9 @@
"integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A="
},
"path-parse": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz",
"integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw=="
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
"integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="
},
"path-to-regexp": {
"version": "0.1.7",
@ -9228,6 +9228,11 @@
"resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz",
"integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns="
},
"picocolors": {
"version": "0.2.1",
"resolved": "https://registry.npmjs.org/picocolors/-/picocolors-0.2.1.tgz",
"integrity": "sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA=="
},
"pify": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz",
@ -9354,27 +9359,18 @@
"integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs="
},
"postcss": {
"version": "7.0.17",
"resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.17.tgz",
"integrity": "sha512-546ZowA+KZ3OasvQZHsbuEpysvwTZNGJv9EfyCQdsIDltPSWHAeTQ5fQy/Npi2ZDtLI3zs7Ps/p6wThErhm9fQ==",
"version": "7.0.39",
"resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.39.tgz",
"integrity": "sha512-yioayjNbHn6z1/Bywyb2Y4s3yvDAeXGOyxqD+LnVOinq6Mdmd++SW2wUNVzavyyHxd6+DxzWGIuosg6P1Rj8uA==",
"requires": {
"chalk": "^2.4.2",
"source-map": "^0.6.1",
"supports-color": "^6.1.0"
"picocolors": "^0.2.1",
"source-map": "^0.6.1"
},
"dependencies": {
"source-map": {
"version": "0.6.1",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
"integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
},
"supports-color": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz",
"integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==",
"requires": {
"has-flag": "^3.0.0"
}
}
}
},
@ -11057,9 +11053,9 @@
}
},
"ws": {
"version": "5.2.2",
"resolved": "https://registry.npmjs.org/ws/-/ws-5.2.2.tgz",
"integrity": "sha512-jaHFD6PFv6UgoIVda6qZllptQsMlDEJkTQcybzzXDYM1XO9Y8em691FGMPmM46WGyLU4z9KMgQN+qrux/nhlHA==",
"version": "5.2.3",
"resolved": "https://registry.npmjs.org/ws/-/ws-5.2.3.tgz",
"integrity": "sha512-jZArVERrMsKUatIdnLzqvcfydI85dvd/Fp1u/VOpfdDWQ4c9qWXe+VIeAbQ5FrDwciAkr+lzofXLz3Kuf26AOA==",
"requires": {
"async-limiter": "~1.0.0"
}
@ -12203,9 +12199,9 @@
}
},
"tmpl": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.4.tgz",
"integrity": "sha1-I2QN17QtAEM5ERQIIOXPRA5SHdE="
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz",
"integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw=="
},
"to-arraybuffer": {
"version": "1.0.1",
@ -12523,9 +12519,9 @@
}
},
"url-parse": {
"version": "1.4.7",
"resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.4.7.tgz",
"integrity": "sha512-d3uaVyzDB9tQoSXFvuSUNFibTd9zxd2bkVrDRvF5TmvWWQwqE4lgYJ5m+x1DbecWkw+LK4RNl2CU1hHuOKPVlg==",
"version": "1.5.3",
"resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.3.tgz",
"integrity": "sha512-IIORyIQD9rvj0A4CLWsHkBBJuNqWpFQe224b6j9t/ABmquIS0qDU2pY6kl6AuOrL5OkCXHMCFNe1jBcuAggjvQ==",
"requires": {
"querystringify": "^2.1.1",
"requires-port": "^1.0.0"
@ -13164,9 +13160,9 @@
}
},
"ws": {
"version": "6.2.1",
"resolved": "https://registry.npmjs.org/ws/-/ws-6.2.1.tgz",
"integrity": "sha512-GIyAXC2cB7LjvpgMt9EKS2ldqr0MTrORaleiOno6TweZ6r3TKtoFQWay/2PceJ3RuBasOHzXNn5Lrw1X0bEjqA==",
"version": "6.2.2",
"resolved": "https://registry.npmjs.org/ws/-/ws-6.2.2.tgz",
"integrity": "sha512-zmhltoSR8u1cnDsD43TX59mzoMZsLKqUweyYBAIvTngR3shc0W6aOZylZmq/7hqyVxPdi+5Ud2QInblgyE72fw==",
"requires": {
"async-limiter": "~1.0.0"
}

View File

@ -3,34 +3,43 @@ package dagstore
import (
"context"
"fmt"
"io"
"github.com/filecoin-project/dagstore/throttle"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
"github.com/filecoin-project/dagstore/mount"
"github.com/filecoin-project/dagstore/throttle"
"github.com/filecoin-project/go-fil-markets/piecestore"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/shared"
"github.com/filecoin-project/go-state-types/abi"
)
//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_lotus_accessor.go -package=mock_dagstore . MinerAPI
type MinerAPI interface {
FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error)
FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (mount.Reader, error)
GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (uint64, error)
IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, error)
Start(ctx context.Context) error
}
type SectorAccessor interface {
retrievalmarket.SectorAccessor
UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error)
}
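// UnsealSectorAt mirrors UnsealSector but returns a mount.Reader, i.e. a
// seekable, random-access handle; this is what lets the mount below advertise
// AccessSeek/AccessRandom instead of sequential-only access.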
type minerAPI struct {
pieceStore piecestore.PieceStore
sa retrievalmarket.SectorAccessor
sa SectorAccessor
throttle throttle.Throttler
readyMgr *shared.ReadyManager
}
var _ MinerAPI = (*minerAPI)(nil)
func NewMinerAPI(store piecestore.PieceStore, sa retrievalmarket.SectorAccessor, concurrency int) MinerAPI {
func NewMinerAPI(store piecestore.PieceStore, sa SectorAccessor, concurrency int) MinerAPI {
return &minerAPI{
pieceStore: store,
sa: sa,
@ -91,7 +100,7 @@ func (m *minerAPI) IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, erro
return false, nil
}
func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error) {
func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (mount.Reader, error) {
err := m.readyMgr.AwaitReady()
if err != nil {
return nil, err
@ -117,7 +126,7 @@ func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io
deal := deal
// Throttle this path to avoid flooding the storage subsystem.
var reader io.ReadCloser
var reader mount.Reader
err := m.throttle.Do(ctx, func(ctx context.Context) (err error) {
isUnsealed, err := m.sa.IsUnsealed(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded())
if err != nil {
@ -127,7 +136,7 @@ func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io
return nil
}
// Because we know we have an unsealed copy, this UnsealSector call will actually not perform any unsealing.
reader, err = m.sa.UnsealSector(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded())
reader, err = m.sa.UnsealSectorAt(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded())
return err
})
@ -149,7 +158,7 @@ func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io
// block for a long time with the current PoRep
//
// This path is unthrottled.
reader, err := m.sa.UnsealSector(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded())
reader, err := m.sa.UnsealSectorAt(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded())
if err != nil {
lastErr = xerrors.Errorf("failed to unseal deal %d: %w", deal.DealID, err)
log.Warn(lastErr.Error())

View File

@ -15,6 +15,7 @@ import (
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
"github.com/filecoin-project/dagstore/mount"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-actors/actors/builtin/paych"
@ -203,6 +204,10 @@ type mockRPN struct {
}
func (m *mockRPN) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) {
return m.UnsealSectorAt(ctx, sectorID, offset, length)
}
func (m *mockRPN) UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) {
atomic.AddInt32(&m.calls, 1)
m.lk.RLock()
defer m.lk.RUnlock()
@ -211,7 +216,13 @@ func (m *mockRPN) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, o
if !ok {
panic("sector not found")
}
return io.NopCloser(bytes.NewBuffer([]byte(data))), nil
return struct {
io.ReadCloser
io.ReaderAt
io.Seeker
}{
ReadCloser: io.NopCloser(bytes.NewBuffer([]byte(data[:]))),
}, nil
}
func (m *mockRPN) IsUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) {

View File

@ -1,96 +1,96 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: lotusaccessor.go
// Source: github.com/filecoin-project/lotus/markets/dagstore (interfaces: MinerAPI)
// Package mock_dagstore is a generated GoMock package.
package mock_dagstore
import (
context "context"
io "io"
reflect "reflect"
mount "github.com/filecoin-project/dagstore/mount"
gomock "github.com/golang/mock/gomock"
cid "github.com/ipfs/go-cid"
)
// MockLotusAccessor is a mock of LotusAccessor interface.
type MockLotusAccessor struct {
// MockMinerAPI is a mock of MinerAPI interface.
type MockMinerAPI struct {
ctrl *gomock.Controller
recorder *MockLotusAccessorMockRecorder
recorder *MockMinerAPIMockRecorder
}
// MockLotusAccessorMockRecorder is the mock recorder for MockLotusAccessor.
type MockLotusAccessorMockRecorder struct {
mock *MockLotusAccessor
// MockMinerAPIMockRecorder is the mock recorder for MockMinerAPI.
type MockMinerAPIMockRecorder struct {
mock *MockMinerAPI
}
// NewMockLotusAccessor creates a new mock instance.
func NewMockLotusAccessor(ctrl *gomock.Controller) *MockLotusAccessor {
mock := &MockLotusAccessor{ctrl: ctrl}
mock.recorder = &MockLotusAccessorMockRecorder{mock}
// NewMockMinerAPI creates a new mock instance.
func NewMockMinerAPI(ctrl *gomock.Controller) *MockMinerAPI {
mock := &MockMinerAPI{ctrl: ctrl}
mock.recorder = &MockMinerAPIMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockLotusAccessor) EXPECT() *MockLotusAccessorMockRecorder {
func (m *MockMinerAPI) EXPECT() *MockMinerAPIMockRecorder {
return m.recorder
}
// FetchUnsealedPiece mocks base method.
func (m *MockLotusAccessor) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error) {
func (m *MockMinerAPI) FetchUnsealedPiece(arg0 context.Context, arg1 cid.Cid) (mount.Reader, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FetchUnsealedPiece", ctx, pieceCid)
ret0, _ := ret[0].(io.ReadCloser)
ret := m.ctrl.Call(m, "FetchUnsealedPiece", arg0, arg1)
ret0, _ := ret[0].(mount.Reader)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FetchUnsealedPiece indicates an expected call of FetchUnsealedPiece.
func (mr *MockLotusAccessorMockRecorder) FetchUnsealedPiece(ctx, pieceCid interface{}) *gomock.Call {
func (mr *MockMinerAPIMockRecorder) FetchUnsealedPiece(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchUnsealedPiece", reflect.TypeOf((*MockLotusAccessor)(nil).FetchUnsealedPiece), ctx, pieceCid)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchUnsealedPiece", reflect.TypeOf((*MockMinerAPI)(nil).FetchUnsealedPiece), arg0, arg1)
}
// GetUnpaddedCARSize mocks base method.
func (m *MockLotusAccessor) GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (uint64, error) {
func (m *MockMinerAPI) GetUnpaddedCARSize(arg0 context.Context, arg1 cid.Cid) (uint64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetUnpaddedCARSize", ctx, pieceCid)
ret := m.ctrl.Call(m, "GetUnpaddedCARSize", arg0, arg1)
ret0, _ := ret[0].(uint64)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetUnpaddedCARSize indicates an expected call of GetUnpaddedCARSize.
func (mr *MockLotusAccessorMockRecorder) GetUnpaddedCARSize(ctx, pieceCid interface{}) *gomock.Call {
func (mr *MockMinerAPIMockRecorder) GetUnpaddedCARSize(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUnpaddedCARSize", reflect.TypeOf((*MockLotusAccessor)(nil).GetUnpaddedCARSize), ctx, pieceCid)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUnpaddedCARSize", reflect.TypeOf((*MockMinerAPI)(nil).GetUnpaddedCARSize), arg0, arg1)
}
// IsUnsealed mocks base method.
func (m *MockLotusAccessor) IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, error) {
func (m *MockMinerAPI) IsUnsealed(arg0 context.Context, arg1 cid.Cid) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsUnsealed", ctx, pieceCid)
ret := m.ctrl.Call(m, "IsUnsealed", arg0, arg1)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// IsUnsealed indicates an expected call of IsUnsealed.
func (mr *MockLotusAccessorMockRecorder) IsUnsealed(ctx, pieceCid interface{}) *gomock.Call {
func (mr *MockMinerAPIMockRecorder) IsUnsealed(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnsealed", reflect.TypeOf((*MockLotusAccessor)(nil).IsUnsealed), ctx, pieceCid)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnsealed", reflect.TypeOf((*MockMinerAPI)(nil).IsUnsealed), arg0, arg1)
}
// Start mocks base method.
func (m *MockLotusAccessor) Start(ctx context.Context) error {
func (m *MockMinerAPI) Start(arg0 context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Start", ctx)
ret := m.ctrl.Call(m, "Start", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Start indicates an expected call of Start.
func (mr *MockLotusAccessorMockRecorder) Start(ctx interface{}) *gomock.Call {
func (mr *MockMinerAPIMockRecorder) Start(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockLotusAccessor)(nil).Start), ctx)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockMinerAPI)(nil).Start), arg0)
}

View File

@ -2,7 +2,6 @@ package dagstore
import (
"context"
"io"
"net/url"
"github.com/ipfs/go-cid"
@ -57,19 +56,15 @@ func (l *LotusMount) Deserialize(u *url.URL) error {
}
func (l *LotusMount) Fetch(ctx context.Context) (mount.Reader, error) {
r, err := l.API.FetchUnsealedPiece(ctx, l.PieceCid)
if err != nil {
return nil, xerrors.Errorf("failed to fetch unsealed piece %s: %w", l.PieceCid, err)
}
return &readCloser{r}, nil
return l.API.FetchUnsealedPiece(ctx, l.PieceCid)
}
func (l *LotusMount) Info() mount.Info {
return mount.Info{
Kind: mount.KindRemote,
AccessSequential: true,
AccessSeek: false,
AccessRandom: false,
AccessSeek: true,
AccessRandom: true,
}
}
@ -94,17 +89,3 @@ func (l *LotusMount) Stat(ctx context.Context) (mount.Stat, error) {
Ready: isUnsealed,
}, nil
}
type readCloser struct {
io.ReadCloser
}
var _ mount.Reader = (*readCloser)(nil)
func (r *readCloser) ReadAt(p []byte, off int64) (n int, err error) {
return 0, xerrors.Errorf("ReadAt called but not implemented")
}
func (r *readCloser) Seek(offset int64, whence int) (int64, error) {
return 0, xerrors.Errorf("Seek called but not implemented")
}
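The deleted readCloser shim only existed because FetchUnsealedPiece used to
return a plain io.ReadCloser while mount.Reader demands more. With the API now
returning mount.Reader directly the shim is dead code; where tests still start
from a bare ReadCloser, they adapt it by embedding, roughly like this sketch
(nil ReaderAt/Seeker will panic if exercised, exactly as in the fixtures below):

func readerFromCloser(rc io.ReadCloser) mount.Reader {
	// Embedding satisfies the interface; only sequential reads actually work.
	return struct {
		io.ReadCloser
		io.ReaderAt
		io.Seeker
	}{ReadCloser: rc}
}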

View File

@ -2,6 +2,7 @@ package dagstore
import (
"context"
"io"
"io/ioutil"
"net/url"
"strings"
@ -12,7 +13,6 @@ import (
"github.com/stretchr/testify/require"
"github.com/filecoin-project/dagstore/mount"
mock_dagstore "github.com/filecoin-project/lotus/markets/dagstore/mocks"
)
@ -26,12 +26,31 @@ func TestLotusMount(t *testing.T) {
defer mockCtrl.Finish()
// create a mock lotus api that returns the reader we want
mockLotusMountAPI := mock_dagstore.NewMockLotusAccessor(mockCtrl)
mockLotusMountAPI := mock_dagstore.NewMockMinerAPI(mockCtrl)
mockLotusMountAPI.EXPECT().IsUnsealed(gomock.Any(), cid).Return(true, nil).Times(1)
mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(&readCloser{ioutil.NopCloser(strings.NewReader("testing"))}, nil).Times(1)
mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(&readCloser{ioutil.NopCloser(strings.NewReader("testing"))}, nil).Times(1)
mr1 := struct {
io.ReadCloser
io.ReaderAt
io.Seeker
}{
ReadCloser: ioutil.NopCloser(strings.NewReader("testing")),
ReaderAt: nil,
Seeker: nil,
}
mr2 := struct {
io.ReadCloser
io.ReaderAt
io.Seeker
}{
ReadCloser: ioutil.NopCloser(strings.NewReader("testing")),
ReaderAt: nil,
Seeker: nil,
}
mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(mr1, nil).Times(1)
mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(mr2, nil).Times(1)
mockLotusMountAPI.EXPECT().GetUnpaddedCARSize(ctx, cid).Return(uint64(100), nil).Times(1)
mnt, err := NewLotusMount(cid, mockLotusMountAPI)
@ -109,7 +128,7 @@ func TestLotusMountRegistration(t *testing.T) {
// when test is done, assert expectations on all mock objects.
defer mockCtrl.Finish()
mockLotusMountAPI := mock_dagstore.NewMockLotusAccessor(mockCtrl)
mockLotusMountAPI := mock_dagstore.NewMockMinerAPI(mockCtrl)
registry := mount.NewRegistry()
err = registry.Register(lotusScheme, mountTemplate(mockLotusMountAPI))
require.NoError(t, err)

View File

@ -2,13 +2,16 @@ package dagstore
import (
"context"
"io"
"testing"
"github.com/filecoin-project/dagstore"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/dagstore"
"github.com/filecoin-project/dagstore/mount"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes"
tut "github.com/filecoin-project/go-fil-markets/shared_testutil"
"github.com/filecoin-project/go-fil-markets/storagemarket"
@ -93,7 +96,7 @@ func TestShardRegistration(t *testing.T) {
cfg := config.DefaultStorageMiner().DAGStore
cfg.RootDir = t.TempDir()
mapi := NewMinerAPI(ps, sa, 10)
mapi := NewMinerAPI(ps, &wrappedSA{sa}, 10)
dagst, w, err := NewDAGStore(cfg, mapi)
require.NoError(t, err)
require.NotNil(t, dagst)
@ -119,3 +122,25 @@ func TestShardRegistration(t *testing.T) {
// ps.VerifyExpectations(t)
}
type wrappedSA struct {
retrievalmarket.SectorAccessor
}
func (w *wrappedSA) UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) {
r, err := w.UnsealSector(ctx, sectorID, pieceOffset, length)
if err != nil {
return nil, err
}
return struct {
io.ReadCloser
io.Seeker
io.ReaderAt
}{
ReadCloser: r,
Seeker: nil,
ReaderAt: nil,
}, err
}
var _ SectorAccessor = &wrappedSA{}

View File

@ -3,21 +3,19 @@ package dagstore
import (
"bytes"
"context"
"io"
"os"
"testing"
"time"
"github.com/ipfs/go-cid"
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/dagstore"
"github.com/filecoin-project/dagstore/mount"
"github.com/filecoin-project/dagstore/shard"
"github.com/ipfs/go-cid"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/lotus/node/config"
)
// TestWrapperAcquireRecovery verifies that if acquire shard returns a "not found"
@ -191,7 +189,7 @@ func (m mockLotusMount) Start(ctx context.Context) error {
return nil
}
func (m mockLotusMount) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error) {
func (m mockLotusMount) FetchUnsealedPiece(context.Context, cid.Cid) (mount.Reader, error) {
panic("implement me")
}

View File

@ -4,23 +4,24 @@ import (
"context"
"io"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/dagstore/mount"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-state-types/abi"
specstorage "github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/chain/types"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
"github.com/filecoin-project/lotus/markets/dagstore"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/storage/sectorblocks"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-state-types/abi"
specstorage "github.com/filecoin-project/specs-storage/storage"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
)
var log = logging.Logger("sectoraccessor")
@ -34,12 +35,16 @@ type sectorAccessor struct {
var _ retrievalmarket.SectorAccessor = (*sectorAccessor)(nil)
func NewSectorAccessor(maddr dtypes.MinerAddress, secb sectorblocks.SectorBuilder, pp sectorstorage.PieceProvider, full v1api.FullNode) retrievalmarket.SectorAccessor {
func NewSectorAccessor(maddr dtypes.MinerAddress, secb sectorblocks.SectorBuilder, pp sectorstorage.PieceProvider, full v1api.FullNode) dagstore.SectorAccessor {
return &sectorAccessor{address.Address(maddr), secb, pp, full}
}
func (sa *sectorAccessor) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) {
log.Debugf("get sector %d, offset %d, length %d", sectorID, offset, length)
func (sa *sectorAccessor) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) {
return sa.UnsealSectorAt(ctx, sectorID, pieceOffset, length)
}
func (sa *sectorAccessor) UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) {
log.Debugf("get sector %d, pieceOffset %d, length %d", sectorID, pieceOffset, length)
si, err := sa.sectorsStatus(ctx, sectorID, false)
if err != nil {
return nil, err
@ -64,8 +69,8 @@ func (sa *sectorAccessor) UnsealSector(ctx context.Context, sectorID abi.SectorN
}
// Get a reader for the piece, unsealing the piece if necessary
log.Debugf("read piece in sector %d, offset %d, length %d from miner %d", sectorID, offset, length, mid)
r, unsealed, err := sa.pp.ReadPiece(ctx, ref, storiface.UnpaddedByteIndex(offset), length, si.Ticket.Value, commD)
log.Debugf("read piece in sector %d, pieceOffset %d, length %d from miner %d", sectorID, pieceOffset, length, mid)
r, unsealed, err := sa.pp.ReadPiece(ctx, ref, storiface.UnpaddedByteIndex(pieceOffset), length, si.Ticket.Value, commD)
if err != nil {
return nil, xerrors.Errorf("failed to unseal piece from sector %d: %w", sectorID, err)
}

View File

@ -128,6 +128,15 @@ var (
StorageLimitUsedBytes = stats.Int64("storage/path_limit_used_bytes", "used optional storage limit bytes", stats.UnitBytes)
StorageLimitMaxBytes = stats.Int64("storage/path_limit_max_bytes", "optional storage limit", stats.UnitBytes)
DagStorePRInitCount = stats.Int64("dagstore/pr_init_count", "PieceReader init count", stats.UnitDimensionless)
DagStorePRBytesRequested = stats.Int64("dagstore/pr_requested_bytes", "PieceReader requested bytes", stats.UnitBytes)
DagStorePRBytesDiscarded = stats.Int64("dagstore/pr_discarded_bytes", "PieceReader discarded bytes", stats.UnitBytes)
DagStorePRDiscardCount = stats.Int64("dagstore/pr_discard_count", "PieceReader discard count", stats.UnitDimensionless)
DagStorePRSeekBackCount = stats.Int64("dagstore/pr_seek_back_count", "PieceReader seek back count", stats.UnitDimensionless)
DagStorePRSeekForwardCount = stats.Int64("dagstore/pr_seek_forward_count", "PieceReader seek forward count", stats.UnitDimensionless)
DagStorePRSeekBackBytes = stats.Int64("dagstore/pr_seek_back_bytes", "PieceReader seek back bytes", stats.UnitBytes)
DagStorePRSeekForwardBytes = stats.Int64("dagstore/pr_seek_forward_bytes", "PieceReader seek forward bytes", stats.UnitBytes)
// splitstore
SplitstoreMiss = stats.Int64("splitstore/miss", "Number of misses in hotstore access", stats.UnitDimensionless)
SplitstoreCompactionTimeSeconds = stats.Float64("splitstore/compaction_time", "Compaction time in seconds", stats.UnitSeconds)
@ -142,7 +151,7 @@ var (
Description: "Lotus node information",
Measure: LotusInfo,
Aggregation: view.LastValue(),
TagKeys: []tag.Key{Version, Commit},
TagKeys: []tag.Key{Version, Commit, NodeType},
}
ChainNodeHeightView = &view.View{
Measure: ChainNodeHeight,
@ -383,6 +392,39 @@ var (
TagKeys: []tag.Key{StorageID},
}
DagStorePRInitCountView = &view.View{
Measure: DagStorePRInitCount,
Aggregation: view.Count(),
}
DagStorePRBytesRequestedView = &view.View{
Measure: DagStorePRBytesRequested,
Aggregation: view.Sum(),
}
DagStorePRBytesDiscardedView = &view.View{
Measure: DagStorePRBytesDiscarded,
Aggregation: view.Sum(),
}
DagStorePRDiscardCountView = &view.View{
Measure: DagStorePRDiscardCount,
Aggregation: view.Count(),
}
DagStorePRSeekBackCountView = &view.View{
Measure: DagStorePRSeekBackCount,
Aggregation: view.Count(),
}
DagStorePRSeekForwardCountView = &view.View{
Measure: DagStorePRSeekForwardCount,
Aggregation: view.Count(),
}
DagStorePRSeekBackBytesView = &view.View{
Measure: DagStorePRSeekBackBytes,
Aggregation: view.Sum(),
}
DagStorePRSeekForwardBytesView = &view.View{
Measure: DagStorePRSeekForwardBytes,
Aggregation: view.Sum(),
}
// splitstore
SplitstoreMissView = &view.View{
Measure: SplitstoreMiss,
@ -539,6 +581,14 @@ var MinerNodeViews = append([]*view.View{
StorageReservedBytesView,
StorageLimitUsedBytesView,
StorageLimitMaxBytesView,
DagStorePRInitCountView,
DagStorePRBytesRequestedView,
DagStorePRBytesDiscardedView,
DagStorePRDiscardCountView,
DagStorePRSeekBackCountView,
DagStorePRSeekForwardCountView,
DagStorePRSeekBackBytesView,
DagStorePRSeekForwardBytesView,
}, DefaultViews...)
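These measures follow the usual opencensus pattern: code records against the
measure, and the registered view aggregates it. A hedged sketch of the
producing side (the pieceReader type and its ctx/r fields are hypothetical;
only the measure and the stats.Record call are taken from the real APIs):

// import "go.opencensus.io/stats"
func (pr *pieceReader) Read(p []byte) (int, error) {
	// Count every byte the caller asks for, whether or not the read succeeds.
	stats.Record(pr.ctx, DagStorePRBytesRequested.M(int64(len(p))))
	return pr.r.Read(p)
}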
// SinceInMilliseconds returns the duration of time since the provide time as a float64.

View File

@ -177,7 +177,7 @@ var LibP2P = Options(
// Host settings
Override(DefaultTransportsKey, lp2p.DefaultTransports),
Override(AddrsFactoryKey, lp2p.AddrsFactory(nil, nil)),
Override(SmuxTransportKey, lp2p.SmuxTransport(true)),
Override(SmuxTransportKey, lp2p.SmuxTransport()),
Override(RelayKey, lp2p.NoRelay()),
Override(SecurityKey, lp2p.Security(true, false)),

View File

@ -155,7 +155,8 @@ func ConfigStorageMiner(c interface{}) Option {
Override(DAGStoreKey, modules.DAGStore),
// Markets (retrieval)
Override(new(retrievalmarket.SectorAccessor), sectoraccessor.NewSectorAccessor),
Override(new(dagstore.SectorAccessor), sectoraccessor.NewSectorAccessor),
Override(new(retrievalmarket.SectorAccessor), From(new(dagstore.SectorAccessor))),
Override(new(retrievalmarket.RetrievalProviderNode), retrievaladapter.NewRetrievalProviderNode),
Override(new(rmnet.RetrievalMarketNetwork), modules.RetrievalNetwork),
Override(new(retrievalmarket.RetrievalProvider), modules.RetrievalProvider),

View File

@ -4,17 +4,22 @@ import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io"
"os"
"sort"
"strings"
"time"
bstore "github.com/ipfs/go-ipfs-blockstore"
format "github.com/ipfs/go-ipld-format"
unixfile "github.com/ipfs/go-unixfs/file"
"github.com/ipld/go-car"
"github.com/ipld/go-car/util"
carv2 "github.com/ipld/go-car/v2"
carv2bs "github.com/ipld/go-car/v2/blockstore"
"github.com/ipld/go-ipld-prime/datamodel"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-padreader"
@ -58,7 +63,6 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-actors/v3/actors/builtin/market"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/repo/imports"
@ -760,325 +764,405 @@ func (a *API) ClientCancelRetrievalDeal(ctx context.Context, dealID rm.DealID) e
}
}
func (a *API) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error {
events := make(chan marketevents.RetrievalEvent)
go a.clientRetrieve(ctx, order, ref, events)
for {
select {
case evt, ok := <-events:
if !ok { // done successfully
return nil
}
if evt.Err != "" {
return xerrors.Errorf("retrieval failed: %s", evt.Err)
}
case <-ctx.Done():
return xerrors.Errorf("retrieval timed out")
}
}
}
func (a *API) ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
events := make(chan marketevents.RetrievalEvent)
go a.clientRetrieve(ctx, order, ref, events)
return events, nil
}
type retrievalSubscribeEvent struct {
event rm.ClientEvent
state rm.ClientDealState
}
func consumeAllEvents(ctx context.Context, dealID rm.DealID, subscribeEvents chan retrievalSubscribeEvent, events chan marketevents.RetrievalEvent) error {
for {
var subscribeEvent retrievalSubscribeEvent
select {
case <-ctx.Done():
return xerrors.New("Retrieval Timed Out")
case subscribeEvent = <-subscribeEvents:
if subscribeEvent.state.ID != dealID {
// we can't check the deal ID ahead of time because:
// 1. We need to subscribe before retrieving.
// 2. We won't know the deal ID until after retrieving.
continue
}
}
select {
case <-ctx.Done():
return xerrors.New("Retrieval Timed Out")
case events <- marketevents.RetrievalEvent{
Event: subscribeEvent.event,
Status: subscribeEvent.state.Status,
BytesReceived: subscribeEvent.state.TotalReceived,
FundsSpent: subscribeEvent.state.FundsSpent,
}:
}
state := subscribeEvent.state
switch state.Status {
case rm.DealStatusCompleted:
return nil
case rm.DealStatusRejected:
return xerrors.Errorf("Retrieval Proposal Rejected: %s", state.Message)
case rm.DealStatusCancelled:
return xerrors.Errorf("Retrieval was cancelled externally: %s", state.Message)
case
rm.DealStatusDealNotFound,
rm.DealStatusErrored:
return xerrors.Errorf("Retrieval Error: %s", state.Message)
}
}
}
func getDataSelector(dps *api.Selector, matchPath bool) (datamodel.Node, error) {
sel := selectorparse.CommonSelector_ExploreAllRecursively
if dps != nil {
if strings.HasPrefix(string(*dps), "{") {
var err error
sel, err = selectorparse.ParseJSONSelector(string(*dps))
if err != nil {
return nil, xerrors.Errorf("failed to parse json-selector '%s': %w", *dps, err)
}
} else {
ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any)
selspec, err := textselector.SelectorSpecFromPath(
textselector.Expression(*dps), matchPath,
// URGH - this is a direct copy from https://github.com/filecoin-project/go-fil-markets/blob/v1.12.0/shared/selectors.go#L10-L16
// Unable to use it because we need the SelectorSpec, and markets exposes just a reified node
ssb.ExploreRecursive(
selector.RecursionLimitNone(),
ssb.ExploreUnion(ssb.Matcher(), ssb.ExploreAll(ssb.ExploreRecursiveEdge())),
),
)
if err != nil {
return nil, xerrors.Errorf("failed to parse text-selector '%s': %w", *dps, err)
}
sel = selspec.Node()
log.Infof("partial retrieval of datamodel-path-selector %s/*", *dps)
}
}
return sel, nil
}
func (a *API) ClientRetrieve(ctx context.Context, params api.RetrievalOrder) (*api.RestrievalRes, error) {
sel, err := getDataSelector(params.DataSelector, false)
if err != nil {
return nil, err
}
di, err := a.doRetrieval(ctx, params, sel)
if err != nil {
return nil, err
}
return &api.RestrievalRes{
DealID: di,
}, nil
}
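For orientation, a minimal sketch of the split flow this introduces (hypothetical wiring: `napi` as a connected v1 `api.FullNode` client and `order` as a populated `api.RetrievalOrder` are assumptions, not names from this diff):

    // Hypothetical usage of the new split v1 retrieval API.
    res, err := napi.ClientRetrieve(ctx, order) // kick off the retrieval, get the deal ID
    if err != nil {
        return err
    }
    if err := napi.ClientRetrieveWait(ctx, res.DealID); err != nil { // block until done
        return err
    }
    // Export what was fetched: to a UnixFS file here; set IsCAR for a CAR.
    return napi.ClientExport(ctx,
        api.ExportRef{Root: order.Root, DealID: res.DealID},
        api.FileRef{Path: "/tmp/retrieved.data"},
    )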
func (a *API) doRetrieval(ctx context.Context, order api.RetrievalOrder, sel datamodel.Node) (rm.DealID, error) {
if order.MinerPeer == nil || order.MinerPeer.ID == "" {
mi, err := a.StateMinerInfo(ctx, order.Miner, types.EmptyTSK)
if err != nil {
return 0, err
}
order.MinerPeer = &rm.RetrievalPeer{
ID: *mi.PeerId,
Address: order.Miner,
}
}
if order.Total.Int == nil {
return 0, xerrors.Errorf("cannot make retrieval deal for null total")
}
if order.Size == 0 {
return 0, xerrors.Errorf("cannot make retrieval deal for zero bytes")
}
ppb := types.BigDiv(order.Total, types.NewInt(order.Size))
params, err := rm.NewParamsV1(ppb, order.PaymentInterval, order.PaymentIntervalIncrease, sel, order.Piece, order.UnsealPrice)
if err != nil {
return 0, xerrors.Errorf("Error in retrieval params: %s", err)
}
id := a.Retrieval.NextID()
id, err = a.Retrieval.Retrieve(
ctx,
id,
order.Root,
params,
order.Total,
*order.MinerPeer,
order.Client,
order.Miner,
)
if err != nil {
return 0, xerrors.Errorf("Retrieve failed: %w", err)
}
return id, nil
}
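A quick arithmetic check of the price-per-byte line above (illustrative values only; `types.BigDiv` truncates, so a `Total` that is not a multiple of `Size` rounds down):

    // Illustrative only: 2048 attoFIL quoted for a 1024-byte payload.
    total := types.NewInt(2048)
    size := uint64(1024)
    ppb := types.BigDiv(total, types.NewInt(size)) // 2 attoFIL per byte
    _ = ppb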
func (a *API) ClientRetrieveWait(ctx context.Context, deal rm.DealID) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
subscribeEvents := make(chan rm.ClientDealState, 1)
unsubscribe := a.Retrieval.SubscribeToEvents(func(event rm.ClientEvent, state rm.ClientDealState) {
// Only forward events for the deal we are waiting on.
if state.ID != deal {
return
}
select {
case <-ctx.Done():
case subscribeEvents <- state:
}
})
defer unsubscribe()
{
state, err := a.Retrieval.GetDeal(deal)
if err != nil {
return xerrors.Errorf("getting deal state: %w", err)
}
select {
case subscribeEvents <- state:
default: // already have an event queued from the subscription
}
}
for {
select {
case <-ctx.Done():
return xerrors.New("Retrieval Timed Out")
case state := <-subscribeEvents:
switch state.Status {
case rm.DealStatusCompleted:
return nil
case rm.DealStatusRejected:
return xerrors.Errorf("Retrieval Proposal Rejected: %s", state.Message)
case rm.DealStatusCancelled:
return xerrors.Errorf("Retrieval was cancelled externally: %s", state.Message)
case
rm.DealStatusDealNotFound,
rm.DealStatusErrored:
return xerrors.Errorf("Retrieval Error: %s", state.Message)
}
}
}
}
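One detail worth calling out in `ClientRetrieveWait`: the subscription is registered before the current deal state is read and seeded into the channel, so a deal that was already terminal when the call started still produces a wake-up. A stripped-down sketch of the same pattern (all names here are hypothetical, not part of this diff):

    // Hypothetical illustration of the subscribe-then-seed wait pattern above.
    type dealState struct{ done bool }

    func waitDone(ctx context.Context, current func() dealState, subscribe func(cb func(dealState)) (unsub func())) error {
        updates := make(chan dealState, 1)
        unsub := subscribe(func(s dealState) {
            select {
            case updates <- s:
            case <-ctx.Done():
            }
        })
        defer unsub()
        select {
        case updates <- current(): // seed with the present state...
        default: // ...unless the subscription already queued a fresher event
        }
        for {
            select {
            case <-ctx.Done():
                return ctx.Err()
            case s := <-updates:
                if s.done {
                    return nil
                }
            }
        }
    }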
type ExportDest struct {
Writer io.Writer
Path string
}
func (ed *ExportDest) doWrite(cb func(io.Writer) error) error {
if ed.Writer != nil {
return cb(ed.Writer)
}
f, err := os.OpenFile(ed.Path, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return err
}
if err := cb(f); err != nil {
_ = f.Close()
return err
}
return f.Close()
}
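A short sketch of the two `ExportDest` modes (example values assumed). Worth noting: `doWrite` opens with `os.O_CREATE|os.O_WRONLY` but no `os.O_TRUNC`, so writing fewer bytes than an existing file already holds leaves the old tail in place.

    // Assumed example values; ExportDest is the type defined above.
    var buf bytes.Buffer
    inMem := ExportDest{Writer: &buf}      // stream into memory
    onDisk := ExportDest{Path: "/tmp/out"} // write to a file on disk

    write := func(w io.Writer) error {
        _, err := w.Write([]byte("hello"))
        return err
    }
    if err := inMem.doWrite(write); err != nil {
        return err
    }
    if err := onDisk.doWrite(write); err != nil {
        return err
    }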
func (a *API) ClientExport(ctx context.Context, exportRef api.ExportRef, ref api.FileRef) error {
return a.ClientExportInto(ctx, exportRef, ref.IsCAR, ExportDest{Path: ref.Path})
}
func (a *API) ClientExportInto(ctx context.Context, exportRef api.ExportRef, car bool, dest ExportDest) error {
// summary:
// 1. if we're exporting from an import, FromLocalCAR will be set.
// Skip the retrieval itself, and use the provided car as a blockstore further down
// to extract a CAR or UnixFS export from.
// 2. if we used an IPFS blockstore for retrieval, use the virtual blockstore
// to extract a CAR or UnixFS export from it.
// 3. if the retrieval was a CARv2 retrieval, either extract the CARv1
// (with ExtractV1File) or use it as a blockstore further down.
// this indicates we're proxying to IPFS.
proxyBss, retrieveIntoIPFS := a.RtvlBlockstoreAccessor.(*retrievaladapter.ProxyBlockstoreAccessor)
carBss, retrieveIntoCAR := a.RtvlBlockstoreAccessor.(*retrievaladapter.CARBlockstoreAccessor)
carPath := exportRef.FromLocalCAR
if carPath == "" {
if !retrieveIntoIPFS && !retrieveIntoCAR {
// we don't recognize the blockstore accessor.
return xerrors.Errorf("unsupported retrieval blockstore accessor")
}
if retrieveIntoCAR {
carPath = carBss.PathFor(exportRef.DealID)
}
}
// determine where the retrieval went
var retrievalBs bstore.Blockstore
if retrieveIntoIPFS {
retrievalBs = proxyBss.Blockstore
} else {
cbs, err := stores.ReadOnlyFilestore(carPath)
if err != nil {
return err
}
defer cbs.Close() //nolint:errcheck
retrievalBs = cbs
}
dserv := merkledag.NewDAGService(blockservice.New(retrievalBs, offline.Exchange(retrievalBs)))
// Are we outputting a CAR?
if car {
// not IPFS and we do full selection - just extract the CARv1 from the CARv2 we stored the retrieval in
if !retrieveIntoIPFS && len(exportRef.DAGs) == 0 && dest.Writer == nil {
return carv2.ExtractV1File(carPath, dest.Path)
}
}
roots, err := parseDagSpec(ctx, exportRef.Root, exportRef.DAGs, dserv, car)
if err != nil {
return xerrors.Errorf("parsing dag spec: %w", err)
}
if car {
return a.outputCAR(ctx, dserv, retrievalBs, exportRef.Root, roots, dest)
}
if len(roots) != 1 {
return xerrors.Errorf("unixfs retrieval requires one root node, got %d", len(roots))
}
return a.outputUnixFS(ctx, roots[0].root, dserv, dest)
}
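Building on the above, a hedged sketch of a partial export via `DagSpec` (the path expression, `napi`, `root` and `dealID` are assumptions, not from this diff):

    // Hypothetical: export only the sub-DAG at a text-selector path.
    sub := api.Selector("Links/0/Hash") // assumed path expression
    if err := napi.ClientExport(ctx, api.ExportRef{
        Root:   root,
        DealID: dealID,
        DAGs:   []api.DagSpec{{DataSelector: &sub}},
    }, api.FileRef{Path: "/tmp/partial.data"}); err != nil {
        return err
    }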
func (a *API) outputCAR(ctx context.Context, ds format.DAGService, bs bstore.Blockstore, root cid.Cid, dags []dagSpec, dest ExportDest) error {
// generating a CARv1 from the configured blockstore
roots := make([]cid.Cid, len(dags))
for i, dag := range dags {
roots[i] = dag.root
}
return dest.doWrite(func(w io.Writer) error {
if err := car.WriteHeader(&car.CarHeader{
Roots: roots,
Version: 1,
}, w); err != nil {
return fmt.Errorf("failed to write car header: %s", err)
}
cs := cid.NewSet()
for _, dagSpec := range dags {
if err := utils.TraverseDag(
ctx,
ds,
root,
dagSpec.selector,
func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error {
if r == traversal.VisitReason_SelectionMatch {
var c cid.Cid
if p.LastBlock.Link == nil {
c = root
} else {
cidLnk, castOK := p.LastBlock.Link.(cidlink.Link)
if !castOK {
return xerrors.Errorf("cidlink cast unexpectedly failed on '%s'", p.LastBlock.Link)
}
c = cidLnk.Cid
}
if cs.Visit(c) {
nb, err := bs.Get(c)
if err != nil {
return xerrors.Errorf("getting block data: %w", err)
}
err = util.LdWrite(w, c.Bytes(), nb.RawData())
if err != nil {
return xerrors.Errorf("writing block data: %w", err)
}
}
return nil
}
return nil
},
); err != nil {
return xerrors.Errorf("error while traversing car dag: %w", err)
}
}
return nil
})
}
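The CAR produced here is a plain CARv1: a header carrying the selected roots, followed by each visited block written once via `util.LdWrite`. A hedged read-back sketch using the same go-car package (the path is a placeholder):

    // Hypothetical read-back of a CAR produced by the export above.
    func dumpCar(path string) error {
        f, err := os.Open(path)
        if err != nil {
            return err
        }
        defer f.Close()
        cr, err := car.NewCarReader(f)
        if err != nil {
            return err
        }
        fmt.Println("roots:", cr.Header.Roots)
        for {
            blk, err := cr.Next()
            if err == io.EOF {
                return nil
            } else if err != nil {
                return err
            }
            fmt.Printf("%s: %d bytes\n", blk.Cid(), len(blk.RawData()))
        }
    }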
func (a *API) outputUnixFS(ctx context.Context, root cid.Cid, ds format.DAGService, dest ExportDest) error {
nd, err := ds.Get(ctx, root)
if err != nil {
return xerrors.Errorf("ClientRetrieve: %w", err)
}
file, err := unixfile.NewUnixfsFile(ctx, ds, nd)
if err != nil {
return xerrors.Errorf("ClientRetrieve: %w", err)
}
if dest.Writer == nil {
return files.WriteTo(file, dest.Path)
}
switch f := file.(type) {
case files.File:
_, err = io.Copy(dest.Writer, f)
if err != nil {
return err
}
return nil
default:
return fmt.Errorf("file type %T is not supported", nd)
}
}
type dagSpec struct {
root cid.Cid
selector ipld.Node
}
func parseDagSpec(ctx context.Context, root cid.Cid, dsp []api.DagSpec, ds format.DAGService, car bool) ([]dagSpec, error) {
if len(dsp) == 0 {
return []dagSpec{
{
root: root,
selector: nil,
},
}, nil
}
out := make([]dagSpec, len(dsp))
for i, spec := range dsp {
if spec.DataSelector == nil {
return nil, xerrors.Errorf("invalid DagSpec at position %d: `DataSelector` can not be nil", i)
}
// reify selector
var err error
out[i].selector, err = getDataSelector(spec.DataSelector, car && spec.ExportMerkleProof)
if err != nil {
return nil, err
}
// find the pointed-at root node within the containing ds
var rsn ipld.Node
if strings.HasPrefix(string(*spec.DataSelector), "{") {
var err error
rsn, err = selectorparse.ParseJSONSelector(string(*spec.DataSelector))
if err != nil {
return nil, xerrors.Errorf("failed to parse json-selector '%s': %w", *spec.DataSelector, err)
}
} else {
selspec, _ := textselector.SelectorSpecFromPath(textselector.Expression(*spec.DataSelector), car && spec.ExportMerkleProof, nil) //nolint:errcheck
rsn = selspec.Node()
}
var newRoot cid.Cid
var errHalt = errors.New("halt walk")
if err := utils.TraverseDag(
ctx,
ds,
root,
rsn,
func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error {
if r == traversal.VisitReason_SelectionMatch {
if !car && p.LastBlock.Path.String() != p.Path.String() {
return xerrors.Errorf("unsupported selection path '%s' does not correspond to a block boundary (a.k.a. CID link)", p.Path.String())
}
if p.LastBlock.Link == nil {
// this is likely the root node that we've matched here
newRoot = root
return errHalt
}
cidLnk, castOK := p.LastBlock.Link.(cidlink.Link)
if !castOK {
return xerrors.Errorf("cidlink cast unexpectedly failed on '%s'", p.LastBlock.Link)
}
newRoot = cidLnk.Cid
return errHalt
}
return nil
},
); err != nil && err != errHalt {
return nil, xerrors.Errorf("error while locating partial retrieval sub-root: %w", err)
}
if newRoot == cid.Undef {
return nil, xerrors.Errorf("path selection does not match a node within %s", root)
}
out[i].root = newRoot
}
return out, nil
}
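For reference, the JSON flavour detected by the leading `{` above is a complete ipld-prime selector document. A hedged example, believed to be the dag-json spelling of the default explore-all-recursively selector (verify against the ipld selector docs before relying on it):

    // Assumed example: full ipld-prime selector in dag-json form.
    sub := api.Selector(`{"R":{"l":{"none":{}},":>":{"a":{">":{"@":{}}}}}}`)
    spec := api.DagSpec{DataSelector: &sub} // selects the whole DAG under the root
    _ = spec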
func (a *API) ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) {
@ -1110,8 +1194,13 @@ func (a *API) ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, er
func (a *API) ClientGetRetrievalUpdates(ctx context.Context) (<-chan api.RetrievalInfo, error) {
updates := make(chan api.RetrievalInfo)
unsub := a.Retrieval.SubscribeToEvents(func(evt rm.ClientEvent, deal rm.ClientDealState) {
update := a.newRetrievalInfo(ctx, deal)
update.Event = &evt
select {
case updates <- update:
case <-ctx.Done():
}
})
go func() {

View File

@ -60,7 +60,7 @@ func TestImportLocal(t *testing.T) {
require.NoError(t, err)
require.True(t, local)
order := api.RetrievalOrder{
order := api.ExportRef{
Root: root,
FromLocalCAR: it.CARPath,
}
@ -68,7 +68,7 @@ func TestImportLocal(t *testing.T) {
// retrieve as UnixFS.
out1 := filepath.Join(dir, "retrieval1.data") // as unixfs
out2 := filepath.Join(dir, "retrieval2.data") // as car
err = a.ClientRetrieve(ctx, order, &api.FileRef{
err = a.ClientExport(ctx, order, api.FileRef{
Path: out1,
})
require.NoError(t, err)
@ -77,7 +77,7 @@ func TestImportLocal(t *testing.T) {
require.NoError(t, err)
require.Equal(t, b, outBytes)
err = a.ClientRetrieve(ctx, order, &api.FileRef{
err = a.ClientExport(ctx, order, api.FileRef{
Path: out2,
IsCAR: true,
})

View File

@ -100,7 +100,7 @@ func (a *MsigAPI) MsigAddCancel(ctx context.Context, msig address.Address, src a
return nil, actErr
}
return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc)
return a.MsigCancelTxnHash(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc)
}
func (a *MsigAPI) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (*api.MessagePrototype, error) {
@ -127,7 +127,7 @@ func (a *MsigAPI) MsigSwapCancel(ctx context.Context, msig address.Address, src
return nil, actErr
}
return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc)
return a.MsigCancelTxnHash(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc)
}
func (a *MsigAPI) MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (*api.MessagePrototype, error) {
@ -138,7 +138,11 @@ func (a *MsigAPI) MsigApproveTxnHash(ctx context.Context, msig address.Address,
return a.msigApproveOrCancelTxnHash(ctx, api.MsigApprove, msig, txID, proposer, to, amt, src, method, params)
}
func (a *MsigAPI) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*api.MessagePrototype, error) {
func (a *MsigAPI) MsigCancel(ctx context.Context, msig address.Address, txID uint64, src address.Address) (*api.MessagePrototype, error) {
return a.msigApproveOrCancelSimple(ctx, api.MsigCancel, msig, txID, src)
}
func (a *MsigAPI) MsigCancelTxnHash(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*api.MessagePrototype, error) {
return a.msigApproveOrCancelTxnHash(ctx, api.MsigCancel, msig, txID, src, to, amt, src, method, params)
}
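For callers migrating across this change, a hedged sketch (assumes `napi` is an `api.FullNode` client; all values are placeholders):

    // Plain cancellation now only needs the txn ID and the sender.
    proto, err := napi.MsigCancel(ctx, msig, txID, src)
    if err != nil {
        return err
    }
    // Send the returned prototype as before. To keep verifying the proposal
    // contents before cancelling, use the hash-checked variant, which carries
    // the old MsigCancel parameter set:
    // proto, err = napi.MsigCancelTxnHash(ctx, msig, txID, to, amt, src, method, params)
    _ = proto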

Some files were not shown because too many files have changed in this diff.