diff --git a/.circleci/config.yml b/.circleci/config.yml index 30f2d5c01..2213d08ad 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -805,6 +805,11 @@ workflows: suite: itest-deals_padding target: "./itests/deals_padding_test.go" + - test: + name: test-itest-deals_partial_retrieval_dm-level + suite: itest-deals_partial_retrieval_dm-level + target: "./itests/deals_partial_retrieval_dm-level_test.go" + - test: name: test-itest-deals_partial_retrieval suite: itest-deals_partial_retrieval target: "./itests/deals_partial_retrieval_test.go" @@ -940,7 +945,7 @@ workflows: codecov-upload: false suite: conformance-bleeding-edge target: "./conformance" - vectors-branch: master + vectors-branch: specs-actors-v7 - trigger-testplans: filters: branches: @@ -971,19 +976,10 @@ tags: only: - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - - build-appimage: - filters: - branches: - ignore: - - /.*/ - tags: - only: - - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - publish: requires: - build-all - build-macos - - build-appimage filters: branches: ignore: diff --git a/.circleci/template.yml b/.circleci/template.yml index 4b954391b..ef6818c6d 100644 --- a/.circleci/template.yml +++ b/.circleci/template.yml @@ -785,7 +785,7 @@ workflows: codecov-upload: false suite: conformance-bleeding-edge target: "./conformance" - vectors-branch: master + vectors-branch: specs-actors-v7 - trigger-testplans: filters: branches: @@ -816,19 +816,10 @@ tags: only: - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - - build-appimage: - filters: - branches: - ignore: - - /.*/ - tags: - only: - - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - publish: requires: - build-all - build-macos - - build-appimage filters: branches: ignore: diff --git a/CHANGELOG.md b/CHANGELOG.md index 04471452f..a420421de 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,107 @@ # Lotus changelog +# v1.13.2 / 2022-01-09 + +Lotus v1.13.2 is a *highly recommended* feature release with remarkable retrieval improvements and new features such as +worker management, scheduler enhancements, and more. + +## Highlights +- πŸš€πŸš€πŸš€ Improve retrieval deal experience + - Testing results with MinerX.3 show that the retrieval deal success rate has increased dramatically with faster transfer + speeds. You can join or follow along with further performance testing [here](https://github.com/filecoin-project/lotus/discussions/7874). We recommend that application developers integrate with the new + retrieval APIs to provide a better client experience. + - 🌟🌟🌟 Reduce retrieval Time-To-First-Byte by over 100x ([#7693](https://github.com/filecoin-project/lotus/pull/7693)) + - This change makes most free, small retrievals sub-second + - 🌟🌟🌟 Partial retrieval UX improvements ([#7610](https://github.com/filecoin-project/lotus/pull/7610)) + - New retrieval commands for clients: + - `lotus client ls`: retrieve and list desired object links + - `lotus client cat`: retrieve and print the data from the network + - 🌟🌟 The monolithic `ClientRetrieve` method was broken into: + - `ClientRetrieve` which retrieves data into the local repo (or into an IPFS node if ipfs integration is enabled) + - `ClientRetrieveWait` which waits for the retrieval to complete + - `ClientExport` which exports data from the local node + - Note: this change only applies to the v1 API; the v0 API remains unchanged. 
 + - 🌟 Support for full ipld selectors was added (for example, making it possible to retrieve only a list of directories in a deal, without fetching any file data) + - To learn more, see [here](https://github.com/filecoin-project/lotus/blob/0523c946f984b22b3f5de8cc3003cc791389527e/api/types.go#L230-L264) +- πŸš€πŸš€ Sealing scheduler enhancements ([#7703](https://github.com/filecoin-project/lotus/pull/7703), + [#7269](https://github.com/filecoin-project/lotus/pull/7269)), [#7714](https://github.com/filecoin-project/lotus/pull/7714) + - Workers are now aware of cgroup memory limits + - Multiple tasks which use a GPU can be scheduled on a single worker + - Workers can override the default resource table through env vars + - Default value list: https://gist.github.com/magik6k/c0e1c7cd73c1241a9acabc30bf469a43 +- πŸš€πŸš€ Sector storage groups ([#7453](https://github.com/filecoin-project/lotus/pull/7453)) + - Storage groups allow for better control of data flow between workers; for example, they make it possible to require that data from PC1 on a given worker has its PC2 step executed on the same worker + - To set it up, follow the instructions under the `Sector Storage Group` section [here](https://lotus.filecoin.io/docs/storage-providers/seal-workers/#lotus-worker-co-location) + +## New Features +- Add RLE dump code ([#7691](https://github.com/filecoin-project/lotus/pull/7691)) +- Shed: Add a util to list miner faults ([#7605](https://github.com/filecoin-project/lotus/pull/7605)) +- lotus-shed msg: Decode submessages/msig proposals ([#7639](https://github.com/filecoin-project/lotus/pull/7639)) +- CLI: Add a lotus multisig cancel command ([#7645](https://github.com/filecoin-project/lotus/pull/7645)) +- shed: simple wallet balancer util ([#7414](https://github.com/filecoin-project/lotus/pull/7414)) + - balancing token balance between multiple accounts + +## Improvements +- Add verbose mode to `lotus-miner pieces list-cids` ([#7699](https://github.com/filecoin-project/lotus/pull/7699)) +- retrieval: Only output matching nodes, MatchPath dagspec ([#7706](https://github.com/filecoin-project/lotus/pull/7706)) +- Cleanup partial retrieval codepaths ( zero functional changes ) ([#7688](https://github.com/filecoin-project/lotus/pull/7688)) +- storage: Use 1M buffers for Tar transfers ([#7681](https://github.com/filecoin-project/lotus/pull/7681)) +- Chore/dm level tests plus merkle proof cars ([#7673](https://github.com/filecoin-project/lotus/pull/7673)) +- Shed: Add a util to create miners more easily ([#7595](https://github.com/filecoin-project/lotus/pull/7595)) +- add timeout flag to wait-api command ([#7592](https://github.com/filecoin-project/lotus/pull/7592)) +- add log for restart windows post scheduler ([#7613](https://github.com/filecoin-project/lotus/pull/7613)) +- remove jaeger envvars ([#7631](https://github.com/filecoin-project/lotus/pull/7631)) +- remove api and jaeger env from docker file ([#7624](https://github.com/filecoin-project/lotus/pull/7624)) +- Wdpost worker: Reduce challenge confidence to 1 epoch ([#7572](https://github.com/filecoin-project/lotus/pull/7572)) +- add additional methods to lotus gateway ([#7644](https://github.com/filecoin-project/lotus/pull/7644)) +- Add caches to lotus-stats and splitcode ([#7329](https://github.com/filecoin-project/lotus/pull/7329)) +- remote store: Remove debug printf ([#7664](https://github.com/filecoin-project/lotus/pull/7664)) +- docsgen-cli: Handle commands with no description correctly 
([#7659](https://github.com/filecoin-project/lotus/pull/7659)) + +## Bug Fixes +- fix docker logic error ([#7709](https://github.com/filecoin-project/lotus/pull/7709)) +- add missing NodeType tag ([#7559](https://github.com/filecoin-project/lotus/pull/7559)) +- checkCommit should return SectorCommitFailed ([#7555](https://github.com/filecoin-project/lotus/pull/7555)) +- ffiwrapper: Validate PC2 by calling C1 with random seeds ([#7710](https://github.com/filecoin-project/lotus/pull/7710)) + +## Dependency Updates +- Update go-graphsync v0.10.6 ([#7708](https://github.com/filecoin-project/lotus/pull/7708)) +- update go-libp2p-pubsub to v0.5.6 ([#7581](https://github.com/filecoin-project/lotus/pull/7581)) +- Update go-state-types ([#7591](https://github.com/filecoin-project/lotus/pull/7591)) +- disable mplex stream muxer ([#7689](https://github.com/filecoin-project/lotus/pull/7689)) +- Bump ws from 5.2.2 to 5.2.3 in /lotuspond/front ([#7660](https://github.com/filecoin-project/lotus/pull/7660)) +- Bump color-string from 1.5.3 to 1.6.0 in /lotuspond/front ([#7658](https://github.com/filecoin-project/lotus/pull/7658)) +- Bump postcss from 7.0.17 to 7.0.39 in /lotuspond/front ([#7657](https://github.com/filecoin-project/lotus/pull/7657)) +- Bump path-parse from 1.0.6 to 1.0.7 in /lotuspond/front ([#7656](https://github.com/filecoin-project/lotus/pull/7656)) +- Bump tmpl from 1.0.4 to 1.0.5 in /lotuspond/front ([#7655](https://github.com/filecoin-project/lotus/pull/7655)) +- Bump url-parse from 1.4.7 to 1.5.3 in /lotuspond/front ([#7654](https://github.com/filecoin-project/lotus/pull/7654)) +- github.com/filecoin-project/go-state-types (v0.1.1-0.20210915140513-d354ccf10379 -> v0.1.1): + +## Others +- Update archive script ([#7690](https://github.com/filecoin-project/lotus/pull/7690)) + +## Contributors + +| Contributor | Commits | Lines Β± | Files Changed | +|-------------|---------|---------|---------------| +| @magik6k | 89 | +5200/-1818 | 232 | +| Travis Person | 5 | +1473/-953 | 38 | +| @arajasek | 6 | +550/-38 | 19 | +| @clinta | 4 | +393/-123 | 26 | +| @ribasushi | 3 | +334/-68 | 7 | +| @jennijuju| 13 | +197/-120 | 67 | +| @Kubuxu | 10 | +153/-30 | 10 | +| @coryschwartz | 6 | +18/-26 | 6 | +| Marten Seemann | 2 | +6/-34 | 5 | +| @vyzo | 1 | +3/-3 | 2 | +| @hannahhoward | 1 | +3/-3 | 2 | +| @zenground0 | 2 | +2/-2 | 2 | +| @yaohcn | 2 | +2/-2 | 2 | +| @jennijuju | 1 | +1/-1 | 1 | +| @hunjixin | 1 | +1/-0 | 1 | + + + # v1.13.1 / 2021-11-26 This is an optional Lotus v1.13.1 release. 
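For illustration, here is a minimal sketch of the reworked v1 retrieval flow described in the v1.13.2 highlights above (`ClientRetrieve` to start the retrieval, `ClientRetrieveWait` to block until the deal completes, `ClientExport` to write the data out), using only the signatures and types introduced later in this diff. It assumes an already-connected v1 `api.FullNode` client; `fapi`, `provider`, `root`, and `outPath` are illustrative placeholders and error handling is kept minimal.

```go
// Minimal sketch of the v1 retrieval flow; not part of this diff.
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/ipfs/go-cid"

	lapi "github.com/filecoin-project/lotus/api"
)

func retrieveToFile(ctx context.Context, fapi lapi.FullNode, provider address.Address, root cid.Cid, outPath string) error {
	payer, err := fapi.WalletDefaultAddress(ctx)
	if err != nil {
		return err
	}

	// Ask the provider for an offer and turn it into a retrieval order.
	offer, err := fapi.ClientMinerQueryOffer(ctx, provider, root, nil)
	if err != nil {
		return err
	}
	if offer.Err != "" {
		return fmt.Errorf("offer error: %s", offer.Err)
	}
	order := offer.Order(payer)

	// Optional partial retrieval: restrict the order to a sub-DAG with an
	// IPLD path selector (see api.Selector / DagSpec in this diff), e.g.:
	// sel := lapi.Selector("Links/0/Hash")
	// order.DataSelector = &sel

	// Start the retrieval; data lands in the local repo (or an IPFS node
	// if the ipfs integration is enabled).
	res, err := fapi.ClientRetrieve(ctx, order)
	if err != nil {
		return err
	}

	// Block until the retrieval deal completes.
	if err := fapi.ClientRetrieveWait(ctx, res.DealID); err != nil {
		return err
	}

	// Export the retrieved DAG from the local node into a regular file
	// (set IsCAR on the FileRef to write a CAR file instead).
	return fapi.ClientExport(ctx,
		lapi.ExportRef{Root: root, DealID: res.DealID},
		lapi.FileRef{Path: outPath},
	)
}
```

The v0 `ClientRetrieve`/`ClientRetrieveWithEvents` behaviour is preserved by composing exactly these calls, as the `api/v0api/v1_wrapper.go` changes later in this diff show.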
diff --git a/Dockerfile.lotus b/Dockerfile.lotus index 72c609305..812ad9f61 100644 --- a/Dockerfile.lotus +++ b/Dockerfile.lotus @@ -36,7 +36,7 @@ WORKDIR /opt/filecoin ARG RUSTFLAGS="" ARG GOFLAGS="" -RUN make lotus lotus-miner lotus-worker lotus-shed lotus-wallet lotus-gateway +RUN make lotus lotus-miner lotus-worker lotus-shed lotus-wallet lotus-gateway lotus-stats FROM ubuntu:20.04 AS base @@ -66,8 +66,6 @@ COPY scripts/docker-lotus-entrypoint.sh / ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters ENV LOTUS_PATH /var/lib/lotus -ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 -ENV LOTUS_JAEGER_AGENT_PORT 6831 ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car ENV DOCKER_LOTUS_IMPORT_WALLET "" @@ -92,8 +90,6 @@ MAINTAINER Lotus Development Team COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/ ENV WALLET_PATH /var/lib/lotus-wallet -ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 -ENV LOTUS_JAEGER_AGENT_PORT 6831 RUN mkdir /var/lib/lotus-wallet RUN chown fc: /var/lib/lotus-wallet @@ -114,10 +110,6 @@ MAINTAINER Lotus Development Team COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/ -ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 -ENV LOTUS_JAEGER_AGENT_PORT 6831 -ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http - USER fc EXPOSE 1234 @@ -135,11 +127,7 @@ COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/ COPY scripts/docker-lotus-miner-entrypoint.sh / ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters -ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http ENV LOTUS_MINER_PATH /var/lib/lotus-miner -ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 -ENV LOTUS_JAEGER_AGENT_PORT 6831 -ENV DOCKER_LOTUS_MINER_INIT true RUN mkdir /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters RUN chown fc: /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters @@ -163,10 +151,7 @@ MAINTAINER Lotus Development Team COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/ ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters -ENV MINER_API_INFO /ip4/127.0.0.1/tcp/2345/http ENV LOTUS_WORKER_PATH /var/lib/lotus-worker -ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 -ENV LOTUS_JAEGER_AGENT_PORT 6831 RUN mkdir /var/lib/lotus-worker RUN chown fc: /var/lib/lotus-worker @@ -186,16 +171,11 @@ CMD ["-help"] from base as lotus-all-in-one ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters -ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http -ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 -ENV LOTUS_JAEGER_AGENT_PORT 6831 ENV LOTUS_MINER_PATH /var/lib/lotus-miner ENV LOTUS_PATH /var/lib/lotus ENV LOTUS_WORKER_PATH /var/lib/lotus-worker -ENV MINER_API_INFO /ip4/127.0.0.1/tcp/2345/http ENV WALLET_PATH /var/lib/lotus-wallet ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car -ENV DOCKER_LOTUS_MINER_INIT true COPY --from=builder /opt/filecoin/lotus /usr/local/bin/ COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/ @@ -203,6 +183,7 @@ COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/ COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/ COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/ COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/ +COPY --from=builder /opt/filecoin/lotus-stats /usr/local/bin/ RUN mkdir /var/tmp/filecoin-proof-parameters RUN mkdir /var/lib/lotus diff --git a/api/api_full.go b/api/api_full.go index 158590b0d..06aaff99c 100644 --- a/api/api_full.go +++ 
b/api/api_full.go @@ -7,7 +7,6 @@ import ( "time" "github.com/ipfs/go-cid" - textselector "github.com/ipld/go-ipld-selector-text-lite" "github.com/libp2p/go-libp2p-core/peer" "github.com/filecoin-project/go-address" @@ -28,7 +27,6 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/actors/builtin/power" "github.com/filecoin-project/lotus/chain/types" - marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo/imports" ) @@ -352,10 +350,11 @@ type FullNode interface { // ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error) //perm:read // ClientRetrieve initiates the retrieval of a file, as specified in the order. - ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error //perm:admin - // ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel - // of status updates. - ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin + ClientRetrieve(ctx context.Context, params RetrievalOrder) (*RestrievalRes, error) //perm:admin + // ClientRetrieveWait waits for retrieval to be complete + ClientRetrieveWait(ctx context.Context, deal retrievalmarket.DealID) error //perm:admin + // ClientExport exports a file stored in the local filestore to a system file + ClientExport(ctx context.Context, exportRef ExportRef, fileRef FileRef) error //perm:admin // ClientListRetrievals returns information about retrievals made by the local client ClientListRetrievals(ctx context.Context) ([]RetrievalInfo, error) //perm:write // ClientGetRetrievalUpdates returns status of updated retrieval deals @@ -630,10 +629,14 @@ type FullNode interface { // , , MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign + // MsigCancel cancels a previously-proposed multisig message + // It takes the following params: , + MsigCancel(context.Context, address.Address, uint64, address.Address) (*MessagePrototype, error) //perm:sign + // MsigCancel cancels a previously-proposed multisig message // It takes the following params: , , , , // , , - MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign + MsigCancelTxnHash(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign // MsigAddPropose proposes adding a signer in the multisig // It takes the following params: , , @@ -930,15 +933,14 @@ type MarketDeal struct { } type RetrievalOrder struct { - // TODO: make this less unixfs specific - Root cid.Cid - Piece *cid.Cid - DatamodelPathSelector *textselector.Expression - Size uint64 + Root cid.Cid + Piece *cid.Cid + DataSelector *Selector + + // todo: Size/Total are only used for calculating price per byte; we should let users just pass that + Size uint64 + Total types.BigInt - FromLocalCAR string // if specified, get data from a local CARv2 file. 
- // TODO: support offset - Total types.BigInt UnsealPrice types.BigInt PaymentInterval uint64 PaymentIntervalIncrease uint64 diff --git a/api/api_gateway.go b/api/api_gateway.go index 862c6ddb5..fbe2e0cd6 100644 --- a/api/api_gateway.go +++ b/api/api_gateway.go @@ -31,6 +31,8 @@ import ( type Gateway interface { ChainHasObj(context.Context, cid.Cid) (bool, error) ChainHead(ctx context.Context) (*types.TipSet, error) + ChainGetParentMessages(context.Context, cid.Cid) ([]Message, error) + ChainGetParentReceipts(context.Context, cid.Cid) ([]*types.MessageReceipt, error) ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*HeadChange, error) @@ -39,6 +41,7 @@ type Gateway interface { ChainGetTipSetAfterHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) ChainNotify(context.Context) (<-chan []*HeadChange, error) ChainReadObj(context.Context, cid.Cid) ([]byte, error) + ChainGetGenesis(context.Context) (*types.TipSet, error) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index 25b9ac8c9..5478e5ea6 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -91,6 +91,8 @@ func init() { storeIDExample := imports.ID(50) textSelExample := textselector.Expression("Links/21/Hash/Links/42/Hash") + apiSelExample := api.Selector("Links/21/Hash/Links/42/Hash") + clientEvent := retrievalmarket.ClientEventDealAccepted addExample(bitfield.NewFromSet([]uint64{5})) addExample(abi.RegisteredSealProof_StackedDrg32GiBV1_1) @@ -122,9 +124,12 @@ func init() { addExample(datatransfer.Ongoing) addExample(storeIDExample) addExample(&storeIDExample) + addExample(clientEvent) + addExample(&clientEvent) addExample(retrievalmarket.ClientEventDealAccepted) addExample(retrievalmarket.DealStatusNew) addExample(&textSelExample) + addExample(&apiSelExample) addExample(network.ReachabilityPublic) addExample(build.NewestNetworkVersion) addExample(map[string]int{"name": 42}) @@ -226,16 +231,18 @@ func init() { Hostname: "host", Resources: storiface.WorkerResources{ MemPhysical: 256 << 30, + MemUsed: 2 << 30, MemSwap: 120 << 30, - MemReserved: 2 << 30, + MemSwapUsed: 2 << 30, CPUs: 64, GPUs: []string{"aGPU 1337"}, + Resources: storiface.ResourceTable, }, }, Enabled: true, MemUsedMin: 0, MemUsedMax: 0, - GpuUsed: false, + GpuUsed: 0, CpuUse: 0, }, }) @@ -281,6 +288,7 @@ func init() { State: "ShardStateAvailable", Error: "", }) + addExample(storiface.ResourceTable) } func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) { diff --git a/api/mocks/mock_full.go b/api/mocks/mock_full.go index a6781b0b7..3f9d75433 100644 --- a/api/mocks/mock_full.go +++ b/api/mocks/mock_full.go @@ -25,7 +25,6 @@ import ( miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner" types "github.com/filecoin-project/lotus/chain/types" alerting "github.com/filecoin-project/lotus/journal/alerting" - marketevents "github.com/filecoin-project/lotus/markets/loggers" dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" imports "github.com/filecoin-project/lotus/node/repo/imports" miner0 
"github.com/filecoin-project/specs-actors/actors/builtin/miner" @@ -537,6 +536,20 @@ func (mr *MockFullNodeMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomo return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockFullNode)(nil).ClientDealSize), arg0, arg1) } +// ClientExport mocks base method. +func (m *MockFullNode) ClientExport(arg0 context.Context, arg1 api.ExportRef, arg2 api.FileRef) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientExport", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientExport indicates an expected call of ClientExport. +func (mr *MockFullNodeMockRecorder) ClientExport(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientExport", reflect.TypeOf((*MockFullNode)(nil).ClientExport), arg0, arg1, arg2) +} + // ClientFindData mocks base method. func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]api.QueryOffer, error) { m.ctrl.T.Helper() @@ -775,17 +788,18 @@ func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, } // ClientRetrieve mocks base method. -func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) error { +func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder) (*api.RestrievalRes, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 + ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1) + ret0, _ := ret[0].(*api.RestrievalRes) + ret1, _ := ret[1].(error) + return ret0, ret1 } // ClientRetrieve indicates an expected call of ClientRetrieve. -func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1) } // ClientRetrieveTryRestartInsufficientFunds mocks base method. @@ -802,19 +816,18 @@ func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(ar return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1) } -// ClientRetrieveWithEvents mocks base method. -func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { +// ClientRetrieveWait mocks base method. +func (m *MockFullNode) ClientRetrieveWait(arg0 context.Context, arg1 retrievalmarket.DealID) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2) - ret0, _ := ret[0].(<-chan marketevents.RetrievalEvent) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "ClientRetrieveWait", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 } -// ClientRetrieveWithEvents indicates an expected call of ClientRetrieveWithEvents. -func (mr *MockFullNodeMockRecorder) ClientRetrieveWithEvents(arg0, arg1, arg2 interface{}) *gomock.Call { +// ClientRetrieveWait indicates an expected call of ClientRetrieveWait. 
+func (mr *MockFullNodeMockRecorder) ClientRetrieveWait(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWithEvents", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWithEvents), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWait", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWait), arg0, arg1) } // ClientStartDeal mocks base method. @@ -1428,18 +1441,33 @@ func (mr *MockFullNodeMockRecorder) MsigApproveTxnHash(arg0, arg1, arg2, arg3, a } // MsigCancel mocks base method. -func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (*api.MessagePrototype, error) { +func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address) (*api.MessagePrototype, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*api.MessagePrototype) ret1, _ := ret[1].(error) return ret0, ret1 } // MsigCancel indicates an expected call of MsigCancel. -func (mr *MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { +func (mr *MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3) +} + +// MsigCancelTxnHash mocks base method. +func (m *MockFullNode) MsigCancelTxnHash(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (*api.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigCancelTxnHash", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + ret0, _ := ret[0].(*api.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigCancelTxnHash indicates an expected call of MsigCancelTxnHash. +func (mr *MockFullNodeMockRecorder) MsigCancelTxnHash(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancelTxnHash", reflect.TypeOf((*MockFullNode)(nil).MsigCancelTxnHash), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) } // MsigCreate mocks base method. 
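As a usage note on the multisig change above (the new three-argument `MsigCancel` plus the renamed full-argument `MsigCancelTxnHash`), the following is a small, hedged sketch against the v1 `FullNode` interface. `fapi`, the addresses, and the transaction ID are placeholders; pushing the returned prototype through `MpoolPushMessage` is one plausible way to send it and is an assumption, not something shown in this diff.

```go
// Hedged sketch: cancelling a pending multisig transaction with the new
// three-argument MsigCancel. MsigCancelTxnHash keeps the previous
// eight-argument, hash-checked form for callers that still need it.
package example

import (
	"context"

	"github.com/filecoin-project/go-address"

	lapi "github.com/filecoin-project/lotus/api"
)

func cancelPending(ctx context.Context, fapi lapi.FullNode, msig, src address.Address, txID uint64) error {
	// New form: cancel purely by multisig address + transaction ID + sender.
	proto, err := fapi.MsigCancel(ctx, msig, txID, src)
	if err != nil {
		return err
	}

	// The prototype wraps an unsigned message; sending it via the message
	// pool is one way to complete the cancel (assumption, not in this diff).
	_, err = fapi.MpoolPushMessage(ctx, &proto.Message, nil)
	return err
}
```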
diff --git a/api/proxy_gen.go b/api/proxy_gen.go index b36f19a7e..feb08531f 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -28,7 +28,6 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" "github.com/filecoin-project/lotus/journal/alerting" - marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo/imports" "github.com/filecoin-project/specs-storage/storage" @@ -162,6 +161,8 @@ type FullNodeStruct struct { ClientDealSize func(p0 context.Context, p1 cid.Cid) (DataSize, error) `perm:"read"` + ClientExport func(p0 context.Context, p1 ExportRef, p2 FileRef) error `perm:"admin"` + ClientFindData func(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) `perm:"read"` ClientGenCar func(p0 context.Context, p1 FileRef, p2 string) error `perm:"write"` @@ -194,11 +195,11 @@ type FullNodeStruct struct { ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` - ClientRetrieve func(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error `perm:"admin"` + ClientRetrieve func(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) `perm:"admin"` ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"` - ClientRetrieveWithEvents func(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"` + ClientRetrieveWait func(p0 context.Context, p1 retrievalmarket.DealID) error `perm:"admin"` ClientStartDeal func(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) `perm:"admin"` @@ -270,7 +271,9 @@ type FullNodeStruct struct { MsigApproveTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) `perm:"sign"` - MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) `perm:"sign"` + MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) `perm:"sign"` + + MsigCancelTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) `perm:"sign"` MsigCreate func(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) `perm:"sign"` @@ -478,8 +481,14 @@ type GatewayStruct struct { Internal struct { ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) `` + ChainGetGenesis func(p0 context.Context) (*types.TipSet, error) `` + ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `` + ChainGetParentMessages func(p0 context.Context, p1 cid.Cid) ([]Message, error) `` + + ChainGetParentReceipts func(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) `` + ChainGetPath func(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) `` ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) `` @@ -1349,6 +1358,17 @@ func (s *FullNodeStub) ClientDealSize(p0 context.Context, p1 cid.Cid) (DataSize, return 
*new(DataSize), ErrNotSupported } +func (s *FullNodeStruct) ClientExport(p0 context.Context, p1 ExportRef, p2 FileRef) error { + if s.Internal.ClientExport == nil { + return ErrNotSupported + } + return s.Internal.ClientExport(p0, p1, p2) +} + +func (s *FullNodeStub) ClientExport(p0 context.Context, p1 ExportRef, p2 FileRef) error { + return ErrNotSupported +} + func (s *FullNodeStruct) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) { if s.Internal.ClientFindData == nil { return *new([]QueryOffer), ErrNotSupported @@ -1525,15 +1545,15 @@ func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatran return ErrNotSupported } -func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error { +func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) { if s.Internal.ClientRetrieve == nil { - return ErrNotSupported + return nil, ErrNotSupported } - return s.Internal.ClientRetrieve(p0, p1, p2) + return s.Internal.ClientRetrieve(p0, p1) } -func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error { - return ErrNotSupported +func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) { + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error { @@ -1547,15 +1567,15 @@ func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Cont return ErrNotSupported } -func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) { - if s.Internal.ClientRetrieveWithEvents == nil { - return nil, ErrNotSupported +func (s *FullNodeStruct) ClientRetrieveWait(p0 context.Context, p1 retrievalmarket.DealID) error { + if s.Internal.ClientRetrieveWait == nil { + return ErrNotSupported } - return s.Internal.ClientRetrieveWithEvents(p0, p1, p2) + return s.Internal.ClientRetrieveWait(p0, p1) } -func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) { - return nil, ErrNotSupported +func (s *FullNodeStub) ClientRetrieveWait(p0 context.Context, p1 retrievalmarket.DealID) error { + return ErrNotSupported } func (s *FullNodeStruct) ClientStartDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) { @@ -1943,14 +1963,25 @@ func (s *FullNodeStub) MsigApproveTxnHash(p0 context.Context, p1 address.Address return nil, ErrNotSupported } -func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) { +func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) { if s.Internal.MsigCancel == nil { return nil, ErrNotSupported } - return s.Internal.MsigCancel(p0, p1, p2, p3, p4, p5, p6, p7) + return s.Internal.MsigCancel(p0, p1, p2, p3) } -func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) { +func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) { + return nil, ErrNotSupported +} + +func (s *FullNodeStruct) MsigCancelTxnHash(p0 
context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) { + if s.Internal.MsigCancelTxnHash == nil { + return nil, ErrNotSupported + } + return s.Internal.MsigCancelTxnHash(p0, p1, p2, p3, p4, p5, p6, p7) +} + +func (s *FullNodeStub) MsigCancelTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) { return nil, ErrNotSupported } @@ -3032,6 +3063,17 @@ func (s *GatewayStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*Bl return nil, ErrNotSupported } +func (s *GatewayStruct) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) { + if s.Internal.ChainGetGenesis == nil { + return nil, ErrNotSupported + } + return s.Internal.ChainGetGenesis(p0) +} + +func (s *GatewayStub) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) { + return nil, ErrNotSupported +} + func (s *GatewayStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { if s.Internal.ChainGetMessage == nil { return nil, ErrNotSupported @@ -3043,6 +3085,28 @@ func (s *GatewayStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Me return nil, ErrNotSupported } +func (s *GatewayStruct) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]Message, error) { + if s.Internal.ChainGetParentMessages == nil { + return *new([]Message), ErrNotSupported + } + return s.Internal.ChainGetParentMessages(p0, p1) +} + +func (s *GatewayStub) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]Message, error) { + return *new([]Message), ErrNotSupported +} + +func (s *GatewayStruct) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) { + if s.Internal.ChainGetParentReceipts == nil { + return *new([]*types.MessageReceipt), ErrNotSupported + } + return s.Internal.ChainGetParentReceipts(p0, p1) +} + +func (s *GatewayStub) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) { + return *new([]*types.MessageReceipt), ErrNotSupported +} + func (s *GatewayStruct) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) { if s.Internal.ChainGetPath == nil { return *new([]*HeadChange), ErrNotSupported diff --git a/api/types.go b/api/types.go index 9d887b0a1..0ecda0405 100644 --- a/api/types.go +++ b/api/types.go @@ -5,11 +5,10 @@ import ( "fmt" "time" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/lotus/chain/types" - datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/types" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" @@ -194,4 +193,47 @@ type RetrievalInfo struct { TransferChannelID *datatransfer.ChannelID DataTransfer *DataTransferChannel + + // optional event if part of ClientGetRetrievalUpdates + Event *retrievalmarket.ClientEvent +} + +type RestrievalRes struct { + DealID retrievalmarket.DealID +} + +// Selector specifies ipld selector string +// - if the string starts with '{', it's interpreted as json selector string +// see https://ipld.io/specs/selectors/ and https://ipld.io/specs/selectors/fixtures/selector-fixtures-1/ +// - otherwise the string is interpreted as ipld-selector-text-lite (simple ipld path) +// see 
https://github.com/ipld/go-ipld-selector-text-lite +type Selector string + +type DagSpec struct { + // DataSelector matches data to be retrieved + // - when using textselector, the path specifies subtree + // - the matched graph must have a single root + DataSelector *Selector + + // ExportMerkleProof is applicable only when exporting to a CAR file via a path textselector + // When true, in addition to the selection target, the resulting CAR will contain every block along the + // path back to, and including the original root + // When false the resulting CAR contains only the blocks of the target subdag + ExportMerkleProof bool +} + +type ExportRef struct { + Root cid.Cid + + // DAGs array specifies a list of DAGs to export + // - If exporting into unixfs files, only one DAG is supported, DataSelector is only used to find the targeted root node + // - If exporting into a car file + // - When exactly one text-path DataSelector is specified exports the subgraph and its full merkle-path from the original root + // - Otherwise ( multiple paths and/or JSON selector specs) determines each individual subroot and exports the subtrees as a multi-root car + // - When not specified defaults to a single DAG: + // - Data - the entire DAG: `{"R":{"l":{"none":{}},":>":{"a":{">":{"@":{}}}}}}` + DAGs []DagSpec + + FromLocalCAR string // if specified, get data from a local CARv2 file. + DealID retrievalmarket.DealID } diff --git a/api/v0api/full.go b/api/v0api/full.go index d7e38ce97..b37e89155 100644 --- a/api/v0api/full.go +++ b/api/v0api/full.go @@ -12,6 +12,7 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" "github.com/ipfs/go-cid" + textselector "github.com/ipld/go-ipld-selector-text-lite" "github.com/libp2p/go-libp2p-core/peer" "github.com/filecoin-project/lotus/api" @@ -325,10 +326,10 @@ type FullNode interface { // ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) //perm:read // ClientRetrieve initiates the retrieval of a file, as specified in the order. - ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error //perm:admin + ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef) error //perm:admin // ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel // of status updates. - ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin + ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin // ClientQueryAsk returns a signed StorageAsk from the specified miner. 
// ClientListRetrievals returns information about retrievals made by the local client ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) //perm:write @@ -714,3 +715,37 @@ type FullNode interface { // the path specified when calling CreateBackup is within the base path CreateBackup(ctx context.Context, fpath string) error //perm:admin } + +func OfferOrder(o api.QueryOffer, client address.Address) RetrievalOrder { + return RetrievalOrder{ + Root: o.Root, + Piece: o.Piece, + Size: o.Size, + Total: o.MinPrice, + UnsealPrice: o.UnsealPrice, + PaymentInterval: o.PaymentInterval, + PaymentIntervalIncrease: o.PaymentIntervalIncrease, + Client: client, + + Miner: o.Miner, + MinerPeer: &o.MinerPeer, + } +} + +type RetrievalOrder struct { + // TODO: make this less unixfs specific + Root cid.Cid + Piece *cid.Cid + DatamodelPathSelector *textselector.Expression + Size uint64 + + FromLocalCAR string // if specified, get data from a local CARv2 file. + // TODO: support offset + Total types.BigInt + UnsealPrice types.BigInt + PaymentInterval uint64 + PaymentIntervalIncrease uint64 + Client address.Address + Miner address.Address + MinerPeer *retrievalmarket.RetrievalPeer +} diff --git a/api/v0api/proxy_gen.go b/api/v0api/proxy_gen.go index dd6330a02..af0687fe5 100644 --- a/api/v0api/proxy_gen.go +++ b/api/v0api/proxy_gen.go @@ -125,11 +125,11 @@ type FullNodeStruct struct { ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` - ClientRetrieve func(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error `perm:"admin"` + ClientRetrieve func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error `perm:"admin"` ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"` - ClientRetrieveWithEvents func(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"` + ClientRetrieveWithEvents func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"` ClientStartDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"admin"` @@ -965,14 +965,14 @@ func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatran return ErrNotSupported } -func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error { +func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error { if s.Internal.ClientRetrieve == nil { return ErrNotSupported } return s.Internal.ClientRetrieve(p0, p1, p2) } -func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error { +func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error { return ErrNotSupported } @@ -987,14 +987,14 @@ func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Cont return ErrNotSupported } -func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { +func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { if s.Internal.ClientRetrieveWithEvents == nil { return nil, ErrNotSupported } return s.Internal.ClientRetrieveWithEvents(p0, p1, p2) } -func (s *FullNodeStub) ClientRetrieveWithEvents(p0 
context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { +func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { return nil, ErrNotSupported } diff --git a/api/v0api/v0mocks/mock_full.go b/api/v0api/v0mocks/mock_full.go index 0344eebf3..3e9caaee8 100644 --- a/api/v0api/v0mocks/mock_full.go +++ b/api/v0api/v0mocks/mock_full.go @@ -21,6 +21,7 @@ import ( network "github.com/filecoin-project/go-state-types/network" api "github.com/filecoin-project/lotus/api" apitypes "github.com/filecoin-project/lotus/api/types" + v0api "github.com/filecoin-project/lotus/api/v0api" miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner" types "github.com/filecoin-project/lotus/chain/types" alerting "github.com/filecoin-project/lotus/journal/alerting" @@ -760,7 +761,7 @@ func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, } // ClientRetrieve mocks base method. -func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) error { +func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 v0api.RetrievalOrder, arg2 *api.FileRef) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2) ret0, _ := ret[0].(error) @@ -788,7 +789,7 @@ func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(ar } // ClientRetrieveWithEvents mocks base method. -func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { +func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 v0api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2) ret0, _ := ret[0].(<-chan marketevents.RetrievalEvent) diff --git a/api/v0api/v1_wrapper.go b/api/v0api/v1_wrapper.go index 7f7291600..7e0d7a94a 100644 --- a/api/v0api/v1_wrapper.go +++ b/api/v0api/v1_wrapper.go @@ -3,7 +3,10 @@ package v0api import ( "context" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" + marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/chain/types" @@ -108,7 +111,7 @@ func (w *WrapperV1Full) MsigApproveTxnHash(ctx context.Context, msig address.Add } func (w *WrapperV1Full) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { - p, err := w.FullNode.MsigCancel(ctx, msig, txID, to, amt, src, method, params) + p, err := w.FullNode.MsigCancelTxnHash(ctx, msig, txID, to, amt, src, method, params) if err != nil { return cid.Undef, xerrors.Errorf("creating prototype: %w", err) } @@ -194,4 +197,144 @@ func (w *WrapperV1Full) ChainGetRandomnessFromBeacon(ctx context.Context, tsk ty return w.StateGetRandomnessFromBeacon(ctx, personalization, randEpoch, entropy, tsk) } +func (w *WrapperV1Full) ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef) error { + events := make(chan marketevents.RetrievalEvent) + go w.clientRetrieve(ctx, order, ref, events) + + for { + select { + case evt, ok := <-events: + if !ok { // done successfully + return nil + 
} + + if evt.Err != "" { + return xerrors.Errorf("retrieval failed: %s", evt.Err) + } + case <-ctx.Done(): + return xerrors.Errorf("retrieval timed out") + } + } +} + +func (w *WrapperV1Full) ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { + events := make(chan marketevents.RetrievalEvent) + go w.clientRetrieve(ctx, order, ref, events) + return events, nil +} + +func readSubscribeEvents(ctx context.Context, dealID retrievalmarket.DealID, subscribeEvents <-chan api.RetrievalInfo, events chan marketevents.RetrievalEvent) error { + for { + var subscribeEvent api.RetrievalInfo + var evt retrievalmarket.ClientEvent + select { + case <-ctx.Done(): + return xerrors.New("Retrieval Timed Out") + case subscribeEvent = <-subscribeEvents: + if subscribeEvent.ID != dealID { + // we can't check the deal ID ahead of time because: + // 1. We need to subscribe before retrieving. + // 2. We won't know the deal ID until after retrieving. + continue + } + if subscribeEvent.Event != nil { + evt = *subscribeEvent.Event + } + } + + select { + case <-ctx.Done(): + return xerrors.New("Retrieval Timed Out") + case events <- marketevents.RetrievalEvent{ + Event: evt, + Status: subscribeEvent.Status, + BytesReceived: subscribeEvent.BytesReceived, + FundsSpent: subscribeEvent.TotalPaid, + }: + } + + switch subscribeEvent.Status { + case retrievalmarket.DealStatusCompleted: + return nil + case retrievalmarket.DealStatusRejected: + return xerrors.Errorf("Retrieval Proposal Rejected: %s", subscribeEvent.Message) + case + retrievalmarket.DealStatusDealNotFound, + retrievalmarket.DealStatusErrored: + return xerrors.Errorf("Retrieval Error: %s", subscribeEvent.Message) + } + } +} + +func (w *WrapperV1Full) clientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef, events chan marketevents.RetrievalEvent) { + defer close(events) + + finish := func(e error) { + if e != nil { + events <- marketevents.RetrievalEvent{Err: e.Error(), FundsSpent: big.Zero()} + } + } + + var dealID retrievalmarket.DealID + if order.FromLocalCAR == "" { + // Subscribe to events before retrieving to avoid losing events. + subscribeCtx, cancel := context.WithCancel(ctx) + defer cancel() + retrievalEvents, err := w.ClientGetRetrievalUpdates(subscribeCtx) + + if err != nil { + finish(xerrors.Errorf("GetRetrievalUpdates failed: %w", err)) + return + } + + retrievalRes, err := w.FullNode.ClientRetrieve(ctx, api.RetrievalOrder{ + Root: order.Root, + Piece: order.Piece, + Size: order.Size, + Total: order.Total, + UnsealPrice: order.UnsealPrice, + PaymentInterval: order.PaymentInterval, + PaymentIntervalIncrease: order.PaymentIntervalIncrease, + Client: order.Client, + Miner: order.Miner, + MinerPeer: order.MinerPeer, + }) + + if err != nil { + finish(xerrors.Errorf("Retrieve failed: %w", err)) + return + } + + dealID = retrievalRes.DealID + + err = readSubscribeEvents(ctx, retrievalRes.DealID, retrievalEvents, events) + if err != nil { + finish(xerrors.Errorf("Retrieve: %w", err)) + return + } + } + + // If ref is nil, it only fetches the data into the configured blockstore. 
+ if ref == nil { + finish(nil) + return + } + + eref := api.ExportRef{ + Root: order.Root, + FromLocalCAR: order.FromLocalCAR, + DealID: dealID, + } + + if order.DatamodelPathSelector != nil { + s := api.Selector(*order.DatamodelPathSelector) + eref.DAGs = append(eref.DAGs, api.DagSpec{ + DataSelector: &s, + ExportMerkleProof: true, + }) + } + + finish(w.ClientExport(ctx, eref, *ref)) +} + var _ FullNode = &WrapperV1Full{} diff --git a/api/version.go b/api/version.go index 2c87fe0a4..93148f28d 100644 --- a/api/version.go +++ b/api/version.go @@ -58,7 +58,7 @@ var ( FullAPIVersion1 = newVer(2, 1, 0) MinerAPIVersion0 = newVer(1, 2, 0) - WorkerAPIVersion0 = newVer(1, 1, 0) + WorkerAPIVersion0 = newVer(1, 5, 0) ) //nolint:varcheck,deadcode diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz index e70c35073..9c8692acc 100644 Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz index f8ec0f7fb..c2707bfa0 100644 Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz index 3ba1f8b7b..605a44f09 100644 Binary files a/build/openrpc/worker.json.gz and b/build/openrpc/worker.json.gz differ diff --git a/build/version.go b/build/version.go index b4681afd3..a9adf49e6 100644 --- a/build/version.go +++ b/build/version.go @@ -37,7 +37,7 @@ func BuildTypeString() string { } // BuildVersion is the local build version -const BuildVersion = "1.13.1" +const BuildVersion = "1.13.2" func UserVersion() string { if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { diff --git a/chain/actors/builtin/miner/utils.go b/chain/actors/builtin/miner/utils.go index 2f24e8454..5fafc31ef 100644 --- a/chain/actors/builtin/miner/utils.go +++ b/chain/actors/builtin/miner/utils.go @@ -67,3 +67,22 @@ func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi. 
return 0, xerrors.Errorf("unsupported network version") } + +// WindowPoStProofTypeFromSectorSize returns preferred post proof type for creating +// new miner actors and new sectors +func WindowPoStProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredPoStProof, error) { + switch ssize { + case 2 << 10: + return abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, nil + case 8 << 20: + return abi.RegisteredPoStProof_StackedDrgWindow8MiBV1, nil + case 512 << 20: + return abi.RegisteredPoStProof_StackedDrgWindow512MiBV1, nil + case 32 << 30: + return abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, nil + case 64 << 30: + return abi.RegisteredPoStProof_StackedDrgWindow64GiBV1, nil + default: + return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize) + } +} diff --git a/cli/client.go b/cli/client.go index daaf5f3fe..f5b38788b 100644 --- a/cli/client.go +++ b/cli/client.go @@ -26,7 +26,6 @@ import ( datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/ipfs/go-cid" "github.com/ipfs/go-cidutil/cidenc" - textselector "github.com/ipld/go-ipld-selector-text-lite" "github.com/libp2p/go-libp2p-core/peer" "github.com/multiformats/go-multibase" "github.com/urfave/cli/v2" @@ -94,6 +93,8 @@ var clientCmd = &cli.Command{ WithCategory("data", clientStat), WithCategory("retrieval", clientFindCmd), WithCategory("retrieval", clientRetrieveCmd), + WithCategory("retrieval", clientRetrieveCatCmd), + WithCategory("retrieval", clientRetrieveLsCmd), WithCategory("retrieval", clientCancelRetrievalDealCmd), WithCategory("retrieval", clientListRetrievalsCmd), WithCategory("util", clientCommPCmd), @@ -1029,209 +1030,6 @@ var clientFindCmd = &cli.Command{ }, } -const DefaultMaxRetrievePrice = "0.01" - -var clientRetrieveCmd = &cli.Command{ - Name: "retrieve", - Usage: "Retrieve data from network", - ArgsUsage: "[dataCid outputPath]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "from", - Usage: "address to send transactions from", - }, - &cli.BoolFlag{ - Name: "car", - Usage: "export to a car file instead of a regular file", - }, - &cli.StringFlag{ - Name: "miner", - Usage: "miner address for retrieval, if not present it'll use local discovery", - }, - &cli.StringFlag{ - Name: "datamodel-path-selector", - Usage: "a rudimentary (DM-level-only) text-path selector, allowing for sub-selection within a deal", - }, - &cli.StringFlag{ - Name: "maxPrice", - Usage: fmt.Sprintf("maximum price the client is willing to consider (default: %s FIL)", DefaultMaxRetrievePrice), - }, - &cli.StringFlag{ - Name: "pieceCid", - Usage: "require data to be retrieved from a specific Piece CID", - }, - &cli.BoolFlag{ - Name: "allow-local", - }, - }, - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 2 { - return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments")) - } - - fapi, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - afmt := NewAppFmt(cctx.App) - - var payer address.Address - if cctx.String("from") != "" { - payer, err = address.NewFromString(cctx.String("from")) - } else { - payer, err = fapi.WalletDefaultAddress(ctx) - } - if err != nil { - return err - } - - file, err := cid.Parse(cctx.Args().Get(0)) - if err != nil { - return err - } - - var pieceCid *cid.Cid - if cctx.String("pieceCid") != "" { - parsed, err := cid.Parse(cctx.String("pieceCid")) - if err != nil { - return err - } - pieceCid = &parsed - } - - var order *lapi.RetrievalOrder - if cctx.Bool("allow-local") { - imports, err := 
fapi.ClientListImports(ctx) - if err != nil { - return err - } - - for _, i := range imports { - if i.Root != nil && i.Root.Equals(file) { - order = &lapi.RetrievalOrder{ - Root: file, - FromLocalCAR: i.CARPath, - - Total: big.Zero(), - UnsealPrice: big.Zero(), - } - break - } - } - } - - if order == nil { - var offer api.QueryOffer - minerStrAddr := cctx.String("miner") - if minerStrAddr == "" { // Local discovery - offers, err := fapi.ClientFindData(ctx, file, pieceCid) - - var cleaned []api.QueryOffer - // filter out offers that errored - for _, o := range offers { - if o.Err == "" { - cleaned = append(cleaned, o) - } - } - - offers = cleaned - - // sort by price low to high - sort.Slice(offers, func(i, j int) bool { - return offers[i].MinPrice.LessThan(offers[j].MinPrice) - }) - if err != nil { - return err - } - - // TODO: parse offer strings from `client find`, make this smarter - if len(offers) < 1 { - fmt.Println("Failed to find file") - return nil - } - offer = offers[0] - } else { // Directed retrieval - minerAddr, err := address.NewFromString(minerStrAddr) - if err != nil { - return err - } - offer, err = fapi.ClientMinerQueryOffer(ctx, minerAddr, file, pieceCid) - if err != nil { - return err - } - } - if offer.Err != "" { - return fmt.Errorf("The received offer errored: %s", offer.Err) - } - - maxPrice := types.MustParseFIL(DefaultMaxRetrievePrice) - - if cctx.String("maxPrice") != "" { - maxPrice, err = types.ParseFIL(cctx.String("maxPrice")) - if err != nil { - return xerrors.Errorf("parsing maxPrice: %w", err) - } - } - - if offer.MinPrice.GreaterThan(big.Int(maxPrice)) { - return xerrors.Errorf("failed to find offer satisfying maxPrice: %s", maxPrice) - } - - o := offer.Order(payer) - order = &o - } - ref := &lapi.FileRef{ - Path: cctx.Args().Get(1), - IsCAR: cctx.Bool("car"), - } - - if sel := textselector.Expression(cctx.String("datamodel-path-selector")); sel != "" { - order.DatamodelPathSelector = &sel - } - - updates, err := fapi.ClientRetrieveWithEvents(ctx, *order, ref) - if err != nil { - return xerrors.Errorf("error setting up retrieval: %w", err) - } - - var prevStatus retrievalmarket.DealStatus - - for { - select { - case evt, ok := <-updates: - if ok { - afmt.Printf("> Recv: %s, Paid %s, %s (%s)\n", - types.SizeStr(types.NewInt(evt.BytesReceived)), - types.FIL(evt.FundsSpent), - retrievalmarket.ClientEvents[evt.Event], - retrievalmarket.DealStatuses[evt.Status], - ) - prevStatus = evt.Status - } - - if evt.Err != "" { - return xerrors.Errorf("retrieval failed: %s", evt.Err) - } - - if !ok { - if prevStatus == retrievalmarket.DealStatusCompleted { - afmt.Println("Success") - } else { - afmt.Printf("saw final deal state %s instead of expected success state DealStatusCompleted\n", - retrievalmarket.DealStatuses[prevStatus]) - } - return nil - } - - case <-ctx.Done(): - return xerrors.Errorf("retrieval timed out") - } - } - }, -} - var clientListRetrievalsCmd = &cli.Command{ Name: "list-retrievals", Usage: "List retrieval market deals", diff --git a/cli/client_retr.go b/cli/client_retr.go new file mode 100644 index 000000000..9b195a5d8 --- /dev/null +++ b/cli/client_retr.go @@ -0,0 +1,602 @@ +package cli + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path" + "sort" + "strings" + "time" + + "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-cid" + offline "github.com/ipfs/go-ipfs-exchange-offline" + "github.com/ipfs/go-merkledag" + carv2 "github.com/ipld/go-car/v2" + 
"github.com/ipld/go-car/v2/blockstore" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagjson" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/traversal" + "github.com/ipld/go-ipld-prime/traversal/selector" + "github.com/ipld/go-ipld-prime/traversal/selector/builder" + selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" + textselector "github.com/ipld/go-ipld-selector-text-lite" + "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-state-types/big" + + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/markets/utils" + "github.com/filecoin-project/lotus/node/repo" +) + +const DefaultMaxRetrievePrice = "0" + +func retrieve(ctx context.Context, cctx *cli.Context, fapi lapi.FullNode, sel *lapi.Selector, printf func(string, ...interface{})) (*lapi.ExportRef, error) { + var payer address.Address + var err error + if cctx.String("from") != "" { + payer, err = address.NewFromString(cctx.String("from")) + } else { + payer, err = fapi.WalletDefaultAddress(ctx) + } + if err != nil { + return nil, err + } + + file, err := cid.Parse(cctx.Args().Get(0)) + if err != nil { + return nil, err + } + + var pieceCid *cid.Cid + if cctx.String("pieceCid") != "" { + parsed, err := cid.Parse(cctx.String("pieceCid")) + if err != nil { + return nil, err + } + pieceCid = &parsed + } + + var eref *lapi.ExportRef + if cctx.Bool("allow-local") { + imports, err := fapi.ClientListImports(ctx) + if err != nil { + return nil, err + } + + for _, i := range imports { + if i.Root != nil && i.Root.Equals(file) { + eref = &lapi.ExportRef{ + Root: file, + FromLocalCAR: i.CARPath, + } + break + } + } + } + + // no local found, so make a retrieval + if eref == nil { + var offer lapi.QueryOffer + minerStrAddr := cctx.String("provider") + if minerStrAddr == "" { // Local discovery + offers, err := fapi.ClientFindData(ctx, file, pieceCid) + + var cleaned []lapi.QueryOffer + // filter out offers that errored + for _, o := range offers { + if o.Err == "" { + cleaned = append(cleaned, o) + } + } + + offers = cleaned + + // sort by price low to high + sort.Slice(offers, func(i, j int) bool { + return offers[i].MinPrice.LessThan(offers[j].MinPrice) + }) + if err != nil { + return nil, err + } + + // TODO: parse offer strings from `client find`, make this smarter + if len(offers) < 1 { + fmt.Println("Failed to find file") + return nil, nil + } + offer = offers[0] + } else { // Directed retrieval + minerAddr, err := address.NewFromString(minerStrAddr) + if err != nil { + return nil, err + } + offer, err = fapi.ClientMinerQueryOffer(ctx, minerAddr, file, pieceCid) + if err != nil { + return nil, err + } + } + if offer.Err != "" { + return nil, fmt.Errorf("offer error: %s", offer.Err) + } + + maxPrice := types.MustParseFIL(DefaultMaxRetrievePrice) + + if cctx.String("maxPrice") != "" { + maxPrice, err = types.ParseFIL(cctx.String("maxPrice")) + if err != nil { + return nil, xerrors.Errorf("parsing maxPrice: %w", err) + } + } + + if offer.MinPrice.GreaterThan(big.Int(maxPrice)) { + return nil, xerrors.Errorf("failed to find offer satisfying maxPrice: %s", maxPrice) + } + + o := offer.Order(payer) + o.DataSelector = sel + + subscribeEvents, err := 
fapi.ClientGetRetrievalUpdates(ctx) + if err != nil { + return nil, xerrors.Errorf("error setting up retrieval updates: %w", err) + } + retrievalRes, err := fapi.ClientRetrieve(ctx, o) + if err != nil { + return nil, xerrors.Errorf("error setting up retrieval: %w", err) + } + + start := time.Now() + readEvents: + for { + var evt lapi.RetrievalInfo + select { + case <-ctx.Done(): + return nil, xerrors.New("Retrieval Timed Out") + case evt = <-subscribeEvents: + if evt.ID != retrievalRes.DealID { + // we can't check the deal ID ahead of time because: + // 1. We need to subscribe before retrieving. + // 2. We won't know the deal ID until after retrieving. + continue + } + } + + event := "New" + if evt.Event != nil { + event = retrievalmarket.ClientEvents[*evt.Event] + } + + printf("Recv %s, Paid %s, %s (%s), %s\n", + types.SizeStr(types.NewInt(evt.BytesReceived)), + types.FIL(evt.TotalPaid), + strings.TrimPrefix(event, "ClientEvent"), + strings.TrimPrefix(retrievalmarket.DealStatuses[evt.Status], "DealStatus"), + time.Now().Sub(start).Truncate(time.Millisecond), + ) + + switch evt.Status { + case retrievalmarket.DealStatusCompleted: + break readEvents + case retrievalmarket.DealStatusRejected: + return nil, xerrors.Errorf("Retrieval Proposal Rejected: %s", evt.Message) + case + retrievalmarket.DealStatusDealNotFound, + retrievalmarket.DealStatusErrored: + return nil, xerrors.Errorf("Retrieval Error: %s", evt.Message) + } + } + + eref = &lapi.ExportRef{ + Root: file, + DealID: retrievalRes.DealID, + } + } + + return eref, nil +} + +var retrFlagsCommon = []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "address to send transactions from", + }, + &cli.StringFlag{ + Name: "provider", + Usage: "provider to use for retrieval, if not present it'll use local discovery", + Aliases: []string{"miner"}, + }, + &cli.StringFlag{ + Name: "maxPrice", + Usage: fmt.Sprintf("maximum price the client is willing to consider (default: %s FIL)", DefaultMaxRetrievePrice), + }, + &cli.StringFlag{ + Name: "pieceCid", + Usage: "require data to be retrieved from a specific Piece CID", + }, + &cli.BoolFlag{ + Name: "allow-local", + // todo: default to true? + }, +} + +var clientRetrieveCmd = &cli.Command{ + Name: "retrieve", + Usage: "Retrieve data from network", + ArgsUsage: "[dataCid outputPath]", + Description: `Retrieve data from the Filecoin network. + +The retrieve command will attempt to find a provider and make a retrieval deal with +them. In case a provider can't be found, it can be specified with the --provider +flag. + +By default the data will be interpreted as a DAG-PB UnixFSv1 File. Alternatively +a CAR file containing the raw IPLD graph can be exported by setting the --car +flag. + +Partial Retrieval: + +The --data-selector flag can be used to specify a sub-graph to fetch. The +selector can be specified as either an IPLD datamodel text-path selector or an IPLD +json selector. + +In case of unixfs retrieval, the selector must point at a single root node, and +match the entire graph under that node. + +In case of CAR retrieval, the selector must have one common "sub-root" node. + +Examples: + +- Retrieve a file by CID + $ lotus client retrieve Qm... my-file.txt + +- Retrieve a file by CID from f0123 + $ lotus client retrieve --provider f0123 Qm... my-file.txt + +- Retrieve the first file from a specified directory + $ lotus client retrieve --data-selector /Links/0/Hash Qm... 
my-file.txt +`, + Flags: append([]cli.Flag{ + &cli.BoolFlag{ + Name: "car", + Usage: "Export to a car file instead of a regular file", + }, + &cli.StringFlag{ + Name: "data-selector", + Aliases: []string{"datamodel-path-selector"}, + Usage: "IPLD datamodel text-path selector, or IPLD json selector", + }, + &cli.BoolFlag{ + Name: "car-export-merkle-proof", + Usage: "(requires --data-selector and --car) Export data-selector merkle proof", + }, + }, retrFlagsCommon...), + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 2 { + return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments")) + } + + if cctx.Bool("car-export-merkle-proof") { + if !cctx.Bool("car") || !cctx.IsSet("data-selector") { + return ShowHelp(cctx, fmt.Errorf("--car-export-merkle-proof requires --car and --data-selector")) + } + } + + fapi, closer, err := GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + afmt := NewAppFmt(cctx.App) + + var s *lapi.Selector + if sel := lapi.Selector(cctx.String("data-selector")); sel != "" { + s = &sel + } + + eref, err := retrieve(ctx, cctx, fapi, s, afmt.Printf) + if err != nil { + return err + } + + if s != nil { + eref.DAGs = append(eref.DAGs, lapi.DagSpec{DataSelector: s, ExportMerkleProof: cctx.Bool("car-export-merkle-proof")}) + } + + err = fapi.ClientExport(ctx, *eref, lapi.FileRef{ + Path: cctx.Args().Get(1), + IsCAR: cctx.Bool("car"), + }) + if err != nil { + return err + } + afmt.Println("Success") + return nil + }, +} + +func ClientExportStream(apiAddr string, apiAuth http.Header, eref lapi.ExportRef, car bool) (io.ReadCloser, error) { + rj, err := json.Marshal(eref) + if err != nil { + return nil, xerrors.Errorf("marshaling export ref: %w", err) + } + + ma, err := multiaddr.NewMultiaddr(apiAddr) + if err == nil { + _, addr, err := manet.DialArgs(ma) + if err != nil { + return nil, err + } + + // todo: make cliutil helpers for this + apiAddr = "http://" + addr + } + + aa, err := url.Parse(apiAddr) + if err != nil { + return nil, xerrors.Errorf("parsing api address: %w", err) + } + switch aa.Scheme { + case "ws": + aa.Scheme = "http" + case "wss": + aa.Scheme = "https" + } + + aa.Path = path.Join(aa.Path, "rest/v0/export") + req, err := http.NewRequest("GET", fmt.Sprintf("%s?car=%t&export=%s", aa, car, url.QueryEscape(string(rj))), nil) + if err != nil { + return nil, err + } + + req.Header = apiAuth + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + em, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, xerrors.Errorf("reading error body: %w", err) + } + + resp.Body.Close() // nolint + return nil, xerrors.Errorf("getting root car: http %d: %s", resp.StatusCode, string(em)) + } + + return resp.Body, nil +} + +var clientRetrieveCatCmd = &cli.Command{ + Name: "cat", + Usage: "Show data from network", + ArgsUsage: "[dataCid]", + Flags: append([]cli.Flag{ + &cli.BoolFlag{ + Name: "ipld", + Usage: "list IPLD datamodel links", + }, + &cli.StringFlag{ + Name: "data-selector", + Usage: "IPLD datamodel text-path selector, or IPLD json selector", + }, + }, retrFlagsCommon...), + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 1 { + return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments")) + } + + ainfo, err := GetAPIInfo(cctx, repo.FullNode) + if err != nil { + return xerrors.Errorf("could not get API info: %w", err) + } + + fapi, closer, err := GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer 
closer() + ctx := ReqContext(cctx) + afmt := NewAppFmt(cctx.App) + + sel := lapi.Selector(cctx.String("data-selector")) + selp := &sel + if sel == "" { + selp = nil + } + + eref, err := retrieve(ctx, cctx, fapi, selp, afmt.Printf) + if err != nil { + return err + } + + fmt.Println() // separate retrieval events from results + + if sel != "" { + eref.DAGs = append(eref.DAGs, lapi.DagSpec{DataSelector: &sel}) + } + + rc, err := ClientExportStream(ainfo.Addr, ainfo.AuthHeader(), *eref, false) + if err != nil { + return err + } + defer rc.Close() // nolint + + _, err = io.Copy(os.Stdout, rc) + return err + }, +} + +func pathToSel(psel string, matchTraversal bool, sub builder.SelectorSpec) (lapi.Selector, error) { + rs, err := textselector.SelectorSpecFromPath(textselector.Expression(psel), matchTraversal, sub) + if err != nil { + return "", xerrors.Errorf("failed to parse path-selector: %w", err) + } + + var b bytes.Buffer + if err := dagjson.Encode(rs.Node(), &b); err != nil { + return "", err + } + + return lapi.Selector(b.String()), nil +} + +var clientRetrieveLsCmd = &cli.Command{ + Name: "ls", + Usage: "List object links", + ArgsUsage: "[dataCid]", + Flags: append([]cli.Flag{ + &cli.BoolFlag{ + Name: "ipld", + Usage: "list IPLD datamodel links", + }, + &cli.IntFlag{ + Name: "depth", + Usage: "list links recursively up to the specified depth", + Value: 1, + }, + &cli.StringFlag{ + Name: "data-selector", + Usage: "IPLD datamodel text-path selector, or IPLD json selector", + }, + }, retrFlagsCommon...), + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 1 { + return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments")) + } + + ainfo, err := GetAPIInfo(cctx, repo.FullNode) + if err != nil { + return xerrors.Errorf("could not get API info: %w", err) + } + + fapi, closer, err := GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + afmt := NewAppFmt(cctx.App) + + dataSelector := lapi.Selector(fmt.Sprintf(`{"R":{"l":{"depth":%d},":>":{"a":{">":{"|":[{"@":{}},{".":{}}]}}}}}`, cctx.Int("depth"))) + + if cctx.IsSet("data-selector") { + ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) + dataSelector, err = pathToSel(cctx.String("data-selector"), cctx.Bool("ipld"), + ssb.ExploreUnion( + ssb.Matcher(), + ssb.ExploreAll( + ssb.ExploreRecursive(selector.RecursionLimitDepth(int64(cctx.Int("depth"))), ssb.ExploreAll(ssb.ExploreUnion(ssb.Matcher(), ssb.ExploreRecursiveEdge()))), + ))) + if err != nil { + return xerrors.Errorf("parsing datamodel path: %w", err) + } + } + + eref, err := retrieve(ctx, cctx, fapi, &dataSelector, afmt.Printf) + if err != nil { + return xerrors.Errorf("retrieve: %w", err) + } + + fmt.Println() // separate retrieval events from results + + eref.DAGs = append(eref.DAGs, lapi.DagSpec{ + DataSelector: &dataSelector, + }) + + rc, err := ClientExportStream(ainfo.Addr, ainfo.AuthHeader(), *eref, true) + if err != nil { + return xerrors.Errorf("export: %w", err) + } + defer rc.Close() // nolint + + var memcar bytes.Buffer + _, err = io.Copy(&memcar, rc) + if err != nil { + return err + } + + cbs, err := blockstore.NewReadOnly(&bytesReaderAt{bytes.NewReader(memcar.Bytes())}, nil, + carv2.ZeroLengthSectionAsEOF(true), + blockstore.UseWholeCIDs(true)) + if err != nil { + return xerrors.Errorf("opening car blockstore: %w", err) + } + + roots, err := cbs.Roots() + if err != nil { + return xerrors.Errorf("getting roots: %w", err) + } + + if len(roots) != 1 { + return xerrors.Errorf("expected 1 car root, got %d", 
len(roots)) + } + dserv := merkledag.NewDAGService(blockservice.New(cbs, offline.Exchange(cbs))) + + if !cctx.Bool("ipld") { + links, err := dserv.GetLinks(ctx, roots[0]) + if err != nil { + return xerrors.Errorf("getting links: %w", err) + } + + for _, link := range links { + fmt.Printf("%s %s\t%d\n", link.Cid, link.Name, link.Size) + } + } else { + jsel := lapi.Selector(fmt.Sprintf(`{"R":{"l":{"depth":%d},":>":{"a":{">":{"|":[{"@":{}},{".":{}}]}}}}}`, cctx.Int("depth"))) + + if cctx.IsSet("data-selector") { + ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) + jsel, err = pathToSel(cctx.String("data-selector"), false, + ssb.ExploreRecursive(selector.RecursionLimitDepth(int64(cctx.Int("depth"))), ssb.ExploreAll(ssb.ExploreUnion(ssb.Matcher(), ssb.ExploreRecursiveEdge()))), + ) + } + + sel, _ := selectorparse.ParseJSONSelector(string(jsel)) + + if err := utils.TraverseDag( + ctx, + dserv, + roots[0], + sel, + func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { + if r == traversal.VisitReason_SelectionMatch { + fmt.Println(p.Path) + } + return nil + }, + ); err != nil { + return err + } + } + + return err + }, +} + +type bytesReaderAt struct { + btr *bytes.Reader +} + +func (b bytesReaderAt) ReadAt(p []byte, off int64) (n int, err error) { + return b.btr.ReadAt(p, off) +} + +var _ io.ReaderAt = &bytesReaderAt{} diff --git a/cli/multisig.go b/cli/multisig.go index 7b93e55f9..0179378a7 100644 --- a/cli/multisig.go +++ b/cli/multisig.go @@ -51,6 +51,7 @@ var multisigCmd = &cli.Command{ msigProposeCmd, msigRemoveProposeCmd, msigApproveCmd, + msigCancelCmd, msigAddProposeCmd, msigAddApproveCmd, msigAddCancelCmd, @@ -159,6 +160,8 @@ var msigCreateCmd = &cli.Command{ msgCid := sm.Cid() + fmt.Println("sent create in message: ", msgCid) + // wait for it to get mined into a block wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { @@ -448,7 +451,7 @@ var msigProposeCmd = &cli.Command{ msgCid := sm.Cid() - fmt.Println("send proposal in message: ", msgCid) + fmt.Println("sent proposal in message: ", msgCid) wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { @@ -612,6 +615,131 @@ var msigApproveCmd = &cli.Command{ }, } +var msigCancelCmd = &cli.Command{ + Name: "cancel", + Usage: "Cancel a multisig message", + ArgsUsage: " [destination value [methodId methodParams]]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "account to send the cancel message from", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() < 2 { + return ShowHelp(cctx, fmt.Errorf("must pass at least multisig address and message ID")) + } + + if cctx.Args().Len() > 2 && cctx.Args().Len() < 4 { + return ShowHelp(cctx, fmt.Errorf("usage: msig cancel ")) + } + + if cctx.Args().Len() > 4 && cctx.Args().Len() != 6 { + return ShowHelp(cctx, fmt.Errorf("usage: msig cancel [ ]")) + } + + srv, err := GetFullNodeServices(cctx) + if err != nil { + return err + } + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() + ctx := ReqContext(cctx) + + msig, err := address.NewFromString(cctx.Args().Get(0)) + if err != nil { + return err + } + + txid, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64) + if err != nil { + return err + } + + var from address.Address + if cctx.IsSet("from") { + f, err := address.NewFromString(cctx.String("from")) + if err != nil { + return err + } + from = f + } else { + defaddr, err := api.WalletDefaultAddress(ctx) 
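+ // defaddr: the node's default wallet address, used as the sender when --from is not provided
+ // e.g.: lotus msig cancel <multisig address> <message ID>   -- cancels the pending transaction with that ID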
+ if err != nil { + return err + } + from = defaddr + } + + var msgCid cid.Cid + if cctx.Args().Len() == 2 { + proto, err := api.MsigCancel(ctx, msig, txid, from) + if err != nil { + return err + } + + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid = sm.Cid() + } else { + dest, err := address.NewFromString(cctx.Args().Get(2)) + if err != nil { + return err + } + + value, err := types.ParseFIL(cctx.Args().Get(3)) + if err != nil { + return err + } + + var method uint64 + var params []byte + if cctx.Args().Len() == 6 { + m, err := strconv.ParseUint(cctx.Args().Get(4), 10, 64) + if err != nil { + return err + } + method = m + + p, err := hex.DecodeString(cctx.Args().Get(5)) + if err != nil { + return err + } + params = p + } + + proto, err := api.MsigCancelTxnHash(ctx, msig, txid, dest, types.BigInt(value), from, method, params) + if err != nil { + return err + } + + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid = sm.Cid() + } + + fmt.Println("sent cancel in message: ", msgCid) + + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) + if err != nil { + return err + } + + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("cancel returned exit %d", wait.Receipt.ExitCode) + } + + return nil + }, +} + var msigRemoveProposeCmd = &cli.Command{ Name: "propose-remove", Usage: "Propose to remove a signer", @@ -1490,7 +1618,7 @@ var msigLockCancelCmd = &cli.Command{ return actErr } - proto, err := api.MsigCancel(ctx, msig, txid, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) + proto, err := api.MsigCancelTxnHash(ctx, msig, txid, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) if err != nil { return err } diff --git a/cli/wait.go b/cli/wait.go index 5fc5fa469..a3c0e511a 100644 --- a/cli/wait.go +++ b/cli/wait.go @@ -1,6 +1,7 @@ package cli import ( + "context" "fmt" "time" @@ -10,8 +11,22 @@ import ( var WaitApiCmd = &cli.Command{ Name: "wait-api", Usage: "Wait for lotus api to come online", + Flags: []cli.Flag{ + &cli.DurationFlag{ + Name: "timeout", + Usage: "duration to wait till fail", + Value: time.Second * 30, + }, + }, Action: func(cctx *cli.Context) error { - for i := 0; i < 30; i++ { + ctx := ReqContext(cctx) + ctx, cancel := context.WithTimeout(ctx, cctx.Duration("timeout")) + defer cancel() + for { + if ctx.Err() != nil { + break + } + api, closer, err := GetAPI(cctx) if err != nil { fmt.Printf("Not online yet... 
(%s)\n", err) @@ -20,8 +35,6 @@ var WaitApiCmd = &cli.Command{ } defer closer() - ctx := ReqContext(cctx) - _, err = api.Version(ctx) if err != nil { return err @@ -29,6 +42,11 @@ var WaitApiCmd = &cli.Command{ return nil } - return fmt.Errorf("timed out waiting for api to come online") + + if ctx.Err() == context.DeadlineExceeded { + return fmt.Errorf("timed out waiting for api to come online") + } + + return ctx.Err() }, } diff --git a/cmd/lotus-miner/init.go b/cmd/lotus-miner/init.go index a5e9710a9..b2199dd94 100644 --- a/cmd/lotus-miner/init.go +++ b/cmd/lotus-miner/init.go @@ -13,6 +13,8 @@ import ( "path/filepath" "strconv" + power6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/power" + "github.com/docker/go-units" "github.com/google/uuid" "github.com/ipfs/go-datastore" @@ -644,11 +646,26 @@ func createStorageMiner(ctx context.Context, api v1api.FullNode, peerid peer.ID, return address.Address{}, err } + sender := owner + if fromstr := cctx.String("from"); fromstr != "" { + faddr, err := address.NewFromString(fromstr) + if err != nil { + return address.Undef, fmt.Errorf("could not parse from address: %w", err) + } + sender = faddr + } + + // make sure the sender account exists on chain + _, err = api.StateLookupID(ctx, owner, types.EmptyTSK) + if err != nil { + return address.Undef, xerrors.Errorf("sender must exist on chain: %w", err) + } + // make sure the worker account exists on chain _, err = api.StateLookupID(ctx, worker, types.EmptyTSK) if err != nil { signed, err := api.MpoolPushMessage(ctx, &types.Message{ - From: owner, + From: sender, To: worker, Value: types.NewInt(0), }, nil) @@ -668,35 +685,46 @@ func createStorageMiner(ctx context.Context, api v1api.FullNode, peerid peer.ID, } } - nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK) + // make sure the owner account exists on chain + _, err = api.StateLookupID(ctx, owner, types.EmptyTSK) if err != nil { - return address.Undef, xerrors.Errorf("getting network version: %w", err) + signed, err := api.MpoolPushMessage(ctx, &types.Message{ + From: sender, + To: owner, + Value: types.NewInt(0), + }, nil) + if err != nil { + return address.Undef, xerrors.Errorf("push owner init: %w", err) + } + + log.Infof("Initializing owner account %s, message: %s", worker, signed.Cid()) + log.Infof("Waiting for confirmation") + + mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true) + if err != nil { + return address.Undef, xerrors.Errorf("waiting for owner init: %w", err) + } + if mw.Receipt.ExitCode != 0 { + return address.Undef, xerrors.Errorf("initializing owner account failed: exit code %d", mw.Receipt.ExitCode) + } } - spt, err := miner.SealProofTypeFromSectorSize(abi.SectorSize(ssize), nv) + // Note: the correct thing to do would be to call SealProofTypeFromSectorSize if actors version is v3 or later, but this still works + spt, err := miner.WindowPoStProofTypeFromSectorSize(abi.SectorSize(ssize)) if err != nil { - return address.Undef, xerrors.Errorf("getting seal proof type: %w", err) + return address.Undef, xerrors.Errorf("getting post proof type: %w", err) } - params, err := actors.SerializeParams(&power2.CreateMinerParams{ - Owner: owner, - Worker: worker, - SealProofType: spt, - Peer: abi.PeerID(peerid), + params, err := actors.SerializeParams(&power6.CreateMinerParams{ + Owner: owner, + Worker: worker, + WindowPoStProofType: spt, + Peer: abi.PeerID(peerid), }) if err != nil { return address.Undef, err } - sender := owner - if fromstr := cctx.String("from"); 
fromstr != "" { - faddr, err := address.NewFromString(fromstr) - if err != nil { - return address.Undef, fmt.Errorf("could not parse from address: %w", err) - } - sender = faddr - } - createStorageMinerMsg := &types.Message{ To: power.Address, From: sender, diff --git a/cmd/lotus-miner/pieces.go b/cmd/lotus-miner/pieces.go index 75605c1ed..778f8e6cf 100644 --- a/cmd/lotus-miner/pieces.go +++ b/cmd/lotus-miner/pieces.go @@ -6,6 +6,7 @@ import ( "text/tabwriter" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/lib/tablewriter" "github.com/ipfs/go-cid" "github.com/urfave/cli/v2" ) @@ -48,6 +49,12 @@ var piecesListPiecesCmd = &cli.Command{ var piecesListCidInfosCmd = &cli.Command{ Name: "list-cids", Usage: "list registered payload CIDs", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "verbose", + Aliases: []string{"v"}, + }, + }, Action: func(cctx *cli.Context) error { nodeApi, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { @@ -61,9 +68,54 @@ var piecesListCidInfosCmd = &cli.Command{ return err } + w := tablewriter.New(tablewriter.Col("CID"), + tablewriter.Col("Piece"), + tablewriter.Col("BlockOffset"), + tablewriter.Col("BlockLen"), + tablewriter.Col("Deal"), + tablewriter.Col("Sector"), + tablewriter.Col("DealOffset"), + tablewriter.Col("DealLen"), + ) + for _, c := range cids { - fmt.Println(c) + if !cctx.Bool("verbose") { + fmt.Println(c) + continue + } + + ci, err := nodeApi.PiecesGetCIDInfo(ctx, c) + if err != nil { + fmt.Printf("Error getting CID info: %s\n", err) + continue + } + + for _, location := range ci.PieceBlockLocations { + pi, err := nodeApi.PiecesGetPieceInfo(ctx, location.PieceCID) + if err != nil { + fmt.Printf("Error getting piece info: %s\n", err) + continue + } + + for _, deal := range pi.Deals { + w.Write(map[string]interface{}{ + "CID": c, + "Piece": location.PieceCID, + "BlockOffset": location.RelOffset, + "BlockLen": location.BlockSize, + "Deal": deal.DealID, + "Sector": deal.SectorID, + "DealOffset": deal.Offset, + "DealLen": deal.Length, + }) + } + } } + + if cctx.Bool("verbose") { + return w.Flush(os.Stdout) + } + return nil }, } diff --git a/cmd/lotus-miner/sealing.go b/cmd/lotus-miner/sealing.go index 472af8da6..16b02f7bb 100644 --- a/cmd/lotus-miner/sealing.go +++ b/cmd/lotus-miner/sealing.go @@ -4,6 +4,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "math" "os" "sort" "strings" @@ -32,6 +33,17 @@ var sealingCmd = &cli.Command{ }, } +var barCols = float64(64) + +func barString(total, y, g float64) string { + yBars := int(math.Round(y / total * barCols)) + gBars := int(math.Round(g / total * barCols)) + eBars := int(barCols) - yBars - gBars + return color.YellowString(strings.Repeat("|", yBars)) + + color.GreenString(strings.Repeat("|", gBars)) + + strings.Repeat(" ", eBars) +} + var sealingWorkersCmd = &cli.Command{ Name: "workers", Usage: "list workers", @@ -77,7 +89,7 @@ var sealingWorkersCmd = &cli.Command{ for _, stat := range st { gpuUse := "not " gpuCol := color.FgBlue - if stat.GpuUsed { + if stat.GpuUsed > 0 { gpuCol = color.FgGreen gpuUse = "" } @@ -89,56 +101,43 @@ var sealingWorkersCmd = &cli.Command{ fmt.Printf("Worker %s, host %s%s\n", stat.id, color.MagentaString(stat.Info.Hostname), disabled) - var barCols = uint64(64) - cpuBars := int(stat.CpuUse * barCols / stat.Info.Resources.CPUs) - cpuBar := strings.Repeat("|", cpuBars) - if int(barCols)-cpuBars >= 0 { - cpuBar += strings.Repeat(" ", int(barCols)-cpuBars) - } - fmt.Printf("\tCPU: [%s] %d/%d core(s) in use\n", - color.GreenString(cpuBar), 
stat.CpuUse, stat.Info.Resources.CPUs) + barString(float64(stat.Info.Resources.CPUs), 0, float64(stat.CpuUse)), stat.CpuUse, stat.Info.Resources.CPUs) - ramBarsRes := int(stat.Info.Resources.MemReserved * barCols / stat.Info.Resources.MemPhysical) - ramBarsUsed := int(stat.MemUsedMin * barCols / stat.Info.Resources.MemPhysical) - ramRepeatSpace := int(barCols) - (ramBarsUsed + ramBarsRes) - - colorFunc := color.YellowString - if ramRepeatSpace < 0 { - ramRepeatSpace = 0 - colorFunc = color.RedString + ramTotal := stat.Info.Resources.MemPhysical + ramTasks := stat.MemUsedMin + ramUsed := stat.Info.Resources.MemUsed + var ramReserved uint64 = 0 + if ramUsed > ramTasks { + ramReserved = ramUsed - ramTasks } - - ramBar := colorFunc(strings.Repeat("|", ramBarsRes)) + - color.GreenString(strings.Repeat("|", ramBarsUsed)) + - strings.Repeat(" ", ramRepeatSpace) - - vmem := stat.Info.Resources.MemPhysical + stat.Info.Resources.MemSwap - - vmemBarsRes := int(stat.Info.Resources.MemReserved * barCols / vmem) - vmemBarsUsed := int(stat.MemUsedMax * barCols / vmem) - vmemRepeatSpace := int(barCols) - (vmemBarsUsed + vmemBarsRes) - - colorFunc = color.YellowString - if vmemRepeatSpace < 0 { - vmemRepeatSpace = 0 - colorFunc = color.RedString - } - - vmemBar := colorFunc(strings.Repeat("|", vmemBarsRes)) + - color.GreenString(strings.Repeat("|", vmemBarsUsed)) + - strings.Repeat(" ", vmemRepeatSpace) + ramBar := barString(float64(ramTotal), float64(ramReserved), float64(ramTasks)) fmt.Printf("\tRAM: [%s] %d%% %s/%s\n", ramBar, - (stat.Info.Resources.MemReserved+stat.MemUsedMin)*100/stat.Info.Resources.MemPhysical, - types.SizeStr(types.NewInt(stat.Info.Resources.MemReserved+stat.MemUsedMin)), + (ramTasks+ramReserved)*100/stat.Info.Resources.MemPhysical, + types.SizeStr(types.NewInt(ramTasks+ramUsed)), types.SizeStr(types.NewInt(stat.Info.Resources.MemPhysical))) - fmt.Printf("\tVMEM: [%s] %d%% %s/%s\n", vmemBar, - (stat.Info.Resources.MemReserved+stat.MemUsedMax)*100/vmem, - types.SizeStr(types.NewInt(stat.Info.Resources.MemReserved+stat.MemUsedMax)), - types.SizeStr(types.NewInt(vmem))) + vmemTotal := stat.Info.Resources.MemPhysical + stat.Info.Resources.MemSwap + vmemTasks := stat.MemUsedMax + vmemUsed := stat.Info.Resources.MemUsed + stat.Info.Resources.MemSwapUsed + var vmemReserved uint64 = 0 + if vmemUsed > vmemTasks { + vmemReserved = vmemUsed - vmemTasks + } + vmemBar := barString(float64(vmemTotal), float64(vmemReserved), float64(vmemTasks)) + fmt.Printf("\tVMEM: [%s] %d%% %s/%s\n", vmemBar, + (vmemTasks+vmemReserved)*100/vmemTotal, + types.SizeStr(types.NewInt(vmemTasks+vmemReserved)), + types.SizeStr(types.NewInt(vmemTotal))) + + if len(stat.Info.Resources.GPUs) > 0 { + gpuBar := barString(float64(len(stat.Info.Resources.GPUs)), 0, stat.GpuUsed) + fmt.Printf("\tGPU: [%s] %.f%% %.2f/%d gpu(s) in use\n", color.GreenString(gpuBar), + stat.GpuUsed*100/float64(len(stat.Info.Resources.GPUs)), + stat.GpuUsed, len(stat.Info.Resources.GPUs)) + } for _, gpu := range stat.Info.Resources.GPUs { fmt.Printf("\tGPU: %s\n", color.New(gpuCol).Sprintf("%s, %sused", gpu, gpuUse)) } diff --git a/cmd/lotus-miner/storage.go b/cmd/lotus-miner/storage.go index e7508eb29..4df6a9904 100644 --- a/cmd/lotus-miner/storage.go +++ b/cmd/lotus-miner/storage.go @@ -95,6 +95,14 @@ over time Name: "max-storage", Usage: "(for init) limit storage space for sectors (expensive for very large paths!)", }, + &cli.StringSliceFlag{ + Name: "groups", + Usage: "path group names", + }, + &cli.StringSliceFlag{ + Name: "allow-to", + Usage: 
"path groups allowed to pull data from this path (allow all if not specified)", + }, }, Action: func(cctx *cli.Context) error { nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) @@ -142,6 +150,8 @@ over time CanSeal: cctx.Bool("seal"), CanStore: cctx.Bool("store"), MaxStorage: uint64(maxStor), + Groups: cctx.StringSlice("groups"), + AllowTo: cctx.StringSlice("allow-to"), } if !(cfg.CanStore || cfg.CanSeal) { @@ -322,10 +332,17 @@ var storageListCmd = &cli.Command{ if si.CanStore { fmt.Print(color.CyanString("Store")) } - fmt.Println("") } else { fmt.Print(color.HiYellowString("Use: ReadOnly")) } + fmt.Println() + + if len(si.Groups) > 0 { + fmt.Printf("\tGroups: %s\n", strings.Join(si.Groups, ", ")) + } + if len(si.AllowTo) > 0 { + fmt.Printf("\tAllowTo: %s\n", strings.Join(si.AllowTo, ", ")) + } if localPath, ok := local[s.ID]; ok { fmt.Printf("\tLocal: %s\n", color.GreenString(localPath)) diff --git a/cmd/lotus-pcr/main.go b/cmd/lotus-pcr/main.go index 8ee79b44a..469f5ad8e 100644 --- a/cmd/lotus-pcr/main.go +++ b/cmd/lotus-pcr/main.go @@ -17,6 +17,7 @@ import ( "time" "github.com/filecoin-project/lotus/chain/actors/builtin" + lcli "github.com/filecoin-project/lotus/cli" miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" @@ -41,7 +42,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/tools/stats" + "github.com/filecoin-project/lotus/tools/stats/sync" ) var log = logging.Logger("main") @@ -160,15 +161,15 @@ var findMinersCmd = &cli.Command{ }, Action: func(cctx *cli.Context) error { ctx := context.Background() - api, closer, err := stats.GetFullNodeAPI(cctx.Context, cctx.String("lotus-path")) + api, closer, err := lcli.GetFullNodeAPI(cctx) if err != nil { - log.Fatal(err) + return err } defer closer() if !cctx.Bool("no-sync") { - if err := stats.WaitForSyncComplete(ctx, api); err != nil { - log.Fatal(err) + if err := sync.SyncWait(ctx, api); err != nil { + return err } } @@ -245,7 +246,7 @@ var recoverMinersCmd = &cli.Command{ }, Action: func(cctx *cli.Context) error { ctx := context.Background() - api, closer, err := stats.GetFullNodeAPI(cctx.Context, cctx.String("lotus-path")) + api, closer, err := lcli.GetFullNodeAPI(cctx) if err != nil { log.Fatal(err) } @@ -266,8 +267,8 @@ var recoverMinersCmd = &cli.Command{ } if !cctx.Bool("no-sync") { - if err := stats.WaitForSyncComplete(ctx, api); err != nil { - log.Fatal(err) + if err := sync.SyncWait(ctx, api); err != nil { + return err } } @@ -427,7 +428,7 @@ var runCmd = &cli.Command{ }() ctx := context.Background() - api, closer, err := stats.GetFullNodeAPI(cctx.Context, cctx.String("lotus-path")) + api, closer, err := lcli.GetFullNodeAPI(cctx) if err != nil { log.Fatal(err) } @@ -448,12 +449,12 @@ var runCmd = &cli.Command{ } if !cctx.Bool("no-sync") { - if err := stats.WaitForSyncComplete(ctx, api); err != nil { - log.Fatal(err) + if err := sync.SyncWait(ctx, api); err != nil { + return err } } - tipsetsCh, err := stats.GetTips(ctx, api, r.Height(), cctx.Int("head-delay")) + tipsetsCh, err := sync.BufferedTipsetChannel(ctx, api, r.Height(), cctx.Int("head-delay")) if err != nil { log.Fatal(err) } diff --git a/cmd/lotus-seal-worker/info.go b/cmd/lotus-seal-worker/info.go index 6d5c2d64e..057e1303c 100644 --- a/cmd/lotus-seal-worker/info.go +++ b/cmd/lotus-seal-worker/info.go @@ -58,8 +58,11 @@ var infoCmd = &cli.Command{ fmt.Printf("Hostname: 
%s\n", info.Hostname) fmt.Printf("CPUs: %d; GPUs: %v\n", info.Resources.CPUs, info.Resources.GPUs) - fmt.Printf("RAM: %s; Swap: %s\n", types.SizeStr(types.NewInt(info.Resources.MemPhysical)), types.SizeStr(types.NewInt(info.Resources.MemSwap))) - fmt.Printf("Reserved memory: %s\n", types.SizeStr(types.NewInt(info.Resources.MemReserved))) + fmt.Printf("RAM: %s/%s; Swap: %s/%s\n", + types.SizeStr(types.NewInt(info.Resources.MemUsed)), + types.SizeStr(types.NewInt(info.Resources.MemPhysical)), + types.SizeStr(types.NewInt(info.Resources.MemSwapUsed)), + types.SizeStr(types.NewInt(info.Resources.MemSwap))) fmt.Printf("Task types: ") for _, t := range ttList(tt) { diff --git a/cmd/lotus-seal-worker/main.go b/cmd/lotus-seal-worker/main.go index ce2a32cd4..5aec2f52f 100644 --- a/cmd/lotus-seal-worker/main.go +++ b/cmd/lotus-seal-worker/main.go @@ -60,6 +60,7 @@ func main() { storageCmd, setCmd, waitQuietCmd, + resourcesCmd, tasksCmd, } diff --git a/cmd/lotus-seal-worker/resources.go b/cmd/lotus-seal-worker/resources.go new file mode 100644 index 000000000..539f141b9 --- /dev/null +++ b/cmd/lotus-seal-worker/resources.go @@ -0,0 +1,72 @@ +package main + +import ( + "fmt" + "os" + "sort" + + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" +) + +var resourcesCmd = &cli.Command{ + Name: "resources", + Usage: "Manage resource table overrides", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "all", + Usage: "print all resource envvars", + }, + &cli.BoolFlag{ + Name: "default", + Usage: "print default resource envvars", + }, + }, + Action: func(cctx *cli.Context) error { + def := map[string]string{} + set := map[string]string{} + all := map[string]string{} + + _, err := storiface.ParseResourceEnv(func(key, d string) (string, bool) { + if d != "" { + all[key] = d + def[key] = d + } + + s, ok := os.LookupEnv(key) + if ok { + all[key] = s + set[key] = s + } + + return s, ok + }) + if err != nil { + return err + } + + printMap := func(m map[string]string) { + var arr []string + for k, v := range m { + arr = append(arr, fmt.Sprintf("%s=%s", k, v)) + } + sort.Strings(arr) + for _, s := range arr { + fmt.Println(s) + } + } + + if cctx.Bool("default") { + printMap(def) + } else { + if cctx.Bool("all") { + printMap(all) + } else { + printMap(set) + } + } + + return nil + }, +} diff --git a/cmd/lotus-seal-worker/storage.go b/cmd/lotus-seal-worker/storage.go index be662a6c3..721523fd0 100644 --- a/cmd/lotus-seal-worker/storage.go +++ b/cmd/lotus-seal-worker/storage.go @@ -51,6 +51,14 @@ var storageAttachCmd = &cli.Command{ Name: "max-storage", Usage: "(for init) limit storage space for sectors (expensive for very large paths!)", }, + &cli.StringSliceFlag{ + Name: "groups", + Usage: "path group names", + }, + &cli.StringSliceFlag{ + Name: "allow-to", + Usage: "path groups allowed to pull data from this path (allow all if not specified)", + }, }, Action: func(cctx *cli.Context) error { nodeApi, closer, err := lcli.GetWorkerAPI(cctx) @@ -98,6 +106,8 @@ var storageAttachCmd = &cli.Command{ CanSeal: cctx.Bool("seal"), CanStore: cctx.Bool("store"), MaxStorage: uint64(maxStor), + Groups: cctx.StringSlice("groups"), + AllowTo: cctx.StringSlice("allow-to"), } if !(cfg.CanStore || cfg.CanSeal) { diff --git a/cmd/lotus-shed/balancer.go b/cmd/lotus-shed/balancer.go new file mode 100644 index 000000000..edc484ab6 --- /dev/null +++ b/cmd/lotus-shed/balancer.go @@ -0,0 +1,222 @@ +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/ipfs/go-cid" + 
"github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" +) + +var balancerCmd = &cli.Command{ + Name: "balancer", + Usage: "Utility for balancing tokens between multiple wallets", + Description: `Tokens are balanced based on the specification provided in arguments + +Each argument specifies an address, role, and role parameters separated by ';' + +Supported roles: + - request;[addr];[low];[high] - request tokens when balance drops to [low], topping up to [high] + - provide;[addr];[min] - provide tokens to other addresses as long as the balance is above [min] +`, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + + defer closer() + ctx := lcli.ReqContext(cctx) + + type request struct { + addr address.Address + low, high abi.TokenAmount + } + type provide struct { + addr address.Address + min abi.TokenAmount + } + + var requests []request + var provides []provide + + for i, s := range cctx.Args().Slice() { + ss := strings.Split(s, ";") + switch ss[0] { + case "request": + if len(ss) != 4 { + return xerrors.Errorf("request role needs 4 parameters (arg %d)", i) + } + + addr, err := address.NewFromString(ss[1]) + if err != nil { + return xerrors.Errorf("parsing address in arg %d: %w", i, err) + } + + low, err := types.ParseFIL(ss[2]) + if err != nil { + return xerrors.Errorf("parsing low in arg %d: %w", i, err) + } + + high, err := types.ParseFIL(ss[3]) + if err != nil { + return xerrors.Errorf("parsing high in arg %d: %w", i, err) + } + + if abi.TokenAmount(low).GreaterThanEqual(abi.TokenAmount(high)) { + return xerrors.Errorf("low must be less than high in arg %d", i) + } + + requests = append(requests, request{ + addr: addr, + low: abi.TokenAmount(low), + high: abi.TokenAmount(high), + }) + case "provide": + if len(ss) != 3 { + return xerrors.Errorf("provide role needs 3 parameters (arg %d)", i) + } + + addr, err := address.NewFromString(ss[1]) + if err != nil { + return xerrors.Errorf("parsing address in arg %d: %w", i, err) + } + + min, err := types.ParseFIL(ss[2]) + if err != nil { + return xerrors.Errorf("parsing min in arg %d: %w", i, err) + } + + provides = append(provides, provide{ + addr: addr, + min: abi.TokenAmount(min), + }) + default: + return xerrors.Errorf("unknown role '%s' in arg %d", ss[0], i) + } + } + + if len(provides) == 0 { + return xerrors.Errorf("no provides specified") + } + if len(requests) == 0 { + return xerrors.Errorf("no requests specified") + } + + const confidence = 16 + + var notifs <-chan []*lapi.HeadChange + for { + if notifs == nil { + notifs, err = api.ChainNotify(ctx) + if err != nil { + return xerrors.Errorf("chain notify error: %w", err) + } + } + + var ts *types.TipSet + loop: + for { + time.Sleep(150 * time.Millisecond) + select { + case n := <-notifs: + for _, change := range n { + if change.Type != store.HCApply { + continue + } + + ts = change.Val + } + case <-ctx.Done(): + return nil + default: + break loop + } + } + + type send struct { + to address.Address + amt abi.TokenAmount + filled bool + } + var toSend []*send + + for _, req := range requests { + bal, err := 
api.StateGetActor(ctx, req.addr, ts.Key()) + if err != nil { + return err + } + + if bal.Balance.LessThan(req.low) { + toSend = append(toSend, &send{ + to: req.addr, + amt: big.Sub(req.high, bal.Balance), + }) + } + } + + for _, s := range toSend { + fmt.Printf("REQUEST %s for %s\n", types.FIL(s.amt), s.to) + } + + var msgs []cid.Cid + + for _, prov := range provides { + bal, err := api.StateGetActor(ctx, prov.addr, ts.Key()) + if err != nil { + return err + } + + avail := big.Sub(bal.Balance, prov.min) + for _, s := range toSend { + if s.filled { + continue + } + if avail.LessThan(s.amt) { + continue + } + + m, err := api.MpoolPushMessage(ctx, &types.Message{ + From: prov.addr, + To: s.to, + Value: s.amt, + }, nil) + if err != nil { + fmt.Printf("SEND ERROR %s\n", err.Error()) + } + fmt.Printf("SEND %s; %s from %s TO %s\n", m.Cid(), types.FIL(s.amt), s.to, prov.addr) + + msgs = append(msgs, m.Cid()) + s.filled = true + avail = big.Sub(avail, s.amt) + } + } + + if len(msgs) > 0 { + fmt.Printf("WAITING FOR %d MESSAGES\n", len(msgs)) + } + + for _, msg := range msgs { + ml, err := api.StateWaitMsg(ctx, msg, confidence, lapi.LookbackNoLimit, true) + if err != nil { + return err + } + if ml.Receipt.ExitCode != exitcode.Ok { + fmt.Printf("MSG %s NON-ZERO EXITCODE: %s\n", msg, ml.Receipt.ExitCode) + } + } + } + }, +} diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index a982fcf23..d35fb56dd 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -64,6 +64,7 @@ func main() { splitstoreCmd, fr32Cmd, chainCmd, + balancerCmd, } app := &cli.App{ diff --git a/cmd/lotus-shed/miner.go b/cmd/lotus-shed/miner.go index ec5a445f9..479e081e9 100644 --- a/cmd/lotus-shed/miner.go +++ b/cmd/lotus-shed/miner.go @@ -2,11 +2,29 @@ package main import ( "bufio" + "bytes" + "fmt" "io" "os" "path/filepath" "strings" + miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + + power6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/power" + + "github.com/docker/go-units" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/mitchellh/go-homedir" "github.com/urfave/cli/v2" "golang.org/x/xerrors" @@ -17,6 +35,231 @@ var minerCmd = &cli.Command{ Usage: "miner-related utilities", Subcommands: []*cli.Command{ minerUnpackInfoCmd, + minerCreateCmd, + minerFaultsCmd, + }, +} + +var minerFaultsCmd = &cli.Command{ + Name: "faults", + Usage: "Display a list of faulty sectors for a SP", + ArgsUsage: "[minerAddress]", + Flags: []cli.Flag{ + &cli.Uint64Flag{ + Name: "expiring-in", + Usage: "only list sectors that are expiring in the next epochs", + Value: 0, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass miner address") + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + + defer closer() + + ctx := lcli.ReqContext(cctx) + + m, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + faultBf, err := api.StateMinerFaults(ctx, m, types.EmptyTSK) + if err != nil { + return err + } + + faults, err := faultBf.All(miner2.SectorsMax) 
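+ // faults now holds the individual faulty sector numbers expanded from the on-chain bitfield
+ // e.g.: lotus-shed miner faults --expiring-in 2880 f01234   (miner address is illustrative)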
+ if err != nil { + return err + } + + if len(faults) == 0 { + fmt.Println("no faults") + return nil + } + + expEpoch := abi.ChainEpoch(cctx.Uint64("expiring-in")) + + if expEpoch == 0 { + fmt.Print("faulty sectors: ") + for _, v := range faults { + fmt.Printf("%d ", v) + } + + return nil + } + + h, err := api.ChainHead(ctx) + if err != nil { + return err + } + + fmt.Printf("faulty sectors expiring in the next %d epochs: ", expEpoch) + for _, v := range faults { + ss, err := api.StateSectorExpiration(ctx, m, abi.SectorNumber(v), types.EmptyTSK) + if err != nil { + return err + } + + if ss.Early < h.Height()+expEpoch { + fmt.Printf("%d ", v) + } + } + + return nil + }, +} + +var minerCreateCmd = &cli.Command{ + Name: "create", + Usage: "sends a create miner msg", + ArgsUsage: "[sender] [owner] [worker] [sector size]", + Action: func(cctx *cli.Context) error { + wapi, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + + defer closer() + ctx := lcli.ReqContext(cctx) + + if cctx.Args().Len() != 4 { + return xerrors.Errorf("expected 4 args (sender owner worker sectorSize)") + } + + sender, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + owner, err := address.NewFromString(cctx.Args().Get(1)) + if err != nil { + return err + } + + worker, err := address.NewFromString(cctx.Args().Get(2)) + if err != nil { + return err + } + + ssize, err := units.RAMInBytes(cctx.Args().Get(3)) + if err != nil { + return fmt.Errorf("failed to parse sector size: %w", err) + } + + // make sure the sender account exists on chain + _, err = wapi.StateLookupID(ctx, sender, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("sender must exist on chain: %w", err) + } + + // make sure the worker account exists on chain + _, err = wapi.StateLookupID(ctx, worker, types.EmptyTSK) + if err != nil { + signed, err := wapi.MpoolPushMessage(ctx, &types.Message{ + From: sender, + To: worker, + Value: types.NewInt(0), + }, nil) + if err != nil { + return xerrors.Errorf("push worker init: %w", err) + } + + log.Infof("Initializing worker account %s, message: %s", worker, signed.Cid()) + log.Infof("Waiting for confirmation") + + mw, err := wapi.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence) + if err != nil { + return xerrors.Errorf("waiting for worker init: %w", err) + } + + if mw.Receipt.ExitCode != 0 { + return xerrors.Errorf("initializing worker account failed: exit code %d", mw.Receipt.ExitCode) + } + } + + // make sure the owner account exists on chain + _, err = wapi.StateLookupID(ctx, owner, types.EmptyTSK) + if err != nil { + signed, err := wapi.MpoolPushMessage(ctx, &types.Message{ + From: sender, + To: owner, + Value: types.NewInt(0), + }, nil) + if err != nil { + return xerrors.Errorf("push owner init: %w", err) + } + + log.Infof("Initializing owner account %s, message: %s", owner, signed.Cid()) + log.Infof("Waiting for confirmation") + + mw, err := wapi.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence) + if err != nil { + return xerrors.Errorf("waiting for owner init: %w", err) + } + + if mw.Receipt.ExitCode != 0 { + return xerrors.Errorf("initializing owner account failed: exit code %d", mw.Receipt.ExitCode) + } + } + + // Note: the correct thing to do would be to call SealProofTypeFromSectorSize if actors version is v3 or later, but this still works + spt, err := miner.WindowPoStProofTypeFromSectorSize(abi.SectorSize(ssize)) + if err != nil { + return xerrors.Errorf("getting post proof type: %w", err) + } + + params, err := 
actors.SerializeParams(&power6.CreateMinerParams{ + Owner: owner, + Worker: worker, + WindowPoStProofType: spt, + }) + + if err != nil { + return err + } + + createStorageMinerMsg := &types.Message{ + To: power.Address, + From: sender, + Value: big.Zero(), + + Method: power.Methods.CreateMiner, + Params: params, + } + + signed, err := wapi.MpoolPushMessage(ctx, createStorageMinerMsg, nil) + if err != nil { + return xerrors.Errorf("pushing createMiner message: %w", err) + } + + log.Infof("Pushed CreateMiner message: %s", signed.Cid()) + log.Infof("Waiting for confirmation") + + mw, err := wapi.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence) + if err != nil { + return xerrors.Errorf("waiting for createMiner message: %w", err) + } + + if mw.Receipt.ExitCode != 0 { + return xerrors.Errorf("create miner failed: exit code %d", mw.Receipt.ExitCode) + } + + var retval power6.CreateMinerReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return)); err != nil { + return err + } + + log.Infof("New miners address is: %s (%s)", retval.IDAddress, retval.RobustAddress) + + return nil }, } diff --git a/cmd/lotus-shed/msg.go b/cmd/lotus-shed/msg.go index b640fb9c9..7853624a6 100644 --- a/cmd/lotus-shed/msg.go +++ b/cmd/lotus-shed/msg.go @@ -148,6 +148,15 @@ func printMessage(cctx *cli.Context, msg *types.Message) error { fmt.Println("Params:", p) + if msg, err := messageFromBytes(cctx, msg.Params); err == nil { + fmt.Println("---") + color.Red("Params message:") + + if err := printMessage(cctx, msg.VMMessage()); err != nil { + return err + } + } + return nil } diff --git a/cmd/lotus-shed/sectors.go b/cmd/lotus-shed/sectors.go index 726d992c4..4894a6eea 100644 --- a/cmd/lotus-shed/sectors.go +++ b/cmd/lotus-shed/sectors.go @@ -2,11 +2,14 @@ package main import ( "bytes" + "context" "encoding/base64" + "encoding/binary" "fmt" "image" "image/color" "image/png" + "io" "os" "sort" "strconv" @@ -23,6 +26,7 @@ import ( miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" @@ -38,6 +42,7 @@ var sectorsCmd = &cli.Command{ terminateSectorCmd, terminateSectorPenaltyEstimationCmd, visAllocatedSectorsCmd, + dumpRLESectorCmd, }, } @@ -275,6 +280,113 @@ var terminateSectorPenaltyEstimationCmd = &cli.Command{ }, } +func activeMiners(ctx context.Context, api v0api.FullNode) ([]address.Address, error) { + miners, err := api.StateListMiners(ctx, types.EmptyTSK) + if err != nil { + return nil, err + } + powCache := make(map[address.Address]types.BigInt) + var lk sync.Mutex + parmap.Par(32, miners, func(a address.Address) { + pow, err := api.StateMinerPower(ctx, a, types.EmptyTSK) + + lk.Lock() + if err == nil { + powCache[a] = pow.MinerPower.QualityAdjPower + } else { + powCache[a] = types.NewInt(0) + } + lk.Unlock() + }) + sort.Slice(miners, func(i, j int) bool { + return powCache[miners[i]].GreaterThan(powCache[miners[j]]) + }) + n := sort.Search(len(miners), func(i int) bool { + pow := powCache[miners[i]] + return pow.IsZero() + }) + return append(miners[0:0:0], miners[:n]...), nil +} + +var dumpRLESectorCmd = &cli.Command{ + Name: "dump-rles", + Usage: "Dump AllocatedSectors RLEs from miners passed as arguments as run lengths in uint64 LE format.\nIf no arguments are passed, dumps all active miners in the state tree.", + Action: func(cctx *cli.Context) error { + api, closer, 
err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + var miners []address.Address + if cctx.NArg() == 0 { + miners, err = activeMiners(ctx, api) + if err != nil { + return xerrors.Errorf("getting active miners: %w", err) + } + } else { + for _, mS := range cctx.Args().Slice() { + mA, err := address.NewFromString(mS) + if err != nil { + return xerrors.Errorf("parsing address '%s': %w", mS, err) + } + miners = append(miners, mA) + } + } + wbuf := make([]byte, 8) + buf := &bytes.Buffer{} + + for i := 0; i < len(miners); i++ { + buf.Reset() + err := func() error { + state, err := api.StateReadState(ctx, miners[i], types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting state: %+v", err) + } + allocSString := state.State.(map[string]interface{})["AllocatedSectors"].(map[string]interface{})["/"].(string) + + allocCid, err := cid.Decode(allocSString) + if err != nil { + return xerrors.Errorf("decoding cid: %+v", err) + } + rle, err := api.ChainReadObj(ctx, allocCid) + if err != nil { + return xerrors.Errorf("reading AllocatedSectors: %+v", err) + } + + var bf bitfield.BitField + err = bf.UnmarshalCBOR(bytes.NewReader(rle)) + if err != nil { + return xerrors.Errorf("decoding bitfield: %w", err) + } + ri, err := bf.RunIterator() + if err != nil { + return xerrors.Errorf("creating iterator: %w", err) + } + + for ri.HasNext() { + run, err := ri.NextRun() + if err != nil { + return xerrors.Errorf("getting run: %w", err) + } + binary.LittleEndian.PutUint64(wbuf, run.Len) + buf.Write(wbuf) + } + _, err = io.Copy(os.Stdout, buf) + if err != nil { + return xerrors.Errorf("copy: %w", err) + } + + return nil + }() + if err != nil { + log.Errorf("miner %d: %s: %+v", i, miners[i], err) + } + } + return nil + }, +} + var visAllocatedSectorsCmd = &cli.Command{ Name: "vis-allocated", Usage: "Produces a html with visualisation of allocated sectors", @@ -287,32 +399,10 @@ var visAllocatedSectorsCmd = &cli.Command{ ctx := lcli.ReqContext(cctx) var miners []address.Address if cctx.NArg() == 0 { - miners, err = api.StateListMiners(ctx, types.EmptyTSK) + miners, err = activeMiners(ctx, api) if err != nil { - return err + return xerrors.Errorf("getting active miners: %w", err) } - powCache := make(map[address.Address]types.BigInt) - var lk sync.Mutex - parmap.Par(32, miners, func(a address.Address) { - pow, err := api.StateMinerPower(ctx, a, types.EmptyTSK) - - lk.Lock() - if err == nil { - powCache[a] = pow.MinerPower.QualityAdjPower - } else { - powCache[a] = types.NewInt(0) - } - lk.Unlock() - }) - sort.Slice(miners, func(i, j int) bool { - return powCache[miners[i]].GreaterThan(powCache[miners[j]]) - }) - n := sort.Search(len(miners), func(i int) bool { - pow := powCache[miners[i]] - log.Infof("pow @%d = %s", i, pow) - return pow.IsZero() - }) - miners = miners[:n] } else { for _, mS := range cctx.Args().Slice() { mA, err := address.NewFromString(mS) diff --git a/cmd/lotus-stats/docker-compose.yml b/cmd/lotus-stats/docker-compose.yml index b08a2157e..4453f49ec 100644 --- a/cmd/lotus-stats/docker-compose.yml +++ b/cmd/lotus-stats/docker-compose.yml @@ -2,7 +2,7 @@ version: '3' services: influxdb: - image: influxdb:latest + image: influxdb:1.8 container_name: influxdb ports: - "18086:8086" diff --git a/cmd/lotus-stats/main.go b/cmd/lotus-stats/main.go index b4c13ea8c..706a453eb 100644 --- a/cmd/lotus-stats/main.go +++ b/cmd/lotus-stats/main.go @@ -2,18 +2,36 @@ package main import ( "context" + "net/http" + _ "net/http/pprof" "os" + "time" + 
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/build" lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/tools/stats" + "github.com/filecoin-project/lotus/tools/stats/influx" + "github.com/filecoin-project/lotus/tools/stats/ipldstore" + "github.com/filecoin-project/lotus/tools/stats/metrics" + "github.com/filecoin-project/lotus/tools/stats/points" + "github.com/filecoin-project/lotus/tools/stats/sync" logging "github.com/ipfs/go-log/v2" "github.com/urfave/cli/v2" + + "contrib.go.opencensus.io/exporter/prometheus" + stats "go.opencensus.io/stats" + "go.opencensus.io/stats/view" ) var log = logging.Logger("stats") +func init() { + if err := view.Register(metrics.DefaultViews...); err != nil { + log.Fatal(err) + } +} + func main() { local := []*cli.Command{ runCmd, @@ -37,7 +55,7 @@ func main() { }, }, Before: func(cctx *cli.Context) error { - return logging.SetLogLevel("stats", cctx.String("log-level")) + return logging.SetLogLevelRegex("stats/*", cctx.String("log-level")) }, Commands: local, } @@ -104,6 +122,12 @@ var runCmd = &cli.Command{ Usage: "do not wait for chain sync to complete", Value: false, }, + &cli.IntFlag{ + Name: "ipld-store-cache-size", + Usage: "size of lru cache for ChainReadObj", + EnvVars: []string{"LOTUS_STATS_IPLD_STORE_CACHE_SIZE"}, + Value: 2 << 15, + }, }, Action: func(cctx *cli.Context) error { ctx := context.Background() @@ -118,30 +142,35 @@ var runCmd = &cli.Command{ influxPasswordFlag := cctx.String("influx-password") influxDatabaseFlag := cctx.String("influx-database") + ipldStoreCacheSizeFlag := cctx.Int("ipld-store-cache-size") + log.Infow("opening influx client", "hostname", influxHostnameFlag, "username", influxUsernameFlag, "database", influxDatabaseFlag) - influx, err := stats.InfluxClient(influxHostnameFlag, influxUsernameFlag, influxPasswordFlag) + influxClient, err := influx.NewClient(influxHostnameFlag, influxUsernameFlag, influxPasswordFlag) if err != nil { - log.Fatal(err) + return err } + exporter, err := prometheus.NewExporter(prometheus.Options{ + Namespace: "lotus_stats", + }) + if err != nil { + return err + } + + go func() { + http.Handle("/metrics", exporter) + if err := http.ListenAndServe(":6688", nil); err != nil { + log.Errorw("failed to start http server", "err", err) + } + }() + if resetFlag { - if err := stats.ResetDatabase(influx, influxDatabaseFlag); err != nil { - log.Fatal(err) + if err := influx.ResetDatabase(influxClient, influxDatabaseFlag); err != nil { + return err } } - height := int64(heightFlag) - - if !resetFlag && height == 0 { - h, err := stats.GetLastRecordedHeight(influx, influxDatabaseFlag) - if err != nil { - log.Info(err) - } - - height = h - } - api, closer, err := lcli.GetFullNodeAPI(cctx) if err != nil { return err @@ -149,12 +178,89 @@ var runCmd = &cli.Command{ defer closer() if !noSyncFlag { - if err := stats.WaitForSyncComplete(ctx, api); err != nil { - log.Fatal(err) + if err := sync.SyncWait(ctx, api); err != nil { + return err } } - stats.Collect(ctx, api, influx, influxDatabaseFlag, height, headLagFlag) + gtp, err := api.ChainGetGenesis(ctx) + if err != nil { + return err + } + + genesisTime := time.Unix(int64(gtp.MinTimestamp()), 0) + + // When height is set to `0` we will resume from the best height we can. 
+ // The goal is to ensure we have data in the last 60 tipsets + height := int64(heightFlag) + if !resetFlag && height == 0 { + lastHeight, err := influx.GetLastRecordedHeight(influxClient, influxDatabaseFlag) + if err != nil { + return err + } + + sinceGenesis := build.Clock.Now().Sub(genesisTime) + expectedHeight := int64(sinceGenesis.Seconds()) / int64(build.BlockDelaySecs) + + startOfWindowHeight := expectedHeight - 60 + + if lastHeight > startOfWindowHeight { + height = lastHeight + } else { + height = startOfWindowHeight + } + + ts, err := api.ChainHead(ctx) + if err != nil { + return err + } + + headHeight := int64(ts.Height()) + if headHeight < height { + height = headHeight + } + } + + go func() { + t := time.NewTicker(time.Second) + + for { + select { + case <-t.C: + sinceGenesis := build.Clock.Now().Sub(genesisTime) + expectedHeight := int64(sinceGenesis.Seconds()) / int64(build.BlockDelaySecs) + + stats.Record(ctx, metrics.TipsetCollectionHeightExpected.M(expectedHeight)) + } + } + }() + + store, err := ipldstore.NewApiIpldStore(ctx, api, ipldStoreCacheSizeFlag) + if err != nil { + return err + } + + collector, err := points.NewChainPointCollector(ctx, store, api) + if err != nil { + return err + } + + tipsets, err := sync.BufferedTipsetChannel(ctx, api, abi.ChainEpoch(height), headLagFlag) + if err != nil { + return err + } + + wq := influx.NewWriteQueue(ctx, influxClient) + defer wq.Close() + + for tipset := range tipsets { + if nb, err := collector.Collect(ctx, tipset); err != nil { + log.Warnw("failed to collect points", "err", err) + } else { + nb.SetDatabase(influxDatabaseFlag) + wq.AddBatch(nb) + } + } return nil }, diff --git a/docker-compose.yaml b/docker-compose.yaml index b962d5cc2..d68eed8db 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -103,6 +103,7 @@ services: # - FULLNODE_API_INFO=/dns/lotus/tcp/1234/http # - LOTUS_JAEGER_AGENT_HOST=jaeger # - LOTUS_JAEGER_AGENT_PORT=6831 + # - DOCKER_LOTUS_MINER_INIT=true # deploy: # restart_policy: # condition: on-failure diff --git a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md index 4d14bcb0e..3d27f0c75 100644 --- a/documentation/en/api-v0-methods-miner.md +++ b/documentation/en/api-v0-methods-miner.md @@ -2148,7 +2148,9 @@ Inputs: "Weight": 42, "MaxStorage": 42, "CanSeal": true, - "CanStore": true + "CanStore": true, + "Groups": null, + "AllowTo": null }, { "Capacity": 9, @@ -2258,7 +2260,9 @@ Response: "Weight": 42, "MaxStorage": 42, "CanSeal": true, - "CanStore": true + "CanStore": true, + "Groups": null, + "AllowTo": null } ``` @@ -2449,18 +2453,595 @@ Response: "IgnoreResources": false, "Resources": { "MemPhysical": 274877906944, + "MemUsed": 2147483648, "MemSwap": 128849018880, - "MemReserved": 2147483648, + "MemSwapUsed": 2147483648, "CPUs": 64, "GPUs": [ "aGPU 1337" - ] + ], + "Resources": { + "seal/v0/addpiece": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "4": 
{ + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/commit/1": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/commit/2": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240 + }, + "3": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368 + }, + "4": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736 + }, + "5": { + "MinMemory": 2048, + 
"MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240 + }, + "8": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368 + }, + "9": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736 + } + }, + "seal/v0/fetch": { + "0": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "1": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "2": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "3": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "4": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "5": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "6": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "7": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "8": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "9": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + } + }, + "seal/v0/precommit/1": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "3": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "4": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + 
"MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "8": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "9": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + } + }, + "seal/v0/precommit/2": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 16106127360, + "MaxMemory": 16106127360, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 32212254720, + "MaxMemory": 32212254720, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 16106127360, + "MaxMemory": 16106127360, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 32212254720, + "MaxMemory": 32212254720, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/unseal": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "3": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "4": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "8": { + "MinMemory": 60129542144, 
+ "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "9": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + } + } + } } }, "Enabled": true, "MemUsedMin": 0, "MemUsedMax": 0, - "GpuUsed": false, + "GpuUsed": 0, "CpuUse": 0 } } diff --git a/documentation/en/api-v0-methods-worker.md b/documentation/en/api-v0-methods-worker.md index c620113f4..7a1c2e2f2 100644 --- a/documentation/en/api-v0-methods-worker.md +++ b/documentation/en/api-v0-methods-worker.md @@ -92,10 +92,587 @@ Response: "IgnoreResources": true, "Resources": { "MemPhysical": 42, + "MemUsed": 42, "MemSwap": 42, - "MemReserved": 42, + "MemSwapUsed": 42, "CPUs": 42, - "GPUs": null + "GPUs": null, + "Resources": { + "seal/v0/addpiece": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/commit/1": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { 
+ "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/commit/2": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240 + }, + "3": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368 + }, + "4": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240 + }, + "8": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368 + }, + "9": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736 + } + }, + "seal/v0/fetch": { + "0": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "1": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "2": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "3": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "4": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "5": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "6": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "7": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + 
"MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "8": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "9": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + } + }, + "seal/v0/precommit/1": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "3": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "4": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "8": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "9": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + } + }, + "seal/v0/precommit/2": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 16106127360, + "MaxMemory": 16106127360, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 32212254720, + "MaxMemory": 32212254720, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 16106127360, + "MaxMemory": 16106127360, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + 
"BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 32212254720, + "MaxMemory": 32212254720, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/unseal": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "3": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "4": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "8": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "9": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + } + } + } } } ``` diff --git a/documentation/en/api-v0-methods.md b/documentation/en/api-v0-methods.md index 4d9530821..2e57b8d7d 100644 --- a/documentation/en/api-v0-methods.md +++ b/documentation/en/api-v0-methods.md @@ -1269,7 +1269,8 @@ Response: "Stages": { "Stages": null } - } + }, + "Event": 5 } ``` diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md index b03f75e9d..1a14dbd71 100644 --- a/documentation/en/api-v1-unstable-methods.md +++ b/documentation/en/api-v1-unstable-methods.md @@ -41,6 +41,7 @@ * [ClientDataTransferUpdates](#ClientDataTransferUpdates) * [ClientDealPieceCID](#ClientDealPieceCID) * [ClientDealSize](#ClientDealSize) + * [ClientExport](#ClientExport) * [ClientFindData](#ClientFindData) * [ClientGenCar](#ClientGenCar) * [ClientGetDealInfo](#ClientGetDealInfo) @@ -59,7 +60,7 @@ * [ClientRestartDataTransfer](#ClientRestartDataTransfer) * [ClientRetrieve](#ClientRetrieve) * [ClientRetrieveTryRestartInsufficientFunds](#ClientRetrieveTryRestartInsufficientFunds) - * [ClientRetrieveWithEvents](#ClientRetrieveWithEvents) + * [ClientRetrieveWait](#ClientRetrieveWait) * [ClientStartDeal](#ClientStartDeal) * [ClientStatelessDeal](#ClientStatelessDeal) * [Create](#Create) @@ -108,6 +109,7 @@ * [MsigApprove](#MsigApprove) * [MsigApproveTxnHash](#MsigApproveTxnHash) * [MsigCancel](#MsigCancel) + * [MsigCancelTxnHash](#MsigCancelTxnHash) * [MsigCreate](#MsigCreate) * [MsigGetAvailableBalance](#MsigGetAvailableBalance) * [MsigGetPending](#MsigGetPending) @@ -1054,6 +1056,32 @@ Response: } ``` +### ClientExport +ClientExport exports a file stored in the local filestore to a system file + + +Perms: 
admin + +Inputs: +```json +[ + { + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DAGs": null, + "FromLocalCAR": "string value", + "DealID": 5 + }, + { + "Path": "string value", + "IsCAR": true + } +] +``` + +Response: `{}` + ### ClientFindData ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer). @@ -1281,7 +1309,8 @@ Response: "Stages": { "Stages": null } - } + }, + "Event": 5 } ``` @@ -1481,9 +1510,8 @@ Inputs: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, "Piece": null, - "DatamodelPathSelector": "Links/21/Hash/Links/42/Hash", + "DataSelector": "Links/21/Hash/Links/42/Hash", "Size": 42, - "FromLocalCAR": "string value", "Total": "0", "UnsealPrice": "0", "PaymentInterval": 42, @@ -1495,15 +1523,16 @@ Inputs: "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "PieceCID": null } - }, - { - "Path": "string value", - "IsCAR": true } ] ``` -Response: `{}` +Response: +```json +{ + "DealID": 5 +} +``` ### ClientRetrieveTryRestartInsufficientFunds ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel @@ -1521,9 +1550,8 @@ Inputs: Response: `{}` -### ClientRetrieveWithEvents -ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel -of status updates. +### ClientRetrieveWait +ClientRetrieveWait waits for retrieval to be complete Perms: admin @@ -1531,43 +1559,11 @@ Perms: admin Inputs: ```json [ - { - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Piece": null, - "DatamodelPathSelector": "Links/21/Hash/Links/42/Hash", - "Size": 42, - "FromLocalCAR": "string value", - "Total": "0", - "UnsealPrice": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42, - "Client": "f01234", - "Miner": "f01234", - "MinerPeer": { - "Address": "f01234", - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "PieceCID": null - } - }, - { - "Path": "string value", - "IsCAR": true - } + 5 ] ``` -Response: -```json -{ - "Event": 5, - "Status": 0, - "BytesReceived": 42, - "FundsSpent": "0", - "Err": "string value" -} -``` +Response: `{}` ### ClientStartDeal ClientStartDeal proposes a deal with a miner. @@ -2702,6 +2698,44 @@ Response: ### MsigCancel MsigCancel cancels a previously-proposed multisig message +It takes the following params: , + + +Perms: sign + +Inputs: +```json +[ + "f01234", + 42, + "f01234" +] +``` + +Response: +```json +{ + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true +} +``` + +### MsigCancelTxnHash +MsigCancel cancels a previously-proposed multisig message It takes the following params: , , , , , , diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md index a723ec038..37fbea8c0 100644 --- a/documentation/en/cli-lotus-miner.md +++ b/documentation/en/cli-lotus-miner.md @@ -7,7 +7,7 @@ USAGE: lotus-miner [global options] command [command options] [arguments...] 
VERSION: - 1.13.1 + 1.13.2 COMMANDS: init Initialize a lotus miner repo @@ -590,7 +590,8 @@ CATEGORY: DEVELOPER OPTIONS: - --help, -h show help (default: false) + --timeout value duration to wait till fail (default: 30s) + --help, -h show help (default: false) ``` @@ -1459,7 +1460,8 @@ USAGE: lotus-miner pieces list-cids [command options] [arguments...] OPTIONS: - --help, -h show help (default: false) + --verbose, -v (default: false) + --help, -h show help (default: false) ``` @@ -1982,6 +1984,8 @@ OPTIONS: --seal (for init) use path for sealing (default: false) --store (for init) use path for long-term storage (default: false) --max-storage value (for init) limit storage space for sectors (expensive for very large paths!) + --groups value path group names + --allow-to value path groups allowed to pull data from this path (allow all if not specified) --help, -h show help (default: false) ``` diff --git a/documentation/en/cli-lotus-worker.md b/documentation/en/cli-lotus-worker.md index 2eee1774d..38ba917e2 100644 --- a/documentation/en/cli-lotus-worker.md +++ b/documentation/en/cli-lotus-worker.md @@ -7,7 +7,7 @@ USAGE: lotus-worker [global options] command [command options] [arguments...] VERSION: - 1.13.1 + 1.13.2 COMMANDS: run Start lotus worker @@ -15,6 +15,7 @@ COMMANDS: storage manage sector storage set Manage worker settings wait-quiet Block until all running tasks exit + resources Manage resource table overrides tasks Manage task processing help, h Shows a list of commands or help for one command @@ -94,6 +95,8 @@ OPTIONS: --seal (for init) use path for sealing (default: false) --store (for init) use path for long-term storage (default: false) --max-storage value (for init) limit storage space for sectors (expensive for very large paths!) + --groups value path group names + --allow-to value path groups allowed to pull data from this path (allow all if not specified) --help, -h show help (default: false) ``` @@ -125,6 +128,21 @@ OPTIONS: ``` +## lotus-worker resources +``` +NAME: + lotus-worker resources - Manage resource table overrides + +USAGE: + lotus-worker resources [command options] [arguments...] + +OPTIONS: + --all print all resource envvars (default: false) + --default print default resource envvars (default: false) + --help, -h show help (default: false) + +``` + ## lotus-worker tasks ``` NAME: diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md index ff2ca00b1..cdfdc0b4c 100644 --- a/documentation/en/cli-lotus.md +++ b/documentation/en/cli-lotus.md @@ -7,7 +7,7 @@ USAGE: lotus [global options] command [command options] [arguments...] VERSION: - 1.13.1 + 1.13.2 COMMANDS: daemon Start a lotus daemon process @@ -426,6 +426,8 @@ COMMANDS: RETRIEVAL: find Find data in the network retrieve Retrieve data from network + cat Show data from network + ls List object links cancel-retrieval Cancel a retrieval deal by deal ID; this also cancels the associated transfer list-retrievals List retrieval market deals STORAGE: @@ -544,12 +546,94 @@ USAGE: CATEGORY: RETRIEVAL +DESCRIPTION: + Retrieve data from the Filecoin network. + +The retrieve command will attempt to find a provider make a retrieval deal with +them. In case a provider can't be found, it can be specified with the --provider +flag. + +By default the data will be interpreted as DAG-PB UnixFSv1 File. Alternatively +a CAR file containing the raw IPLD graph can be exported by setting the --car +flag. + +Partial Retrieval: + +The --data-selector flag can be used to specify a sub-graph to fetch. 
The +selector can be specified as either IPLD datamodel text-path selector, or IPLD +json selector. + +In case of unixfs retrieval, the selector must point at a single root node, and +match the entire graph under that node. + +In case of CAR retrieval, the selector must have one common "sub-root" node. + +Examples: + +- Retrieve a file by CID + $ lotus client retrieve Qm... my-file.txt + +- Retrieve a file by CID from f0123 + $ lotus client retrieve --provider f0123 Qm... my-file.txt + +- Retrieve a first file from a specified directory + $ lotus client retrieve --data-selector /Links/0/Hash Qm... my-file.txt + + OPTIONS: + --car Export to a car file instead of a regular file (default: false) + --data-selector value, --datamodel-path-selector value IPLD datamodel text-path selector, or IPLD json selector + --car-export-merkle-proof (requires --data-selector and --car) Export data-selector merkle proof (default: false) + --from value address to send transactions from + --provider value, --miner value provider to use for retrieval, if not present it'll use local discovery + --maxPrice value maximum price the client is willing to consider (default: 0 FIL) + --pieceCid value require data to be retrieved from a specific Piece CID + --allow-local (default: false) + --help, -h show help (default: false) + +``` + +### lotus client cat +``` +NAME: + lotus client cat - Show data from network + +USAGE: + lotus client cat [command options] [dataCid] + +CATEGORY: + RETRIEVAL + +OPTIONS: + --ipld list IPLD datamodel links (default: false) + --data-selector value IPLD datamodel text-path selector, or IPLD json selector --from value address to send transactions from - --car export to a car file instead of a regular file (default: false) - --miner value miner address for retrieval, if not present it'll use local discovery - --datamodel-path-selector value a rudimentary (DM-level-only) text-path selector, allowing for sub-selection within a deal - --maxPrice value maximum price the client is willing to consider (default: 0.01 FIL) + --provider value, --miner value provider to use for retrieval, if not present it'll use local discovery + --maxPrice value maximum price the client is willing to consider (default: 0 FIL) + --pieceCid value require data to be retrieved from a specific Piece CID + --allow-local (default: false) + --help, -h show help (default: false) + +``` + +### lotus client ls +``` +NAME: + lotus client ls - List object links + +USAGE: + lotus client ls [command options] [dataCid] + +CATEGORY: + RETRIEVAL + +OPTIONS: + --ipld list IPLD datamodel links (default: false) + --depth value list links recursively up to the specified depth (default: 1) + --data-selector value IPLD datamodel text-path selector, or IPLD json selector + --from value address to send transactions from + --provider value, --miner value provider to use for retrieval, if not present it'll use local discovery + --maxPrice value maximum price the client is willing to consider (default: 0 FIL) --pieceCid value require data to be retrieved from a specific Piece CID --allow-local (default: false) --help, -h show help (default: false) @@ -857,6 +941,7 @@ COMMANDS: propose Propose a multisig transaction propose-remove Propose to remove a signer approve Approve a multisig message + cancel Cancel a multisig message add-propose Propose to add a signer add-approve Approve a message to add a signer add-cancel Cancel a message to add a signer @@ -952,6 +1037,20 @@ OPTIONS: ``` +### lotus msig cancel +``` +NAME: + lotus msig cancel - 
Cancel a multisig message + +USAGE: + lotus msig cancel [command options] [destination value [methodId methodParams]] + +OPTIONS: + --from value account to send the cancel message from + --help, -h show help (default: false) + +``` + ### lotus msig add-propose ``` NAME: @@ -1580,8 +1679,18 @@ OPTIONS: --help, -h show help (default: false) ``` -# nage + +### lotus mpool manage ``` +NAME: + lotus mpool manage - + +USAGE: + lotus mpool manage [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + ``` ## lotus state @@ -2450,7 +2559,8 @@ CATEGORY: DEVELOPER OPTIONS: - --help, -h show help (default: false) + --timeout value duration to wait till fail (default: 30s) + --help, -h show help (default: false) ``` diff --git a/extern/sector-storage/cgroups.go b/extern/sector-storage/cgroups.go new file mode 100644 index 000000000..e2ec0564e --- /dev/null +++ b/extern/sector-storage/cgroups.go @@ -0,0 +1,12 @@ +//go:build !linux +// +build !linux + +package sectorstorage + +func cgroupV1Mem() (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) { + return 0, 0, 0, 0, nil +} + +func cgroupV2Mem() (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) { + return 0, 0, 0, 0, nil +} diff --git a/extern/sector-storage/cgroups_linux.go b/extern/sector-storage/cgroups_linux.go new file mode 100644 index 000000000..38fe88f19 --- /dev/null +++ b/extern/sector-storage/cgroups_linux.go @@ -0,0 +1,117 @@ +//go:build linux +// +build linux + +package sectorstorage + +import ( + "bufio" + "bytes" + "math" + "os" + "path/filepath" + + "github.com/containerd/cgroups" + cgroupv2 "github.com/containerd/cgroups/v2" +) + +func cgroupV2MountPoint() (string, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return "", err + } + defer f.Close() //nolint + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + fields := bytes.Fields(scanner.Bytes()) + if len(fields) >= 9 && bytes.Equal(fields[8], []byte("cgroup2")) { + return string(fields[4]), nil + } + } + return "", cgroups.ErrMountPointNotExist +} + +func cgroupV1Mem() (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) { + path := cgroups.NestedPath("") + if pid := os.Getpid(); pid == 1 { + path = cgroups.RootPath + } + c, err := cgroups.Load(cgroups.SingleSubsystem(cgroups.V1, cgroups.Memory), path) + if err != nil { + return 0, 0, 0, 0, err + } + stats, err := c.Stat() + if err != nil { + return 0, 0, 0, 0, err + } + if stats.Memory == nil { + return 0, 0, 0, 0, nil + } + if stats.Memory.Usage != nil { + memoryMax = stats.Memory.Usage.Limit + // Exclude cached files + memoryUsed = stats.Memory.Usage.Usage - stats.Memory.InactiveFile - stats.Memory.ActiveFile + } + if stats.Memory.Swap != nil { + swapMax = stats.Memory.Swap.Limit + swapUsed = stats.Memory.Swap.Usage + } + return memoryMax, memoryUsed, swapMax, swapUsed, nil +} + +func cgroupV2MemFromPath(mp, path string) (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) { + c, err := cgroupv2.LoadManager(mp, path) + if err != nil { + return 0, 0, 0, 0, err + } + + stats, err := c.Stat() + if err != nil { + return 0, 0, 0, 0, err + } + + if stats.Memory != nil { + memoryMax = stats.Memory.UsageLimit + // Exclude memory used caching files + memoryUsed = stats.Memory.Usage - stats.Memory.File + swapMax = stats.Memory.SwapLimit + swapUsed = stats.Memory.SwapUsage + } + + return memoryMax, memoryUsed, swapMax, swapUsed, nil +} + +func cgroupV2Mem() (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) { + memoryMax = 
math.MaxUint64 + swapMax = math.MaxUint64 + + path, err := cgroupv2.PidGroupPath(os.Getpid()) + if err != nil { + return 0, 0, 0, 0, err + } + + mp, err := cgroupV2MountPoint() + if err != nil { + return 0, 0, 0, 0, err + } + + for path != "/" { + cgMemoryMax, cgMemoryUsed, cgSwapMax, cgSwapUsed, err := cgroupV2MemFromPath(mp, path) + if err != nil { + return 0, 0, 0, 0, err + } + if cgMemoryMax != 0 && cgMemoryMax < memoryMax { + log.Debugf("memory limited by cgroup %s: %v", path, cgMemoryMax) + memoryMax = cgMemoryMax + memoryUsed = cgMemoryUsed + } + if cgSwapMax != 0 && cgSwapMax < swapMax { + log.Debugf("swap limited by cgroup %s: %v", path, cgSwapMax) + swapMax = cgSwapMax + swapUsed = cgSwapUsed + } + path = filepath.Dir(path) + } + + return memoryMax, memoryUsed, swapMax, swapUsed, nil +} diff --git a/extern/sector-storage/ffiwrapper/sealer_cgo.go b/extern/sector-storage/ffiwrapper/sealer_cgo.go index 59770ec9a..61aceadaf 100644 --- a/extern/sector-storage/ffiwrapper/sealer_cgo.go +++ b/extern/sector-storage/ffiwrapper/sealer_cgo.go @@ -7,6 +7,9 @@ import ( "bufio" "bytes" "context" + "crypto/rand" + "encoding/base64" + "encoding/json" "io" "math/bits" "os" @@ -530,9 +533,19 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef, if err != nil { return nil, xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err) } - return p1o, nil + + p1odec := map[string]interface{}{} + if err := json.Unmarshal(p1o, &p1odec); err != nil { + return nil, xerrors.Errorf("unmarshaling pc1 output: %w", err) + } + + p1odec["_lotus_SealRandomness"] = ticket + + return json.Marshal(&p1odec) } +var PC2CheckRounds = 3 + func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) { paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing) if err != nil { @@ -545,6 +558,50 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef, return storage.SectorCids{}, xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err) } + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return storage.SectorCids{}, xerrors.Errorf("get ssize: %w", err) + } + + p1odec := map[string]interface{}{} + if err := json.Unmarshal(phase1Out, &p1odec); err != nil { + return storage.SectorCids{}, xerrors.Errorf("unmarshaling pc1 output: %w", err) + } + + var ticket abi.SealRandomness + ti, found := p1odec["_lotus_SealRandomness"] + + if found { + ticket, err = base64.StdEncoding.DecodeString(ti.(string)) + if err != nil { + return storage.SectorCids{}, xerrors.Errorf("decoding ticket: %w", err) + } + + for i := 0; i < PC2CheckRounds; i++ { + var sd [32]byte + _, _ = rand.Read(sd[:]) + + _, err := ffi.SealCommitPhase1( + sector.ProofType, + sealedCID, + unsealedCID, + paths.Cache, + paths.Sealed, + sector.ID.Number, + sector.ID.Miner, + ticket, + sd[:], + []abi.PieceInfo{{Size: abi.PaddedPieceSize(ssize), PieceCID: unsealedCID}}, + ) + if err != nil { + log.Warn("checking PreCommit failed: ", err) + log.Warnf("num:%d tkt:%v seed:%v sealedCID:%v, unsealedCID:%v", sector.ID.Number, ticket, sd[:], sealedCID, unsealedCID) + + return storage.SectorCids{}, xerrors.Errorf("checking PreCommit failed: %w", err) + } + } + } + return storage.SectorCids{ Unsealed: unsealedCID, Sealed: sealedCID, diff --git a/extern/sector-storage/fr32/fr32.go b/extern/sector-storage/fr32/fr32.go index 
17e6a1142..24175719c 100644 --- a/extern/sector-storage/fr32/fr32.go +++ b/extern/sector-storage/fr32/fr32.go @@ -8,7 +8,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" ) -var MTTresh = uint64(32 << 20) +var MTTresh = uint64(512 << 10) func mtChunkCount(usz abi.PaddedPieceSize) uint64 { threads := (uint64(usz)) / MTTresh diff --git a/extern/sector-storage/fr32/readers.go b/extern/sector-storage/fr32/readers.go index f14d5bf1c..163c520aa 100644 --- a/extern/sector-storage/fr32/readers.go +++ b/extern/sector-storage/fr32/readers.go @@ -16,13 +16,21 @@ type unpadReader struct { work []byte } +func BufSize(sz abi.PaddedPieceSize) int { + return int(MTTresh * mtChunkCount(sz)) +} + func NewUnpadReader(src io.Reader, sz abi.PaddedPieceSize) (io.Reader, error) { + buf := make([]byte, BufSize(sz)) + + return NewUnpadReaderBuf(src, sz, buf) +} + +func NewUnpadReaderBuf(src io.Reader, sz abi.PaddedPieceSize, buf []byte) (io.Reader, error) { if err := sz.Validate(); err != nil { return nil, xerrors.Errorf("bad piece size: %w", err) } - buf := make([]byte, MTTresh*mtChunkCount(sz)) - return &unpadReader{ src: src, diff --git a/extern/sector-storage/manager.go b/extern/sector-storage/manager.go index 430313730..fb081ee5d 100644 --- a/extern/sector-storage/manager.go +++ b/extern/sector-storage/manager.go @@ -51,13 +51,8 @@ type SectorManager interface { FaultTracker } -type WorkerID uuid.UUID // worker session UUID var ClosedWorkerID = uuid.UUID{} -func (w WorkerID) String() string { - return uuid.UUID(w).String() -} - type Manager struct { ls stores.LocalStorage storage *stores.Remote diff --git a/extern/sector-storage/manager_test.go b/extern/sector-storage/manager_test.go index d4044bbae..4a8ca5f22 100644 --- a/extern/sector-storage/manager_test.go +++ b/extern/sector-storage/manager_test.go @@ -322,7 +322,7 @@ func TestRestartWorker(t *testing.T) { defer cleanup() localTasks := []sealtasks.TaskType{ - sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, + sealtasks.TTAddPiece, sealtasks.TTFetch, } wds := datastore.NewMapDatastore() @@ -332,7 +332,7 @@ func TestRestartWorker(t *testing.T) { return &testExec{apch: arch}, nil }, WorkerConfig{ TaskTypes: localTasks, - }, stor, lstor, idx, m, statestore.New(wds)) + }, os.LookupEnv, stor, lstor, idx, m, statestore.New(wds)) err := m.AddWorker(ctx, w) require.NoError(t, err) @@ -368,7 +368,7 @@ func TestRestartWorker(t *testing.T) { return &testExec{apch: arch}, nil }, WorkerConfig{ TaskTypes: localTasks, - }, stor, lstor, idx, m, statestore.New(wds)) + }, os.LookupEnv, stor, lstor, idx, m, statestore.New(wds)) err = m.AddWorker(ctx, w) require.NoError(t, err) @@ -404,7 +404,7 @@ func TestReenableWorker(t *testing.T) { return &testExec{apch: arch}, nil }, WorkerConfig{ TaskTypes: localTasks, - }, stor, lstor, idx, m, statestore.New(wds)) + }, os.LookupEnv, stor, lstor, idx, m, statestore.New(wds)) err := m.AddWorker(ctx, w) require.NoError(t, err) @@ -453,3 +453,123 @@ func TestReenableWorker(t *testing.T) { i, _ = m.sched.Info(ctx) require.Len(t, i.(SchedDiagInfo).OpenWindows, 2) } + +func TestResUse(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + + ctx, done := context.WithCancel(context.Background()) + defer done() + + ds := datastore.NewMapDatastore() + + m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, ds) + defer cleanup() + + localTasks := []sealtasks.TaskType{ + sealtasks.TTAddPiece, sealtasks.TTFetch, + } + + wds := datastore.NewMapDatastore() + + arch := 
make(chan chan apres) + w := newLocalWorker(func() (ffiwrapper.Storage, error) { + return &testExec{apch: arch}, nil + }, WorkerConfig{ + TaskTypes: localTasks, + }, func(s string) (string, bool) { + return "", false + }, stor, lstor, idx, m, statestore.New(wds)) + + err := m.AddWorker(ctx, w) + require.NoError(t, err) + + sid := storage.SectorRef{ + ID: abi.SectorID{Miner: 1000, Number: 1}, + ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1, + } + + go func() { + _, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127))) + require.Error(t, err) + }() + +l: + for { + st := m.WorkerStats() + require.Len(t, st, 1) + for _, w := range st { + if w.MemUsedMax > 0 { + break l + } + time.Sleep(time.Millisecond) + } + } + + st := m.WorkerStats() + require.Len(t, st, 1) + for _, w := range st { + require.Equal(t, storiface.ResourceTable[sealtasks.TTAddPiece][abi.RegisteredSealProof_StackedDrg2KiBV1].MaxMemory, w.MemUsedMax) + } +} + +func TestResOverride(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + + ctx, done := context.WithCancel(context.Background()) + defer done() + + ds := datastore.NewMapDatastore() + + m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, ds) + defer cleanup() + + localTasks := []sealtasks.TaskType{ + sealtasks.TTAddPiece, sealtasks.TTFetch, + } + + wds := datastore.NewMapDatastore() + + arch := make(chan chan apres) + w := newLocalWorker(func() (ffiwrapper.Storage, error) { + return &testExec{apch: arch}, nil + }, WorkerConfig{ + TaskTypes: localTasks, + }, func(s string) (string, bool) { + if s == "AP_2K_MAX_MEMORY" { + return "99999", true + } + + return "", false + }, stor, lstor, idx, m, statestore.New(wds)) + + err := m.AddWorker(ctx, w) + require.NoError(t, err) + + sid := storage.SectorRef{ + ID: abi.SectorID{Miner: 1000, Number: 1}, + ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1, + } + + go func() { + _, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127))) + require.Error(t, err) + }() + +l: + for { + st := m.WorkerStats() + require.Len(t, st, 1) + for _, w := range st { + if w.MemUsedMax > 0 { + break l + } + time.Sleep(time.Millisecond) + } + } + + st := m.WorkerStats() + require.Len(t, st, 1) + for _, w := range st { + require.Equal(t, uint64(99999), w.MemUsedMax) + } +} diff --git a/extern/sector-storage/mock/mock.go b/extern/sector-storage/mock/mock.go index 273f0928e..8eaed54f6 100644 --- a/extern/sector-storage/mock/mock.go +++ b/extern/sector-storage/mock/mock.go @@ -12,6 +12,7 @@ import ( proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + "github.com/filecoin-project/dagstore/mount" ffiwrapper2 "github.com/filecoin-project/go-commp-utils/ffiwrapper" commcid "github.com/filecoin-project/go-fil-commcid" "github.com/filecoin-project/go-state-types/abi" @@ -384,12 +385,22 @@ func generateFakePoSt(sectorInfo []proof5.SectorInfo, rpt func(abi.RegisteredSea } } -func (mgr *SectorMgr) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) { - if offset != 0 { +func (mgr *SectorMgr) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error) { + if uint64(offset) != 0 { panic("implme") } - return 
ioutil.NopCloser(bytes.NewReader(mgr.pieces[mgr.sectors[sector.ID].pieces[0]][:size])), false, nil + br := bytes.NewReader(mgr.pieces[mgr.sectors[sector.ID].pieces[0]][:size]) + + return struct { + io.ReadCloser + io.Seeker + io.ReaderAt + }{ + ReadCloser: ioutil.NopCloser(br), + Seeker: br, + ReaderAt: br, + }, false, nil } func (mgr *SectorMgr) StageFakeData(mid abi.ActorID, spt abi.RegisteredSealProof) (storage.SectorRef, []abi.PieceInfo, error) { diff --git a/extern/sector-storage/piece_provider.go b/extern/sector-storage/piece_provider.go index ad3a2543e..4622289e8 100644 --- a/extern/sector-storage/piece_provider.go +++ b/extern/sector-storage/piece_provider.go @@ -8,6 +8,7 @@ import ( "github.com/ipfs/go-cid" "golang.org/x/xerrors" + "github.com/filecoin-project/dagstore/mount" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-storage/storage" @@ -23,7 +24,11 @@ type Unsealer interface { type PieceProvider interface { // ReadPiece is used to read an Unsealed piece at the given offset and of the given size from a Sector - ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) + // pieceOffset + pieceSize specify piece bounds for unsealing (note: with SDR the entire sector will be unsealed by + // default in most cases, but this might matter with future PoRep) + // startOffset is added to the pieceOffset to get the starting reader offset. + // The number of bytes that can be read is pieceSize-startOffset + ReadPiece(ctx context.Context, sector storage.SectorRef, pieceOffset storiface.UnpaddedByteIndex, pieceSize abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error) IsUnsealed(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) } @@ -67,50 +72,104 @@ func (p *pieceProvider) IsUnsealed(ctx context.Context, sector storage.SectorRef // It will NOT try to schedule an Unseal of a sealed sector file for the read. // // Returns a nil reader if the piece does NOT exist in any unsealed file or there is no unsealed file for the given sector on any of the workers. -func (p *pieceProvider) tryReadUnsealedPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (io.ReadCloser, context.CancelFunc, error) { +func (p *pieceProvider) tryReadUnsealedPiece(ctx context.Context, pc cid.Cid, sector storage.SectorRef, pieceOffset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (mount.Reader, error) { // acquire a lock purely for reading unsealed sectors ctx, cancel := context.WithCancel(ctx) if err := p.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil { cancel() - return nil, nil, xerrors.Errorf("acquiring read sector lock: %w", err) + return nil, xerrors.Errorf("acquiring read sector lock: %w", err) } - // Reader returns a reader for an unsealed piece at the given offset in the given sector. + // Reader returns a reader getter for an unsealed piece at the given offset in the given sector. // The returned reader will be nil if none of the workers has an unsealed sector file containing // the unsealed piece. 
- r, err := p.storage.Reader(ctx, sector, abi.PaddedPieceSize(offset.Padded()), size.Padded()) + rg, err := p.storage.Reader(ctx, sector, abi.PaddedPieceSize(pieceOffset.Padded()), size.Padded()) if err != nil { + cancel() log.Debugf("did not get storage reader;sector=%+v, err:%s", sector.ID, err) - cancel() - return nil, nil, err + return nil, err } - if r == nil { + if rg == nil { cancel() + return nil, nil } - return r, cancel, nil + buf := make([]byte, fr32.BufSize(size.Padded())) + + pr, err := (&pieceReader{ + ctx: ctx, + getReader: func(ctx context.Context, startOffset uint64) (io.ReadCloser, error) { + startOffsetAligned := storiface.UnpaddedByteIndex(startOffset / 127 * 127) // floor to multiple of 127 + + r, err := rg(startOffsetAligned.Padded()) + if err != nil { + return nil, xerrors.Errorf("getting reader at +%d: %w", startOffsetAligned, err) + } + + upr, err := fr32.NewUnpadReaderBuf(r, size.Padded(), buf) + if err != nil { + r.Close() // nolint + return nil, xerrors.Errorf("creating unpadded reader: %w", err) + } + + bir := bufio.NewReaderSize(upr, 127) + if startOffset > uint64(startOffsetAligned) { + if _, err := bir.Discard(int(startOffset - uint64(startOffsetAligned))); err != nil { + r.Close() // nolint + return nil, xerrors.Errorf("discarding bytes for startOffset: %w", err) + } + } + + return struct { + io.Reader + io.Closer + }{ + Reader: bir, + Closer: funcCloser(func() error { + return r.Close() + }), + }, nil + }, + len: size, + onClose: cancel, + pieceCid: pc, + }).init() + if err != nil || pr == nil { // pr == nil to make sure we don't return typed nil + cancel() + return nil, err + } + + return pr, err } +type funcCloser func() error + +func (f funcCloser) Close() error { + return f() +} + +var _ io.Closer = funcCloser(nil) + // ReadPiece is used to read an Unsealed piece at the given offset and of the given size from a Sector // If an Unsealed sector file exists with the Piece Unsealed in it, we'll use that for the read. // Otherwise, we will Unseal a Sealed sector file for the given sector and read the Unsealed piece from it. // If we do NOT have an existing unsealed file containing the given piece thus causing us to schedule an Unseal, // the returned boolean parameter will be set to true. // If we have an existing unsealed file containing the given piece, the returned boolean will be set to false. 
-func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) { - if err := offset.Valid(); err != nil { - return nil, false, xerrors.Errorf("offset is not valid: %w", err) +func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef, pieceOffset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error) { + if err := pieceOffset.Valid(); err != nil { + return nil, false, xerrors.Errorf("pieceOffset is not valid: %w", err) } if err := size.Validate(); err != nil { return nil, false, xerrors.Errorf("size is not a valid piece size: %w", err) } - r, unlock, err := p.tryReadUnsealedPiece(ctx, sector, offset, size) + r, err := p.tryReadUnsealedPiece(ctx, unsealed, sector, pieceOffset, size) log.Debugf("result of first tryReadUnsealedPiece: r=%+v, err=%s", r, err) if xerrors.Is(err, storiface.ErrSectorNotFound) { - log.Debugf("no unsealed sector file with unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + log.Debugf("no unsealed sector file with unsealed piece, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size) err = nil } if err != nil { @@ -129,14 +188,14 @@ func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef, if unsealed == cid.Undef { commd = nil } - if err := p.uns.SectorsUnsealPiece(ctx, sector, offset, size, ticket, commd); err != nil { + if err := p.uns.SectorsUnsealPiece(ctx, sector, pieceOffset, size, ticket, commd); err != nil { log.Errorf("failed to SectorsUnsealPiece: %s", err) return nil, false, xerrors.Errorf("unsealing piece: %w", err) } - log.Debugf("unsealed a sector file to read the piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + log.Debugf("unsealed a sector file to read the piece, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size) - r, unlock, err = p.tryReadUnsealedPiece(ctx, sector, offset, size) + r, err = p.tryReadUnsealedPiece(ctx, unsealed, sector, pieceOffset, size) if err != nil { log.Errorf("failed to tryReadUnsealedPiece after SectorsUnsealPiece: %s", err) return nil, true, xerrors.Errorf("read after unsealing: %w", err) @@ -145,32 +204,12 @@ func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef, log.Errorf("got no reader after unsealing piece") return nil, true, xerrors.Errorf("got no reader after unsealing piece") } - log.Debugf("got a reader to read unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + log.Debugf("got a reader to read unsealed piece, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size) } else { - log.Debugf("unsealed piece already exists, no need to unseal, sector=%+v, offset=%d, size=%d", sector, offset, size) + log.Debugf("unsealed piece already exists, no need to unseal, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size) } - upr, err := fr32.NewUnpadReader(r, size.Padded()) - if err != nil { - unlock() - return nil, uns, xerrors.Errorf("creating unpadded reader: %w", err) - } + log.Debugf("returning reader to read unsealed piece, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size) - log.Debugf("returning reader to read unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size) - - return &funcCloser{ - Reader: bufio.NewReaderSize(upr, 127), - close: func() error { - err = r.Close() - unlock() - return err - }, - 
}, uns, nil + return r, uns, nil } - -type funcCloser struct { - io.Reader - close func() error -} - -func (fc *funcCloser) Close() error { return fc.close() } diff --git a/extern/sector-storage/piece_provider_test.go b/extern/sector-storage/piece_provider_test.go index eb3ffa7c3..3ace2916e 100644 --- a/extern/sector-storage/piece_provider_test.go +++ b/extern/sector-storage/piece_provider_test.go @@ -7,6 +7,7 @@ import ( "math/rand" "net" "net/http" + "os" "testing" "github.com/filecoin-project/go-state-types/abi" @@ -286,7 +287,7 @@ func (p *pieceProviderTestHarness) addRemoteWorker(t *testing.T, tasks []sealtas worker := newLocalWorker(nil, WorkerConfig{ TaskTypes: tasks, - }, remote, localStore, p.index, p.mgr, csts) + }, os.LookupEnv, remote, localStore, p.index, p.mgr, csts) p.servers = append(p.servers, svc) p.localStores = append(p.localStores, localStore) diff --git a/extern/sector-storage/piece_reader.go b/extern/sector-storage/piece_reader.go new file mode 100644 index 000000000..d7a3f4e98 --- /dev/null +++ b/extern/sector-storage/piece_reader.go @@ -0,0 +1,180 @@ +package sectorstorage + +import ( + "bufio" + "context" + "io" + + "github.com/ipfs/go-cid" + "go.opencensus.io/stats" + "golang.org/x/xerrors" + + "github.com/filecoin-project/dagstore/mount" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/metrics" +) + +// For small read skips, it's faster to "burn" some bytes than to setup new sector reader. +// Assuming 1ms stream seek latency, and 1G/s stream rate, we're willing to discard up to 1 MiB. +var MaxPieceReaderBurnBytes int64 = 1 << 20 // 1M +var ReadBuf = 128 * (127 * 8) // unpadded(128k) + +type pieceGetter func(ctx context.Context, offset uint64) (io.ReadCloser, error) + +type pieceReader struct { + ctx context.Context + getReader pieceGetter + pieceCid cid.Cid + len abi.UnpaddedPieceSize + onClose context.CancelFunc + + closed bool + seqAt int64 // next byte to be read by io.Reader + + r io.ReadCloser + br *bufio.Reader + rAt int64 +} + +func (p *pieceReader) init() (_ *pieceReader, err error) { + stats.Record(p.ctx, metrics.DagStorePRInitCount.M(1)) + + p.rAt = 0 + p.r, err = p.getReader(p.ctx, uint64(p.rAt)) + if err != nil { + return nil, err + } + if p.r == nil { + return nil, nil + } + + p.br = bufio.NewReaderSize(p.r, ReadBuf) + + return p, nil +} + +func (p *pieceReader) check() error { + if p.closed { + return xerrors.Errorf("reader closed") + } + + return nil +} + +func (p *pieceReader) Close() error { + if err := p.check(); err != nil { + return err + } + + if p.r != nil { + if err := p.r.Close(); err != nil { + return err + } + if err := p.r.Close(); err != nil { + return err + } + p.r = nil + } + + p.onClose() + + p.closed = true + + return nil +} + +func (p *pieceReader) Read(b []byte) (int, error) { + if err := p.check(); err != nil { + return 0, err + } + + n, err := p.ReadAt(b, p.seqAt) + p.seqAt += int64(n) + return n, err +} + +func (p *pieceReader) Seek(offset int64, whence int) (int64, error) { + if err := p.check(); err != nil { + return 0, err + } + + switch whence { + case io.SeekStart: + p.seqAt = offset + case io.SeekCurrent: + p.seqAt += offset + case io.SeekEnd: + p.seqAt = int64(p.len) + offset + default: + return 0, xerrors.Errorf("bad whence") + } + + return p.seqAt, nil +} + +func (p *pieceReader) ReadAt(b []byte, off int64) (n int, err error) { + if err := p.check(); err != nil { + return 0, err + } + + stats.Record(p.ctx, metrics.DagStorePRBytesRequested.M(int64(len(b)))) + + // 1. 
Get the backing reader into the correct position + + // if the backing reader is ahead of the offset we want, or more than + // MaxPieceReaderBurnBytes behind, reset the reader + if p.r == nil || p.rAt > off || p.rAt+MaxPieceReaderBurnBytes < off { + if p.r != nil { + if err := p.r.Close(); err != nil { + return 0, xerrors.Errorf("closing backing reader: %w", err) + } + p.r = nil + p.br = nil + } + + log.Debugw("pieceReader new stream", "piece", p.pieceCid, "at", p.rAt, "off", off-p.rAt, "n", len(b)) + + if off > p.rAt { + stats.Record(p.ctx, metrics.DagStorePRSeekForwardBytes.M(off-p.rAt), metrics.DagStorePRSeekForwardCount.M(1)) + } else { + stats.Record(p.ctx, metrics.DagStorePRSeekBackBytes.M(p.rAt-off), metrics.DagStorePRSeekBackCount.M(1)) + } + + p.rAt = off + p.r, err = p.getReader(p.ctx, uint64(p.rAt)) + p.br = bufio.NewReaderSize(p.r, ReadBuf) + if err != nil { + return 0, xerrors.Errorf("getting backing reader: %w", err) + } + } + + // 2. Check if we need to burn some bytes + if off > p.rAt { + stats.Record(p.ctx, metrics.DagStorePRBytesDiscarded.M(off-p.rAt), metrics.DagStorePRDiscardCount.M(1)) + + n, err := io.CopyN(io.Discard, p.br, off-p.rAt) + p.rAt += n + if err != nil { + return 0, xerrors.Errorf("discarding read gap: %w", err) + } + } + + // 3. Sanity check + if off != p.rAt { + return 0, xerrors.Errorf("bad reader offset; requested %d; at %d", off, p.rAt) + } + + // 4. Read! + n, err = io.ReadFull(p.br, b) + if n < len(b) { + log.Debugw("pieceReader short read", "piece", p.pieceCid, "at", p.rAt, "toEnd", int64(p.len)-p.rAt, "n", len(b), "read", n, "err", err) + } + if err == io.ErrUnexpectedEOF { + err = io.EOF + } + + p.rAt += int64(n) + return n, err +} + +var _ mount.Reader = (*pieceReader)(nil) diff --git a/extern/sector-storage/sched.go b/extern/sector-storage/sched.go index 1ffb15e5b..d7d7d3265 100644 --- a/extern/sector-storage/sched.go +++ b/extern/sector-storage/sched.go @@ -53,7 +53,7 @@ type WorkerSelector interface { type scheduler struct { workersLk sync.RWMutex - workers map[WorkerID]*workerHandle + workers map[storiface.WorkerID]*workerHandle schedule chan *workerRequest windowRequests chan *schedWindowRequest @@ -95,7 +95,7 @@ type workerHandle struct { } type schedWindowRequest struct { - worker WorkerID + worker storiface.WorkerID done chan *schedWindow } @@ -107,14 +107,14 @@ type schedWindow struct { type workerDisableReq struct { activeWindows []*schedWindow - wid WorkerID + wid storiface.WorkerID done func() } type activeResources struct { memUsedMin uint64 memUsedMax uint64 - gpuUsed bool + gpuUsed float64 cpuUse uint64 cond *sync.Cond @@ -145,7 +145,7 @@ type workerResponse struct { func newScheduler() *scheduler { return &scheduler{ - workers: map[WorkerID]*workerHandle{}, + workers: map[storiface.WorkerID]*workerHandle{}, schedule: make(chan *workerRequest), windowRequests: make(chan *schedWindowRequest, 20), @@ -378,7 +378,6 @@ func (sh *scheduler) trySched() { }() task := (*sh.schedQueue)[sqi] - needRes := ResourceTable[task.taskType][task.sector.ProofType] task.indexHeap = sqi for wnd, windowRequest := range sh.openWindows { @@ -394,6 +393,8 @@ func (sh *scheduler) trySched() { continue } + needRes := worker.info.Resources.ResourceSpec(task.sector.ProofType, task.taskType) + // TODO: allow bigger windows if !windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, "schedAcceptable", worker.info) { continue @@ -457,7 +458,6 @@ func (sh *scheduler) trySched() { for sqi := 0; sqi < queueLen; sqi++ { task := (*sh.schedQueue)[sqi] 
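pieceReader.ReadAt (earlier in this hunk) chooses between burning a small gap on the current backing stream and opening a new one: it reopens when there is no stream, when the stream is already past the requested offset, or when the forward gap exceeds MaxPieceReaderBurnBytes. A tiny sketch of that decision with hypothetical names, assuming the same 1 MiB threshold:

```go
package main

import "fmt"

const maxBurnBytes = 1 << 20 // mirrors MaxPieceReaderBurnBytes: burn up to 1 MiB instead of reopening

// streamAction reports whether a sequential stream currently positioned at
// streamAt can serve a read at off by discarding bytes, or must be reopened.
func streamAction(streamAt, off int64, haveStream bool) string {
	switch {
	case !haveStream, streamAt > off, streamAt+maxBurnBytes < off:
		return "reopen" // no stream, stream is past the offset, or the gap is too large to burn
	case streamAt < off:
		return "discard" // burn (off - streamAt) bytes, then read
	default:
		return "read" // already at the right offset
	}
}

func main() {
	fmt.Println(streamAction(0, 512, true))     // discard
	fmt.Println(streamAction(0, 4<<20, true))   // reopen (gap > 1 MiB)
	fmt.Println(streamAction(1024, 1024, true)) // read
	fmt.Println(streamAction(2048, 1024, true)) // reopen (cannot seek backwards)
}
```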
- needRes := ResourceTable[task.taskType][task.sector.ProofType] selectedWindow := -1 for _, wnd := range acceptableWindows[task.indexHeap] { @@ -466,6 +466,8 @@ func (sh *scheduler) trySched() { log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.ID.Number, wnd) + needRes := info.Resources.ResourceSpec(task.sector.ProofType, task.taskType) + // TODO: allow bigger windows if !windows[wnd].allocated.canHandleRequest(needRes, wid, "schedAssign", info) { continue diff --git a/extern/sector-storage/sched_resources.go b/extern/sector-storage/sched_resources.go index 7c16120c2..5f7f1cfb8 100644 --- a/extern/sector-storage/sched_resources.go +++ b/extern/sector-storage/sched_resources.go @@ -6,7 +6,7 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) -func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerInfo, r Resources, locker sync.Locker, cb func() error) error { +func (a *activeResources) withResources(id storiface.WorkerID, wr storiface.WorkerInfo, r storiface.Resources, locker sync.Locker, cb func() error) error { for !a.canHandleRequest(r, id, "withResources", wr) { if a.cond == nil { a.cond = sync.NewCond(locker) @@ -30,20 +30,20 @@ func (a *activeResources) hasWorkWaiting() bool { return a.waiting > 0 } -func (a *activeResources) add(wr storiface.WorkerResources, r Resources) { - if r.CanGPU { - a.gpuUsed = true +func (a *activeResources) add(wr storiface.WorkerResources, r storiface.Resources) { + if r.GPUUtilization > 0 { + a.gpuUsed += r.GPUUtilization } - a.cpuUse += r.Threads(wr.CPUs) + a.cpuUse += r.Threads(wr.CPUs, len(wr.GPUs)) a.memUsedMin += r.MinMemory a.memUsedMax += r.MaxMemory } -func (a *activeResources) free(wr storiface.WorkerResources, r Resources) { - if r.CanGPU { - a.gpuUsed = false +func (a *activeResources) free(wr storiface.WorkerResources, r storiface.Resources) { + if r.GPUUtilization > 0 { + a.gpuUsed -= r.GPUUtilization } - a.cpuUse -= r.Threads(wr.CPUs) + a.cpuUse -= r.Threads(wr.CPUs, len(wr.GPUs)) a.memUsedMin -= r.MinMemory a.memUsedMax -= r.MaxMemory @@ -54,35 +54,46 @@ func (a *activeResources) free(wr storiface.WorkerResources, r Resources) { // canHandleRequest evaluates if the worker has enough available resources to // handle the request. -func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, caller string, info storiface.WorkerInfo) bool { +func (a *activeResources) canHandleRequest(needRes storiface.Resources, wid storiface.WorkerID, caller string, info storiface.WorkerInfo) bool { if info.IgnoreResources { // shortcircuit; if this worker is ignoring resources, it can always handle the request. 
return true } res := info.Resources + // TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running) - minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory - if minNeedMem > res.MemPhysical { - log.Debugf("sched: not scheduling on worker %s for %s; not enough physical memory - need: %dM, have %dM", wid, caller, minNeedMem/mib, res.MemPhysical/mib) + memNeeded := needRes.MinMemory + needRes.BaseMinMemory + memUsed := a.memUsedMin + // assume that MemUsed can be swapped, so only check it in the vmem Check + memAvail := res.MemPhysical - memUsed + if memNeeded > memAvail { + log.Debugf("sched: not scheduling on worker %s for %s; not enough physical memory - need: %dM, have %dM available", wid, caller, memNeeded/mib, memAvail/mib) return false } - maxNeedMem := res.MemReserved + a.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory + vmemNeeded := needRes.MaxMemory + needRes.BaseMinMemory + vmemUsed := a.memUsedMax + workerMemoryReserved := res.MemUsed + res.MemSwapUsed // memory used outside lotus-worker (used by the OS, etc.) - if maxNeedMem > res.MemSwap+res.MemPhysical { - log.Debugf("sched: not scheduling on worker %s for %s; not enough virtual memory - need: %dM, have %dM", wid, caller, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib) + if vmemUsed < workerMemoryReserved { + vmemUsed = workerMemoryReserved + } + vmemAvail := (res.MemPhysical + res.MemSwap) - vmemUsed + + if vmemNeeded > vmemAvail { + log.Debugf("sched: not scheduling on worker %s for %s; not enough virtual memory - need: %dM, have %dM available", wid, caller, vmemNeeded/mib, vmemAvail/mib) return false } - if a.cpuUse+needRes.Threads(res.CPUs) > res.CPUs { - log.Debugf("sched: not scheduling on worker %s for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads(res.CPUs), a.cpuUse, res.CPUs) + if a.cpuUse+needRes.Threads(res.CPUs, len(res.GPUs)) > res.CPUs { + log.Debugf("sched: not scheduling on worker %s for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads(res.CPUs, len(res.GPUs)), a.cpuUse, res.CPUs) return false } - if len(res.GPUs) > 0 && needRes.CanGPU { - if a.gpuUsed { - log.Debugf("sched: not scheduling on worker %s for %s; GPU in use", wid, caller) + if len(res.GPUs) > 0 && needRes.GPUUtilization > 0 { + if a.gpuUsed+needRes.GPUUtilization > float64(len(res.GPUs)) { + log.Debugf("sched: not scheduling on worker %s for %s; GPU(s) in use", wid, caller) return false } } @@ -96,12 +107,21 @@ func (a *activeResources) utilization(wr storiface.WorkerResources) float64 { cpu := float64(a.cpuUse) / float64(wr.CPUs) max = cpu - memMin := float64(a.memUsedMin+wr.MemReserved) / float64(wr.MemPhysical) + memUsed := a.memUsedMin + if memUsed < wr.MemUsed { + memUsed = wr.MemUsed + } + memMin := float64(memUsed) / float64(wr.MemPhysical) if memMin > max { max = memMin } - memMax := float64(a.memUsedMax+wr.MemReserved) / float64(wr.MemPhysical+wr.MemSwap) + vmemUsed := a.memUsedMax + if a.memUsedMax < wr.MemUsed+wr.MemSwapUsed { + vmemUsed = wr.MemUsed + wr.MemSwapUsed + } + memMax := float64(vmemUsed) / float64(wr.MemPhysical+wr.MemSwap) + if memMax > max { max = memMax } diff --git a/extern/sector-storage/sched_test.go b/extern/sector-storage/sched_test.go index fbc4d83ee..667fabb66 100644 --- a/extern/sector-storage/sched_test.go +++ b/extern/sector-storage/sched_test.go @@ -41,14 +41,16 @@ func TestWithPriority(t *testing.T) { var decentWorkerResources = storiface.WorkerResources{ 
MemPhysical: 128 << 30, MemSwap: 200 << 30, - MemReserved: 2 << 30, + MemUsed: 1 << 30, + MemSwapUsed: 1 << 30, CPUs: 32, - GPUs: []string{"a GPU"}, + GPUs: []string{}, } var constrainedWorkerResources = storiface.WorkerResources{ MemPhysical: 1 << 30, - MemReserved: 2 << 30, + MemUsed: 1 << 30, + MemSwapUsed: 1 << 30, CPUs: 1, } @@ -188,6 +190,9 @@ func TestSchedStartStop(t *testing.T) { } func TestSched(t *testing.T) { + storiface.ParallelNum = 1 + storiface.ParallelDenom = 1 + ctx, done := context.WithTimeout(context.Background(), 30*time.Second) defer done() @@ -254,7 +259,9 @@ func TestSched(t *testing.T) { return nil }, noopAction) - require.NoError(t, err, fmt.Sprint(l, l2)) + if err != context.Canceled { + require.NoError(t, err, fmt.Sprint(l, l2)) + } }() <-sched.testSync @@ -299,9 +306,6 @@ func TestSched(t *testing.T) { } testFunc := func(workers []workerSpec, tasks []task) func(t *testing.T) { - ParallelNum = 1 - ParallelDenom = 1 - return func(t *testing.T) { index := stores.NewIndex() @@ -558,7 +562,7 @@ func BenchmarkTrySched(b *testing.B) { b.StopTimer() sched := newScheduler() - sched.workers[WorkerID{}] = &workerHandle{ + sched.workers[storiface.WorkerID{}] = &workerHandle{ workerRpc: nil, info: storiface.WorkerInfo{ Hostname: "t", @@ -570,7 +574,7 @@ func BenchmarkTrySched(b *testing.B) { for i := 0; i < windows; i++ { sched.openWindows = append(sched.openWindows, &schedWindowRequest{ - worker: WorkerID{}, + worker: storiface.WorkerID{}, done: make(chan *schedWindow, 1000), }) } @@ -616,7 +620,7 @@ func TestWindowCompact(t *testing.T) { taskType: task, sector: storage.SectorRef{ProofType: spt}, }) - window.allocated.add(wh.info.Resources, ResourceTable[task][spt]) + window.allocated.add(wh.info.Resources, storiface.ResourceTable[task][spt]) } wh.activeWindows = append(wh.activeWindows, window) @@ -635,7 +639,7 @@ func TestWindowCompact(t *testing.T) { for ti, task := range tasks { require.Equal(t, task, wh.activeWindows[wi].todo[ti].taskType, "%d, %d", wi, ti) - expectRes.add(wh.info.Resources, ResourceTable[task][spt]) + expectRes.add(wh.info.Resources, storiface.ResourceTable[task][spt]) } require.Equal(t, expectRes.cpuUse, wh.activeWindows[wi].allocated.cpuUse, "%d", wi) diff --git a/extern/sector-storage/sched_worker.go b/extern/sector-storage/sched_worker.go index e717e58e2..762c3fc3a 100644 --- a/extern/sector-storage/sched_worker.go +++ b/extern/sector-storage/sched_worker.go @@ -4,17 +4,18 @@ import ( "context" "time" - "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "golang.org/x/xerrors" + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) type schedWorker struct { sched *scheduler worker *workerHandle - wid WorkerID + wid storiface.WorkerID heartbeatTimer *time.Ticker scheduledWindows chan *schedWindow @@ -50,7 +51,7 @@ func (sh *scheduler) runWorker(ctx context.Context, w Worker) error { closedMgr: make(chan struct{}), } - wid := WorkerID(sessID) + wid := storiface.WorkerID(sessID) sh.workersLk.Lock() _, exist := sh.workers[wid] @@ -237,7 +238,7 @@ func (sw *schedWorker) checkSession(ctx context.Context) bool { continue } - if WorkerID(curSes) != sw.wid { + if storiface.WorkerID(curSes) != sw.wid { if curSes != ClosedWorkerID { // worker restarted log.Warnw("worker session changed (worker restarted?)", "initial", sw.wid, "current", curSes) @@ -296,7 +297,7 @@ func (sw *schedWorker) 
workerCompactWindows() { var moved []int for ti, todo := range window.todo { - needRes := ResourceTable[todo.taskType][todo.sector.ProofType] + needRes := worker.info.Resources.ResourceSpec(todo.sector.ProofType, todo.taskType) if !lower.allocated.canHandleRequest(needRes, sw.wid, "compactWindows", worker.info) { continue } @@ -357,7 +358,7 @@ assignLoop: worker.lk.Lock() for t, todo := range firstWindow.todo { - needRes := ResourceTable[todo.taskType][todo.sector.ProofType] + needRes := worker.info.Resources.ResourceSpec(todo.sector.ProofType, todo.taskType) if worker.preparing.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info) { tidx = t break @@ -418,7 +419,7 @@ assignLoop: continue } - needRes := ResourceTable[todo.taskType][todo.sector.ProofType] + needRes := storiface.ResourceTable[todo.taskType][todo.sector.ProofType] if worker.active.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info) { tidx = t break @@ -456,7 +457,7 @@ assignLoop: func (sw *schedWorker) startProcessingTask(req *workerRequest) error { w, sh := sw.worker, sw.sched - needRes := ResourceTable[req.taskType][req.sector.ProofType] + needRes := w.info.Resources.ResourceSpec(req.sector.ProofType, req.taskType) w.lk.Lock() w.preparing.add(w.info.Resources, needRes) @@ -539,7 +540,7 @@ func (sw *schedWorker) startProcessingTask(req *workerRequest) error { func (sw *schedWorker) startProcessingReadyTask(req *workerRequest) error { w, sh := sw.worker, sw.sched - needRes := ResourceTable[req.taskType][req.sector.ProofType] + needRes := w.info.Resources.ResourceSpec(req.sector.ProofType, req.taskType) w.active.add(w.info.Resources, needRes) @@ -579,7 +580,7 @@ func (sw *schedWorker) startProcessingReadyTask(req *workerRequest) error { return nil } -func (sh *scheduler) workerCleanup(wid WorkerID, w *workerHandle) { +func (sh *scheduler) workerCleanup(wid storiface.WorkerID, w *workerHandle) { select { case <-w.closingMgr: default: diff --git a/extern/sector-storage/stores/http_handler.go b/extern/sector-storage/stores/http_handler.go index 845bfdd7b..771a9a3a1 100644 --- a/extern/sector-storage/stores/http_handler.go +++ b/extern/sector-storage/stores/http_handler.go @@ -2,7 +2,6 @@ package stores import ( "encoding/json" - "io" "net/http" "os" "strconv" @@ -85,7 +84,6 @@ func (handler *FetchHandler) remoteStatFs(w http.ResponseWriter, r *http.Request // remoteGetSector returns the sector file/tared directory byte stream for the sectorID and sector file type sent in the request. // returns an error if it does NOT have the required sector file/dir. 
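The scheduler changes above replace direct ResourceTable lookups with worker.info.Resources.ResourceSpec, so each worker can carry its own resource table (built from env vars such as PC2_2K_MAX_MEMORY, as exercised by the storiface tests further down this diff). A rough sketch of the same override-with-fallback lookup, using stand-in types rather than the real sealtasks/abi/storiface ones:

```go
package main

import "fmt"

// Stand-ins for sealtasks.TaskType / abi.RegisteredSealProof keyed tables.
type taskType string
type proofType string

type resources struct {
	MaxMemory      uint64
	MaxParallelism int
}

// defaults plays the role of storiface.ResourceTable.
var defaults = map[taskType]map[proofType]resources{
	"PC2": {"2K": {MaxMemory: 1 << 30, MaxParallelism: -1}},
}

// resourceSpec mirrors WorkerResources.ResourceSpec: prefer the per-worker
// override table when it has an entry, otherwise fall back to the defaults.
func resourceSpec(override map[taskType]map[proofType]resources, tt taskType, spt proofType) resources {
	def := defaults[tt][spt]
	if override == nil {
		return def
	}
	if byProof, ok := override[tt]; ok {
		if r, ok := byProof[spt]; ok {
			return r
		}
	}
	return def
}

func main() {
	worker := map[taskType]map[proofType]resources{
		// e.g. built from a PC2_2K_MAX_MEMORY-style env override
		"PC2": {"2K": {MaxMemory: 2222, MaxParallelism: -1}},
	}
	fmt.Println(resourceSpec(worker, "PC2", "2K").MaxMemory) // 2222
	fmt.Println(resourceSpec(nil, "PC2", "2K").MaxMemory)    // default: 1073741824
}
```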
func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Request) { - log.Infof("SERVE GET %s", r.URL) vars := mux.Vars(r) id, err := storiface.ParseSectorID(vars["id"]) @@ -139,17 +137,12 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ return } - rd, err := tarutil.TarDirectory(path) - if err != nil { - log.Errorf("%+v", err) - w.WriteHeader(500) - return - } - w.Header().Set("Content-Type", "application/x-tar") w.WriteHeader(200) - if _, err := io.CopyBuffer(w, rd, make([]byte, CopyBuf)); err != nil { - log.Errorf("%+v", err) + + err := tarutil.TarDirectory(path, w, make([]byte, CopyBuf)) + if err != nil { + log.Errorf("send tar: %+v", err) return } } else { diff --git a/extern/sector-storage/stores/index.go b/extern/sector-storage/stores/index.go index 2a37e653a..12cb26a56 100644 --- a/extern/sector-storage/stores/index.go +++ b/extern/sector-storage/stores/index.go @@ -29,6 +29,8 @@ var SkippedHeartbeatThresh = HeartbeatInterval * 5 // filesystem, local or networked / shared by multiple machines type ID string +type Group = string + type StorageInfo struct { ID ID URLs []string // TODO: Support non-http transports @@ -37,6 +39,9 @@ type StorageInfo struct { CanSeal bool CanStore bool + + Groups []Group + AllowTo []Group } type HealthReport struct { @@ -168,6 +173,8 @@ func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st fsutil.FsS i.stores[si.ID].info.MaxStorage = si.MaxStorage i.stores[si.ID].info.CanSeal = si.CanSeal i.stores[si.ID].info.CanStore = si.CanStore + i.stores[si.ID].info.Groups = si.Groups + i.stores[si.ID].info.AllowTo = si.AllowTo return nil } @@ -292,6 +299,8 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storif storageIDs := map[ID]uint64{} isprimary := map[ID]bool{} + allowTo := map[Group]struct{}{} + for _, pathType := range storiface.PathTypes { if ft&pathType == 0 { continue @@ -323,6 +332,14 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storif urls[k] = rl.String() } + if allowTo != nil && len(st.info.AllowTo) > 0 { + for _, group := range st.info.AllowTo { + allowTo[group] = struct{}{} + } + } else { + allowTo = nil // allow to any + } + out = append(out, SectorStorageInfo{ ID: id, URLs: urls, @@ -365,6 +382,22 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storif continue } + if allowTo != nil { + allow := false + for _, group := range st.info.Groups { + if _, found := allowTo[group]; found { + log.Debugf("path %s in allowed group %s", st.info.ID, group) + allow = true + break + } + } + + if !allow { + log.Debugf("not selecting on %s, not in allowed group, allow %+v; path has %+v", st.info.ID, allowTo, st.info.Groups) + continue + } + } + urls := make([]string, len(st.info.URLs)) for k, u := range st.info.URLs { rl, err := url.Parse(u) diff --git a/extern/sector-storage/stores/index_test.go b/extern/sector-storage/stores/index_test.go new file mode 100644 index 000000000..bb4239035 --- /dev/null +++ b/extern/sector-storage/stores/index_test.go @@ -0,0 +1,154 @@ +package stores + +import ( + "context" + "testing" + + "github.com/google/uuid" + logging "github.com/ipfs/go-log/v2" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" +) + +func init() { + logging.SetLogLevel("stores", "DEBUG") +} + +func newTestStorage() 
StorageInfo { + return StorageInfo{ + ID: ID(uuid.New().String()), + CanSeal: true, + CanStore: true, + Groups: nil, + AllowTo: nil, + } +} + +var bigFsStat = fsutil.FsStat{ + Capacity: 1 << 40, + Available: 1 << 40, + FSAvailable: 1 << 40, + Reserved: 0, + Max: 0, + Used: 0, +} + +const s32g = 32 << 30 + +func TestFindSimple(t *testing.T) { + ctx := context.Background() + + i := NewIndex() + stor1 := newTestStorage() + stor2 := newTestStorage() + + require.NoError(t, i.StorageAttach(ctx, stor1, bigFsStat)) + require.NoError(t, i.StorageAttach(ctx, stor2, bigFsStat)) + + s1 := abi.SectorID{ + Miner: 12, + Number: 34, + } + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, true) + require.NoError(t, err) + require.Len(t, si, 0) + } + + require.NoError(t, i.StorageDeclareSector(ctx, stor1.ID, s1, storiface.FTSealed, true)) + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, false) + require.NoError(t, err) + require.Len(t, si, 1) + require.Equal(t, stor1.ID, si[0].ID) + } + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, true) + require.NoError(t, err) + require.Len(t, si, 2) + } +} + +func TestFindNoAllow(t *testing.T) { + ctx := context.Background() + + i := NewIndex() + stor1 := newTestStorage() + stor1.AllowTo = []Group{"grp1"} + stor2 := newTestStorage() + + require.NoError(t, i.StorageAttach(ctx, stor1, bigFsStat)) + require.NoError(t, i.StorageAttach(ctx, stor2, bigFsStat)) + + s1 := abi.SectorID{ + Miner: 12, + Number: 34, + } + require.NoError(t, i.StorageDeclareSector(ctx, stor1.ID, s1, storiface.FTSealed, true)) + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, false) + require.NoError(t, err) + require.Len(t, si, 1) + require.Equal(t, stor1.ID, si[0].ID) + } + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, true) + require.NoError(t, err) + require.Len(t, si, 1) + require.Equal(t, stor1.ID, si[0].ID) + } +} + +func TestFindAllow(t *testing.T) { + ctx := context.Background() + + i := NewIndex() + + stor1 := newTestStorage() + stor1.AllowTo = []Group{"grp1"} + + stor2 := newTestStorage() + stor2.Groups = []Group{"grp1"} + + stor3 := newTestStorage() + stor3.Groups = []Group{"grp2"} + + require.NoError(t, i.StorageAttach(ctx, stor1, bigFsStat)) + require.NoError(t, i.StorageAttach(ctx, stor2, bigFsStat)) + require.NoError(t, i.StorageAttach(ctx, stor3, bigFsStat)) + + s1 := abi.SectorID{ + Miner: 12, + Number: 34, + } + require.NoError(t, i.StorageDeclareSector(ctx, stor1.ID, s1, storiface.FTSealed, true)) + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, false) + require.NoError(t, err) + require.Len(t, si, 1) + require.Equal(t, stor1.ID, si[0].ID) + } + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, true) + require.NoError(t, err) + require.Len(t, si, 2) + if si[0].ID == stor1.ID { + require.Equal(t, stor1.ID, si[0].ID) + require.Equal(t, stor2.ID, si[1].ID) + } else { + require.Equal(t, stor1.ID, si[1].ID) + require.Equal(t, stor2.ID, si[0].ID) + } + } +} diff --git a/extern/sector-storage/stores/local.go b/extern/sector-storage/stores/local.go index c2e8e3df6..8121c418d 100644 --- a/extern/sector-storage/stores/local.go +++ b/extern/sector-storage/stores/local.go @@ -46,6 +46,13 @@ type LocalStorageMeta struct { // MaxStorage specifies the maximum number of bytes to use for sector storage // (0 = unlimited) MaxStorage uint64 + + // List of storage groups this path belongs to + Groups []string + + // List of storage 
groups to which data from this path can be moved. If none + // are specified, allow to all + AllowTo []string } // StorageConfig .lotusstorage/storage.json @@ -212,6 +219,8 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { MaxStorage: meta.MaxStorage, CanSeal: meta.CanSeal, CanStore: meta.CanStore, + Groups: meta.Groups, + AllowTo: meta.AllowTo, }, fst) if err != nil { return xerrors.Errorf("declaring storage in index: %w", err) @@ -276,6 +285,8 @@ func (st *Local) Redeclare(ctx context.Context) error { MaxStorage: meta.MaxStorage, CanSeal: meta.CanSeal, CanStore: meta.CanStore, + Groups: meta.Groups, + AllowTo: meta.AllowTo, }, fst) if err != nil { return xerrors.Errorf("redeclaring storage in index: %w", err) diff --git a/extern/sector-storage/stores/remote.go b/extern/sector-storage/stores/remote.go index aa6075e62..bd6b34be3 100644 --- a/extern/sector-storage/stores/remote.go +++ b/extern/sector-storage/stores/remote.go @@ -281,7 +281,7 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error { switch mediatype { case "application/x-tar": - return tarutil.ExtractTar(resp.Body, outname) + return tarutil.ExtractTar(resp.Body, outname, make([]byte, CopyBuf)) case "application/octet-stream": f, err := os.Create(outname) if err != nil { @@ -305,7 +305,6 @@ func (r *Remote) checkAllocated(ctx context.Context, url string, spt abi.Registe return false, xerrors.Errorf("request: %w", err) } req.Header = r.auth.Clone() - fmt.Printf("req using header: %#v \n", r.auth) req = req.WithContext(ctx) resp, err := http.DefaultClient.Do(req) @@ -586,7 +585,7 @@ func (r *Remote) CheckIsUnsealed(ctx context.Context, s storage.SectorRef, offse // 1. no worker(local worker included) has an unsealed file for the given sector OR // 2. no worker(local worker included) has the unsealed piece in their unsealed sector file. // Will return a nil reader and a nil error in such a case. 
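Remote.Reader below no longer hands back a single io.ReadCloser; it returns a getter, func(startOffsetAligned storiface.PaddedByteIndex) (io.ReadCloser, error), so callers can reopen the stream at different padded offsets. A hedged sketch of how such a getter might be driven, with an in-memory stub standing in for the real partial-file and HTTP-range backends:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

// readerGetter matches the shape returned by Remote.Reader in this diff:
// call it with a start offset, get a fresh stream from that point.
type readerGetter func(startOffset uint64) (io.ReadCloser, error)

// memGetter is a stand-in backend: it serves byte ranges of an in-memory
// "unsealed piece" instead of a partial file or remote HTTP range request.
func memGetter(piece []byte) readerGetter {
	return func(start uint64) (io.ReadCloser, error) {
		if start > uint64(len(piece)) {
			return nil, fmt.Errorf("offset %d past end of piece", start)
		}
		return ioutil.NopCloser(bytes.NewReader(piece[start:])), nil
	}
}

func main() {
	rg := memGetter([]byte("0123456789abcdef"))

	// Re-open the stream at two different offsets, as the piece provider does
	// when a reader has to be recreated at a new position.
	for _, off := range []uint64{0, 10} {
		r, err := rg(off)
		if err != nil {
			panic(err)
		}
		data, _ := io.ReadAll(r)
		r.Close()
		fmt.Printf("+%d: %s\n", off, data)
	}
}
```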
-func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size abi.PaddedPieceSize) (io.ReadCloser, error) { +func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size abi.PaddedPieceSize) (func(startOffsetAligned storiface.PaddedByteIndex) (io.ReadCloser, error), error) { ft := storiface.FTUnsealed // check if we have the unsealed sector file locally @@ -624,7 +623,52 @@ func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size a if has { log.Infof("returning piece reader for local unsealed piece sector=%+v, (offset=%d, size=%d)", s.ID, offset, size) - return r.pfHandler.Reader(pf, storiface.PaddedByteIndex(offset), size) + + return func(startOffsetAligned storiface.PaddedByteIndex) (io.ReadCloser, error) { + // don't reuse between readers unless closed + f := pf + pf = nil + + if f == nil { + f, err = r.pfHandler.OpenPartialFile(abi.PaddedPieceSize(ssize), path) + if err != nil { + return nil, xerrors.Errorf("opening partial file: %w", err) + } + log.Debugf("local partial file (re)opened %s (+%d,%d)", path, offset, size) + } + + r, err := r.pfHandler.Reader(f, storiface.PaddedByteIndex(offset)+startOffsetAligned, size-abi.PaddedPieceSize(startOffsetAligned)) + if err != nil { + return nil, err + } + + return struct { + io.Reader + io.Closer + }{ + Reader: r, + Closer: funcCloser(func() error { + // if we already have a reader cached, close this one + if pf != nil { + if f == nil { + return nil + } + if pf == f { + pf = nil + } + + tmp := f + f = nil + return tmp.Close() + } + + // otherwise stash it away for reuse + pf = f + return nil + }), + }, nil + }, nil + } log.Debugf("miner has unsealed file but not unseal piece, %s (+%d,%d)", path, offset, size) @@ -667,16 +711,18 @@ func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size a continue } - // readRemote fetches a reader that we can use to read the unsealed piece from the remote worker. - // It uses a ranged HTTP query to ensure we ONLY read the unsealed piece and not the entire unsealed file. - rd, err := r.readRemote(ctx, url, offset, size) - if err != nil { - log.Warnw("reading from remote", "url", url, "error", err) - lastErr = err - continue - } - log.Infof("Read remote %s (+%d,%d)", url, offset, size) - return rd, nil + return func(startOffsetAligned storiface.PaddedByteIndex) (io.ReadCloser, error) { + // readRemote fetches a reader that we can use to read the unsealed piece from the remote worker. + // It uses a ranged HTTP query to ensure we ONLY read the unsealed piece and not the entire unsealed file. 
+ rd, err := r.readRemote(ctx, url, offset+abi.PaddedPieceSize(startOffsetAligned), size) + if err != nil { + log.Warnw("reading from remote", "url", url, "error", err) + return nil, err + } + + return rd, err + }, nil + } } @@ -693,3 +739,11 @@ func (r *Remote) Reserve(ctx context.Context, sid storage.SectorRef, ft storifac } var _ Store = &Remote{} + +type funcCloser func() error + +func (f funcCloser) Close() error { + return f() +} + +var _ io.Closer = funcCloser(nil) diff --git a/extern/sector-storage/stores/remote_test.go b/extern/sector-storage/stores/remote_test.go index ea9179655..0bc439dee 100644 --- a/extern/sector-storage/stores/remote_test.go +++ b/extern/sector-storage/stores/remote_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "io" "io/ioutil" "net/http" "net/http/httptest" @@ -470,12 +471,20 @@ func TestReader(t *testing.T) { remoteStore := stores.NewRemote(lstore, index, nil, 6000, pfhandler) - rd, err := remoteStore.Reader(ctx, sectorRef, offset, size) + rdg, err := remoteStore.Reader(ctx, sectorRef, offset, size) + var rd io.ReadCloser if tc.errStr != "" { - require.Error(t, err) - require.Nil(t, rd) - require.Contains(t, err.Error(), tc.errStr) + if rdg == nil { + require.Error(t, err) + require.Nil(t, rdg) + require.Contains(t, err.Error(), tc.errStr) + } else { + rd, err = rdg(0) + require.Error(t, err) + require.Nil(t, rd) + require.Contains(t, err.Error(), tc.errStr) + } } else { require.NoError(t, err) } @@ -483,7 +492,10 @@ func TestReader(t *testing.T) { if !tc.expectedNonNilReader { require.Nil(t, rd) } else { - require.NotNil(t, rd) + require.NotNil(t, rdg) + rd, err := rdg(0) + require.NoError(t, err) + defer func() { require.NoError(t, rd.Close()) }() diff --git a/extern/sector-storage/resources.go b/extern/sector-storage/storiface/resources.go similarity index 61% rename from extern/sector-storage/resources.go rename to extern/sector-storage/storiface/resources.go index 2e989fdf4..b5f45d722 100644 --- a/extern/sector-storage/resources.go +++ b/extern/sector-storage/storiface/resources.go @@ -1,19 +1,31 @@ -package sectorstorage +package storiface import ( - "github.com/filecoin-project/go-state-types/abi" + "fmt" + "reflect" + "strconv" + "strings" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" ) type Resources struct { - MinMemory uint64 // What Must be in RAM for decent perf - MaxMemory uint64 // Memory required (swap + ram) + MinMemory uint64 `envname:"MIN_MEMORY"` // What Must be in RAM for decent perf + MaxMemory uint64 `envname:"MAX_MEMORY"` // Memory required (swap + ram; peak memory usage during task execution) - MaxParallelism int // -1 = multithread - CanGPU bool + // GPUUtilization specifes the number of GPUs a task can use + GPUUtilization float64 `envname:"GPU_UTILIZATION"` - BaseMinMemory uint64 // What Must be in RAM for decent perf (shared between threads) + // MaxParallelism specifies the number of CPU cores when GPU is NOT in use + MaxParallelism int `envname:"MAX_PARALLELISM"` // -1 = multithread + + // MaxParallelismGPU specifies the number of CPU cores when GPU is in use + MaxParallelismGPU int `envname:"MAX_PARALLELISM_GPU"` // when 0, inherits MaxParallelism + + BaseMinMemory uint64 `envname:"BASE_MIN_MEMORY"` // What Must be in RAM for decent perf (shared between threads) } /* @@ -32,8 +44,14 @@ var ParallelNum uint64 = 92 var ParallelDenom uint64 = 100 // TODO: Take NUMA into account -func (r Resources) 
Threads(wcpus uint64) uint64 { - if r.MaxParallelism == -1 { +func (r Resources) Threads(wcpus uint64, gpus int) uint64 { + mp := r.MaxParallelism + + if r.GPUUtilization > 0 && gpus > 0 && r.MaxParallelismGPU != 0 { // task can use GPUs and worker has some + mp = r.MaxParallelismGPU + } + + if mp == -1 { n := (wcpus * ParallelNum) / ParallelDenom if n == 0 { return wcpus @@ -41,7 +59,7 @@ func (r Resources) Threads(wcpus uint64) uint64 { return n } - return uint64(r.MaxParallelism) + return uint64(mp) } var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources{ @@ -134,8 +152,9 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MaxMemory: 30 << 30, MinMemory: 30 << 30, - MaxParallelism: -1, - CanGPU: true, + MaxParallelism: -1, + MaxParallelismGPU: 6, + GPUUtilization: 1.0, BaseMinMemory: 1 << 30, }, @@ -143,8 +162,9 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MaxMemory: 15 << 30, MinMemory: 15 << 30, - MaxParallelism: -1, - CanGPU: true, + MaxParallelism: -1, + MaxParallelismGPU: 6, + GPUUtilization: 1.0, BaseMinMemory: 1 << 30, }, @@ -220,8 +240,9 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MaxMemory: 190 << 30, // TODO: Confirm MinMemory: 60 << 30, - MaxParallelism: -1, - CanGPU: true, + MaxParallelism: -1, + MaxParallelismGPU: 6, + GPUUtilization: 1.0, BaseMinMemory: 64 << 30, // params }, @@ -229,8 +250,9 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MaxMemory: 150 << 30, // TODO: ~30G of this should really be BaseMaxMemory MinMemory: 30 << 30, - MaxParallelism: -1, - CanGPU: true, + MaxParallelism: -1, + MaxParallelismGPU: 6, + GPUUtilization: 1.0, BaseMinMemory: 32 << 30, // params }, @@ -239,7 +261,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MinMemory: 1 << 30, MaxParallelism: 1, // This is fine - CanGPU: true, + GPUUtilization: 1.0, BaseMinMemory: 10 << 30, }, @@ -248,7 +270,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MinMemory: 2 << 10, MaxParallelism: 1, - CanGPU: true, + GPUUtilization: 1.0, BaseMinMemory: 2 << 10, }, @@ -257,7 +279,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MinMemory: 8 << 20, MaxParallelism: 1, - CanGPU: true, + GPUUtilization: 1.0, BaseMinMemory: 8 << 20, }, @@ -268,7 +290,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MinMemory: 1 << 20, MaxParallelism: 0, - CanGPU: false, + GPUUtilization: 0, BaseMinMemory: 0, }, @@ -277,7 +299,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MinMemory: 1 << 20, MaxParallelism: 0, - CanGPU: false, + GPUUtilization: 0, BaseMinMemory: 0, }, @@ -286,7 +308,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MinMemory: 1 << 20, MaxParallelism: 0, - CanGPU: false, + GPUUtilization: 0, BaseMinMemory: 0, }, @@ -295,7 +317,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MinMemory: 1 << 20, MaxParallelism: 0, - CanGPU: false, + GPUUtilization: 0, BaseMinMemory: 0, }, @@ -304,7 +326,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MinMemory: 1 << 20, MaxParallelism: 0, - CanGPU: false, + GPUUtilization: 0, BaseMinMemory: 0, }, @@ -323,3 +345,83 @@ func init() { m[abi.RegisteredSealProof_StackedDrg64GiBV1_1] = m[abi.RegisteredSealProof_StackedDrg64GiBV1] } 
} + +func ParseResourceEnv(lookup func(key, def string) (string, bool)) (map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources, error) { + out := map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources{} + + for taskType, defTT := range ResourceTable { + out[taskType] = map[abi.RegisteredSealProof]Resources{} + + for spt, defRes := range defTT { + r := defRes // copy + + spsz, err := spt.SectorSize() + if err != nil { + return nil, xerrors.Errorf("getting sector size: %w", err) + } + shortSize := strings.TrimSuffix(spsz.ShortString(), "iB") + + rr := reflect.ValueOf(&r) + for i := 0; i < rr.Elem().Type().NumField(); i++ { + f := rr.Elem().Type().Field(i) + + envname := f.Tag.Get("envname") + if envname == "" { + return nil, xerrors.Errorf("no envname for field '%s'", f.Name) + } + + envval, found := lookup(taskType.Short()+"_"+shortSize+"_"+envname, fmt.Sprint(rr.Elem().Field(i).Interface())) + if !found { + // special multicore SDR handling + if (taskType == sealtasks.TTPreCommit1 || taskType == sealtasks.TTUnseal) && envname == "MAX_PARALLELISM" { + v, ok := rr.Elem().Field(i).Addr().Interface().(*int) + if !ok { + // can't happen, but let's not panic + return nil, xerrors.Errorf("res.MAX_PARALLELISM is not int (!?): %w", err) + } + *v, err = getSDRThreads(lookup) + if err != nil { + return nil, err + } + } + + continue + } + + v := rr.Elem().Field(i).Addr().Interface() + switch fv := v.(type) { + case *uint64: + *fv, err = strconv.ParseUint(envval, 10, 64) + case *int: + *fv, err = strconv.Atoi(envval) + case *float64: + *fv, err = strconv.ParseFloat(envval, 64) + default: + return nil, xerrors.Errorf("unknown resource field type") + } + } + + out[taskType][spt] = r + } + } + + return out, nil +} + +func getSDRThreads(lookup func(key, def string) (string, bool)) (_ int, err error) { + producers := 0 + + if v, _ := lookup("FIL_PROOFS_USE_MULTICORE_SDR", ""); v == "1" { + producers = 3 + + if penv, found := lookup("FIL_PROOFS_MULTICORE_SDR_PRODUCERS", ""); found { + producers, err = strconv.Atoi(penv) + if err != nil { + return 0, xerrors.Errorf("parsing (atoi) FIL_PROOFS_MULTICORE_SDR_PRODUCERS: %w", err) + } + } + } + + // producers + the one core actually doing the work + return producers + 1, nil +} diff --git a/extern/sector-storage/storiface/resources_test.go b/extern/sector-storage/storiface/resources_test.go new file mode 100644 index 000000000..bf7425d24 --- /dev/null +++ b/extern/sector-storage/storiface/resources_test.go @@ -0,0 +1,75 @@ +package storiface + +import ( + "fmt" + "testing" + + stabi "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" + "github.com/stretchr/testify/require" +) + +func TestListResourceVars(t *testing.T) { + _, err := ParseResourceEnv(func(key, def string) (string, bool) { + if def != "" { + fmt.Printf("%s=%s\n", key, def) + } + + return "", false + }) + + require.NoError(t, err) +} + +func TestListResourceOverride(t *testing.T) { + rt, err := ParseResourceEnv(func(key, def string) (string, bool) { + if key == "UNS_2K_MAX_PARALLELISM" { + return "2", true + } + if key == "PC2_2K_GPU_UTILIZATION" { + return "0.4", true + } + if key == "PC2_2K_MAX_MEMORY" { + return "2222", true + } + + return "", false + }) + + require.NoError(t, err) + require.Equal(t, 2, rt[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) + require.Equal(t, 0.4, rt[sealtasks.TTPreCommit2][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].GPUUtilization) + require.Equal(t, 
uint64(2222), rt[sealtasks.TTPreCommit2][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxMemory) + + // check that defaults don't get mutated + require.Equal(t, 1, ResourceTable[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) +} + +func TestListResourceSDRMulticoreOverride(t *testing.T) { + rt, err := ParseResourceEnv(func(key, def string) (string, bool) { + if key == "FIL_PROOFS_USE_MULTICORE_SDR" { + return "1", true + } + + return "", false + }) + + require.NoError(t, err) + require.Equal(t, 4, rt[sealtasks.TTPreCommit1][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) + require.Equal(t, 4, rt[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) + + rt, err = ParseResourceEnv(func(key, def string) (string, bool) { + if key == "FIL_PROOFS_USE_MULTICORE_SDR" { + return "1", true + } + if key == "FIL_PROOFS_MULTICORE_SDR_PRODUCERS" { + return "9000", true + } + + return "", false + }) + + require.NoError(t, err) + require.Equal(t, 9001, rt[sealtasks.TTPreCommit1][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) + require.Equal(t, 9001, rt[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) +} diff --git a/extern/sector-storage/storiface/worker.go b/extern/sector-storage/storiface/worker.go index e3374d6cf..5889701d0 100644 --- a/extern/sector-storage/storiface/worker.go +++ b/extern/sector-storage/storiface/worker.go @@ -15,6 +15,12 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" ) +type WorkerID uuid.UUID // worker session UUID + +func (w WorkerID) String() string { + return uuid.UUID(w).String() +} + type WorkerInfo struct { Hostname string @@ -28,12 +34,35 @@ type WorkerInfo struct { type WorkerResources struct { MemPhysical uint64 + MemUsed uint64 MemSwap uint64 - - MemReserved uint64 // Used by system / other processes + MemSwapUsed uint64 CPUs uint64 // Logical cores GPUs []string + + // if nil use the default resource table + Resources map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources +} + +func (wr WorkerResources) ResourceSpec(spt abi.RegisteredSealProof, tt sealtasks.TaskType) Resources { + res := ResourceTable[tt][spt] + + // if the worker specifies custom resource table, prefer that + if wr.Resources != nil { + tr, ok := wr.Resources[tt] + if !ok { + return res + } + + r, ok := tr[spt] + if ok { + return r + } + } + + // otherwise, use the default resource table + return res } type WorkerStats struct { @@ -42,8 +71,8 @@ type WorkerStats struct { MemUsedMin uint64 MemUsedMax uint64 - GpuUsed bool // nolint - CpuUse uint64 // nolint + GpuUsed float64 // nolint + CpuUse uint64 // nolint } const ( diff --git a/extern/sector-storage/tarutil/systar.go b/extern/sector-storage/tarutil/systar.go index 2329aafc7..eb958fa02 100644 --- a/extern/sector-storage/tarutil/systar.go +++ b/extern/sector-storage/tarutil/systar.go @@ -14,7 +14,7 @@ import ( var log = logging.Logger("tarutil") // nolint -func ExtractTar(body io.Reader, dir string) error { +func ExtractTar(body io.Reader, dir string, buf []byte) error { if err := os.MkdirAll(dir, 0755); err != nil { // nolint return xerrors.Errorf("mkdir: %w", err) } @@ -38,7 +38,7 @@ func ExtractTar(body io.Reader, dir string) error { // This data is coming from a trusted source, no need to check the size. 
//nolint:gosec - if _, err := io.Copy(f, tr); err != nil { + if _, err := io.CopyBuffer(f, tr, buf); err != nil { return err } @@ -48,17 +48,7 @@ func ExtractTar(body io.Reader, dir string) error { } } -func TarDirectory(dir string) (io.ReadCloser, error) { - r, w := io.Pipe() - - go func() { - _ = w.CloseWithError(writeTarDirectory(dir, w)) - }() - - return r, nil -} - -func writeTarDirectory(dir string, w io.Writer) error { +func TarDirectory(dir string, w io.Writer, buf []byte) error { tw := tar.NewWriter(w) files, err := ioutil.ReadDir(dir) @@ -81,7 +71,7 @@ func writeTarDirectory(dir string, w io.Writer) error { return xerrors.Errorf("opening %s for reading: %w", file.Name(), err) } - if _, err := io.Copy(tw, f); err != nil { + if _, err := io.CopyBuffer(tw, f, buf); err != nil { return xerrors.Errorf("copy data for file %s: %w", file.Name(), err) } diff --git a/extern/sector-storage/testworker_test.go b/extern/sector-storage/testworker_test.go index 2fe99f3d4..81b1daee3 100644 --- a/extern/sector-storage/testworker_test.go +++ b/extern/sector-storage/testworker_test.go @@ -102,14 +102,15 @@ func (t *testWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) { } func (t *testWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) { - res := ResourceTable[sealtasks.TTPreCommit2][abi.RegisteredSealProof_StackedDrg2KiBV1] + res := storiface.ResourceTable[sealtasks.TTPreCommit2][abi.RegisteredSealProof_StackedDrg2KiBV1] return storiface.WorkerInfo{ Hostname: "testworkerer", Resources: storiface.WorkerResources{ MemPhysical: res.MinMemory * 3, + MemUsed: res.MinMemory, + MemSwapUsed: 0, MemSwap: 0, - MemReserved: res.MinMemory, CPUs: 32, GPUs: nil, }, diff --git a/extern/sector-storage/worker_local.go b/extern/sector-storage/worker_local.go index d45d140f8..3545c50c0 100644 --- a/extern/sector-storage/worker_local.go +++ b/extern/sector-storage/worker_local.go @@ -42,6 +42,7 @@ type WorkerConfig struct { // used do provide custom proofs impl (mostly used in testing) type ExecutorFunc func() (ffiwrapper.Storage, error) +type EnvFunc func(string) (string, bool) type LocalWorker struct { storage stores.Store @@ -50,6 +51,7 @@ type LocalWorker struct { ret storiface.WorkerReturn executor ExecutorFunc noSwap bool + envLookup EnvFunc // see equivalent field on WorkerConfig. 
ignoreResources bool @@ -64,7 +66,7 @@ type LocalWorker struct { closing chan struct{} } -func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex, ret storiface.WorkerReturn, cst *statestore.StateStore) *LocalWorker { +func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, envLookup EnvFunc, store stores.Store, local *stores.Local, sindex stores.SectorIndex, ret storiface.WorkerReturn, cst *statestore.StateStore) *LocalWorker { acceptTasks := map[sealtasks.TaskType]struct{}{} for _, taskType := range wcfg.TaskTypes { acceptTasks[taskType] = struct{}{} @@ -82,6 +84,7 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store acceptTasks: acceptTasks, executor: executor, noSwap: wcfg.NoSwap, + envLookup: envLookup, ignoreResources: wcfg.IgnoreResourceFiltering, session: uuid.New(), closing: make(chan struct{}), @@ -115,7 +118,7 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store } func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex, ret storiface.WorkerReturn, cst *statestore.StateStore) *LocalWorker { - return newLocalWorker(nil, wcfg, store, local, sindex, ret, cst) + return newLocalWorker(nil, wcfg, os.LookupEnv, store, local, sindex, ret, cst) } type localWorkerPathProvider struct { @@ -482,6 +485,52 @@ func (l *LocalWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) { return l.localStore.Local(ctx) } +func (l *LocalWorker) memInfo() (memPhysical, memUsed, memSwap, memSwapUsed uint64, err error) { + h, err := sysinfo.Host() + if err != nil { + return 0, 0, 0, 0, err + } + + mem, err := h.Memory() + if err != nil { + return 0, 0, 0, 0, err + } + memPhysical = mem.Total + // mem.Available is memory available without swapping, it is more relevant for this calculation + memUsed = mem.Total - mem.Available + memSwap = mem.VirtualTotal + memSwapUsed = mem.VirtualUsed + + if cgMemMax, cgMemUsed, cgSwapMax, cgSwapUsed, err := cgroupV1Mem(); err == nil { + if cgMemMax > 0 && cgMemMax < memPhysical { + memPhysical = cgMemMax + memUsed = cgMemUsed + } + if cgSwapMax > 0 && cgSwapMax < memSwap { + memSwap = cgSwapMax + memSwapUsed = cgSwapUsed + } + } + + if cgMemMax, cgMemUsed, cgSwapMax, cgSwapUsed, err := cgroupV2Mem(); err == nil { + if cgMemMax > 0 && cgMemMax < memPhysical { + memPhysical = cgMemMax + memUsed = cgMemUsed + } + if cgSwapMax > 0 && cgSwapMax < memSwap { + memSwap = cgSwapMax + memSwapUsed = cgSwapUsed + } + } + + if l.noSwap { + memSwap = 0 + memSwapUsed = 0 + } + + return memPhysical, memUsed, memSwap, memSwapUsed, nil +} + func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) { hostname, err := os.Hostname() // TODO: allow overriding from config if err != nil { @@ -493,30 +542,29 @@ func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) { log.Errorf("getting gpu devices failed: %+v", err) } - h, err := sysinfo.Host() - if err != nil { - return storiface.WorkerInfo{}, xerrors.Errorf("getting host info: %w", err) - } - - mem, err := h.Memory() + memPhysical, memUsed, memSwap, memSwapUsed, err := l.memInfo() if err != nil { return storiface.WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err) } - memSwap := mem.VirtualTotal - if l.noSwap { - memSwap = 0 + resEnv, err := storiface.ParseResourceEnv(func(key, def string) (string, bool) { + return l.envLookup(key) + }) + if err != nil { + return storiface.WorkerInfo{}, 
xerrors.Errorf("interpreting resource env vars: %w", err) } return storiface.WorkerInfo{ Hostname: hostname, IgnoreResources: l.ignoreResources, Resources: storiface.WorkerResources{ - MemPhysical: mem.Total, + MemPhysical: memPhysical, + MemUsed: memUsed, MemSwap: memSwap, - MemReserved: mem.VirtualUsed + mem.Total - mem.Available, // TODO: sub this process + MemSwapUsed: memSwapUsed, CPUs: uint64(runtime.NumCPU()), GPUs: gpus, + Resources: resEnv, }, }, nil } diff --git a/extern/sector-storage/worker_tracked.go b/extern/sector-storage/worker_tracked.go index 5702426c3..7a88d9bd4 100644 --- a/extern/sector-storage/worker_tracked.go +++ b/extern/sector-storage/worker_tracked.go @@ -20,7 +20,7 @@ import ( type trackedWork struct { job storiface.WorkerJob - worker WorkerID + worker storiface.WorkerID workerHostname string } @@ -58,7 +58,7 @@ func (wt *workTracker) onDone(ctx context.Context, callID storiface.CallID) { delete(wt.running, callID) } -func (wt *workTracker) track(ctx context.Context, ready chan struct{}, wid WorkerID, wi storiface.WorkerInfo, sid storage.SectorRef, task sealtasks.TaskType, cb func() (storiface.CallID, error)) (storiface.CallID, error) { +func (wt *workTracker) track(ctx context.Context, ready chan struct{}, wid storiface.WorkerID, wi storiface.WorkerInfo, sid storage.SectorRef, task sealtasks.TaskType, cb func() (storiface.CallID, error)) (storiface.CallID, error) { tracked := func(rw int, callID storiface.CallID) trackedWork { return trackedWork{ job: storiface.WorkerJob{ @@ -122,7 +122,7 @@ func (wt *workTracker) track(ctx context.Context, ready chan struct{}, wid Worke return callID, err } -func (wt *workTracker) worker(wid WorkerID, wi storiface.WorkerInfo, w Worker) *trackedWorker { +func (wt *workTracker) worker(wid storiface.WorkerID, wi storiface.WorkerInfo, w Worker) *trackedWorker { return &trackedWorker{ Worker: w, wid: wid, @@ -152,7 +152,7 @@ func (wt *workTracker) Running() ([]trackedWork, []trackedWork) { type trackedWorker struct { Worker - wid WorkerID + wid storiface.WorkerID workerInfo storiface.WorkerInfo execute chan struct{} // channel blocking execution in case we're waiting for resources but the task is ready to execute diff --git a/extern/storage-sealing/states_sealing.go b/extern/storage-sealing/states_sealing.go index 9dcb779a7..c6cd0bb49 100644 --- a/extern/storage-sealing/states_sealing.go +++ b/extern/storage-sealing/states_sealing.go @@ -595,7 +595,7 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) } if err := m.checkCommit(ctx.Context(), sector, proof, tok); err != nil { - return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("commit check error: %w", err)}) + return ctx.Send(SectorCommitFailed{xerrors.Errorf("commit check error: %w", err)}) } } diff --git a/gateway/node.go b/gateway/node.go index 56f95a31b..a0c120d39 100644 --- a/gateway/node.go +++ b/gateway/node.go @@ -33,6 +33,8 @@ const ( // (to make it easy to mock for tests) type TargetAPI interface { Version(context.Context) (api.APIVersion, error) + ChainGetParentMessages(context.Context, cid.Cid) ([]api.Message, error) + ChainGetParentReceipts(context.Context, cid.Cid) ([]*types.MessageReceipt, error) ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) @@ -44,6 +46,7 @@ type TargetAPI interface { ChainNotify(context.Context) (<-chan []*api.HeadChange, error) 
ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*api.HeadChange, error) ChainReadObj(context.Context, cid.Cid) ([]byte, error) + ChainGetGenesis(context.Context) (*types.TipSet, error) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) MpoolPushUntrusted(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) @@ -144,6 +147,14 @@ func (gw *Node) Version(ctx context.Context) (api.APIVersion, error) { return gw.target.Version(ctx) } +func (gw *Node) ChainGetParentMessages(ctx context.Context, c cid.Cid) ([]api.Message, error) { + return gw.target.ChainGetParentMessages(ctx, c) +} + +func (gw *Node) ChainGetParentReceipts(ctx context.Context, c cid.Cid) ([]*types.MessageReceipt, error) { + return gw.target.ChainGetParentReceipts(ctx, c) +} + func (gw *Node) ChainGetBlockMessages(ctx context.Context, c cid.Cid) (*api.BlockMessages, error) { return gw.target.ChainGetBlockMessages(ctx, c) } @@ -231,6 +242,10 @@ func (gw *Node) ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]* return gw.target.ChainGetPath(ctx, from, to) } +func (gw *Node) ChainGetGenesis(ctx context.Context) (*types.TipSet, error) { + return gw.target.ChainGetGenesis(ctx) +} + func (gw *Node) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { return gw.target.ChainReadObj(ctx, c) } diff --git a/gen/api/proxygen.go b/gen/api/proxygen.go index 3e0766c31..df39132ff 100644 --- a/gen/api/proxygen.go +++ b/gen/api/proxygen.go @@ -10,7 +10,6 @@ import ( "path/filepath" "strings" "text/template" - "unicode" "golang.org/x/xerrors" ) @@ -71,9 +70,6 @@ func typeName(e ast.Expr, pkg string) (string, error) { return t.X.(*ast.Ident).Name + "." + t.Sel.Name, nil case *ast.Ident: pstr := t.Name - if !unicode.IsLower(rune(pstr[0])) && pkg != "api" { - pstr = "api." 
+ pstr // todo src pkg name - } return pstr, nil case *ast.ArrayType: subt, err := typeName(t.Elt, pkg) diff --git a/go.mod b/go.mod index b44eb9835..15586dc89 100644 --- a/go.mod +++ b/go.mod @@ -15,6 +15,7 @@ require ( github.com/buger/goterm v1.0.3 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07 + github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 github.com/coreos/go-systemd/v22 v22.3.2 github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e github.com/dgraph-io/badger/v2 v2.2007.2 @@ -36,11 +37,11 @@ require ( github.com/filecoin-project/go-data-transfer v1.11.4 github.com/filecoin-project/go-fil-commcid v0.1.0 github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 - github.com/filecoin-project/go-fil-markets v1.13.3 + github.com/filecoin-project/go-fil-markets v1.13.4 github.com/filecoin-project/go-jsonrpc v0.1.5 github.com/filecoin-project/go-padreader v0.0.1 github.com/filecoin-project/go-paramfetch v0.0.2 - github.com/filecoin-project/go-state-types v0.1.1-0.20210915140513-d354ccf10379 + github.com/filecoin-project/go-state-types v0.1.1 github.com/filecoin-project/go-statemachine v1.0.1 github.com/filecoin-project/go-statestore v0.1.1 github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b @@ -77,7 +78,7 @@ require ( github.com/ipfs/go-ds-measure v0.1.0 github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 github.com/ipfs/go-fs-lock v0.0.6 - github.com/ipfs/go-graphsync v0.10.4 + github.com/ipfs/go-graphsync v0.10.6 github.com/ipfs/go-ipfs-blockstore v1.0.4 github.com/ipfs/go-ipfs-blocksutil v0.0.1 github.com/ipfs/go-ipfs-chunker v0.0.5 @@ -90,6 +91,7 @@ require ( github.com/ipfs/go-ipfs-util v0.0.2 github.com/ipfs/go-ipld-cbor v0.0.5 github.com/ipfs/go-ipld-format v0.2.0 + github.com/ipfs/go-ipld-legacy v0.1.1 // indirect github.com/ipfs/go-log/v2 v2.3.0 github.com/ipfs/go-merkledag v0.4.1 github.com/ipfs/go-metrics-interface v0.0.1 @@ -98,10 +100,10 @@ require ( github.com/ipfs/go-unixfs v0.2.6 github.com/ipfs/interface-go-ipfs-core v0.4.0 github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823 - github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7 + github.com/ipld/go-car/v2 v2.1.0 github.com/ipld/go-codec-dagpb v1.3.0 - github.com/ipld/go-ipld-prime v0.12.3 - github.com/ipld/go-ipld-selector-text-lite v0.0.0 + github.com/ipld/go-ipld-prime v0.14.2 + github.com/ipld/go-ipld-selector-text-lite v0.0.1 github.com/kelseyhightower/envconfig v1.4.0 github.com/libp2p/go-buffer-pool v0.0.2 github.com/libp2p/go-eventbus v0.2.1 @@ -110,7 +112,6 @@ require ( github.com/libp2p/go-libp2p-core v0.9.0 github.com/libp2p/go-libp2p-discovery v0.5.1 github.com/libp2p/go-libp2p-kad-dht v0.13.0 - github.com/libp2p/go-libp2p-mplex v0.4.1 github.com/libp2p/go-libp2p-noise v0.2.2 github.com/libp2p/go-libp2p-peerstore v0.3.0 github.com/libp2p/go-libp2p-pubsub v0.5.6 @@ -128,7 +129,7 @@ require ( github.com/multiformats/go-multiaddr v0.4.1 github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/multiformats/go-multibase v0.0.3 - github.com/multiformats/go-multihash v0.0.16 + github.com/multiformats/go-multihash v0.1.0 github.com/multiformats/go-varint v0.0.6 github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 github.com/opentracing/opentracing-go v1.2.0 @@ -151,14 +152,16 @@ require ( go.uber.org/fx v1.9.0 go.uber.org/multierr v1.7.0 go.uber.org/zap v1.19.1 - golang.org/x/net v0.0.0-20210917221730-978cfadd31cf + 
golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b // indirect + golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 + golang.org/x/sys v0.0.0-20211209171907-798191bca915 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac golang.org/x/tools v0.1.5 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 gopkg.in/cheggaaa/pb.v1 v1.0.28 gotest.tools v2.2.0+incompatible + lukechampine.com/blake3 v1.1.7 // indirect ) replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi diff --git a/go.sum b/go.sum index 2997154ac..12fe4eabf 100644 --- a/go.sum +++ b/go.sum @@ -174,6 +174,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5O github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.2.0 h1:Fv93L3KKckEcEHR3oApXVzyBTDA8WAm6VXhPE00N3f8= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= @@ -338,8 +339,8 @@ github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+ github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo= github.com/filecoin-project/go-fil-commp-hashhash v0.1.0/go.mod h1:73S8WSEWh9vr0fDJVnKADhfIv/d6dCbAGaAGWbdJEI8= github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= -github.com/filecoin-project/go-fil-markets v1.13.3 h1:iMCpG7I4fb+YLcgDnMaqZiZiyFZWNvrwHqiFPHB0/tQ= -github.com/filecoin-project/go-fil-markets v1.13.3/go.mod h1:38zuj8AgDvOfdakFLpC/syYIYgXTzkq7xqBJ6T1AuG4= +github.com/filecoin-project/go-fil-markets v1.13.4 h1:NAu+ACelR2mYsj+yJ4iLu8FGqWK50OnU5VF8axkLsSc= +github.com/filecoin-project/go-fil-markets v1.13.4/go.mod h1:aANjXD2XMHWnT2zWpyGWLsWLC24C4mHm0gRm85OpPWE= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= @@ -363,8 +364,8 @@ github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.1-0.20210810190654-139e0e79e69e/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= -github.com/filecoin-project/go-state-types v0.1.1-0.20210915140513-d354ccf10379 h1:UmKkt13NrtulubqfNXhG7SQ7Pjza8BeKdNBxngqAo64= -github.com/filecoin-project/go-state-types v0.1.1-0.20210915140513-d354ccf10379/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.1 h1:LR260vya4p++atgf256W6yV3Lxl5mKrBFcEZePWQrdg= 
+github.com/filecoin-project/go-state-types v0.1.1/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statemachine v1.0.1 h1:LQ60+JDVjMdLxXmVFM2jjontzOYnfVE7u02CXV3WKSw= github.com/filecoin-project/go-statemachine v1.0.1/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54= @@ -407,8 +408,9 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -743,8 +745,8 @@ github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CE github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= github.com/ipfs/go-graphsync v0.10.0/go.mod h1:cKIshzTaa5rCZjryH5xmSKZVGX9uk1wvwGvz2WEha5Y= -github.com/ipfs/go-graphsync v0.10.4 h1:1WZhyOPxgxLvHTIC2GoLltaBrjZ+JuXC2oKAEiX8f3Y= -github.com/ipfs/go-graphsync v0.10.4/go.mod h1:oei4tnWAKnZ6LPnapZGPYVVbyiKV1UP3f8BeLU7Z4JQ= +github.com/ipfs/go-graphsync v0.10.6 h1:GkYan4EoDslceHaqYo/hxktWtuZ7VmsyRXLdSmoCcBQ= +github.com/ipfs/go-graphsync v0.10.6/go.mod h1:tQMjWNDD/vSz80YLT/VvzrUmy58aF9lR1uCwSLzjWzI= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= @@ -803,8 +805,9 @@ github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dC github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= -github.com/ipfs/go-ipld-legacy v0.1.0 h1:wxkkc4k8cnvIGIjPO0waJCe7SHEyFgl+yQdafdjGrpA= github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI= +github.com/ipfs/go-ipld-legacy v0.1.1 h1:BvD8PEuqwBHLTKqlGFTHSwrwFOMkVESEvwIYwR2cdcc= +github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg= github.com/ipfs/go-ipns v0.1.2 h1:O/s/0ht+4Jl9+VoxoUo0zaHjnZUS+aBQIKTuzdZ/ucI= github.com/ipfs/go-ipns v0.1.2/go.mod h1:ioQ0j02o6jdIVW+bmi18f4k2gRf0AV3kZ9KeHYHICnQ= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= @@ -865,8 +868,8 @@ 
github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823 h1:8JMSJ0k71fU9lIUrp github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823/go.mod h1:jSlTph+i/q1jLFoiKKeN69KGG0fXpwrcD0izu5C1Tpo= github.com/ipld/go-car/v2 v2.0.0-beta1.0.20210721090610-5a9d1b217d25/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= github.com/ipld/go-car/v2 v2.0.2/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= -github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7 h1:6Z0beJSZNsRY+7udoqUl4gQ/tqtrPuRvDySrlsvbqZA= -github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= +github.com/ipld/go-car/v2 v2.1.0 h1:t8R/WXUSkfu1K1gpPk76mytCxsEdMjGcMIgpOq3/Cnw= +github.com/ipld/go-car/v2 v2.1.0/go.mod h1:Xr6GwkDhv8dtOtgHzOynAkIOg0t0YiPc5DxBPppWqZA= github.com/ipld/go-codec-dagpb v1.2.0/go.mod h1:6nBN7X7h8EOsEejZGqC7tej5drsdBAXbMHyBT+Fne5s= github.com/ipld/go-codec-dagpb v1.3.0 h1:czTcaoAuNNyIYWs6Qe01DJ+sEX7B+1Z0LcXjSatMGe8= github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= @@ -879,14 +882,15 @@ github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvB github.com/ipld/go-ipld-prime v0.10.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= github.com/ipld/go-ipld-prime v0.12.3-0.20210930132912-0b3aef3ca569/go.mod h1:PaeLYq8k6dJLmDUSLrzkEpoGV4PEfe/1OtFN/eALOc8= -github.com/ipld/go-ipld-prime v0.12.3 h1:furVobw7UBLQZwlEwfE26tYORy3PAK8VYSgZOSr3JMQ= github.com/ipld/go-ipld-prime v0.12.3/go.mod h1:PaeLYq8k6dJLmDUSLrzkEpoGV4PEfe/1OtFN/eALOc8= +github.com/ipld/go-ipld-prime v0.14.2 h1:P5fO2usnisXwrN/1sR5exCgEvINg/w/27EuYPKB/zx8= +github.com/ipld/go-ipld-prime v0.14.2/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= github.com/ipld/go-ipld-prime-proto v0.1.0/go.mod h1:11zp8f3sHVgIqtb/c9Kr5ZGqpnCLF1IVTNOez9TopzE= -github.com/ipld/go-ipld-selector-text-lite v0.0.0 h1:MLU1YUAgd3Z+RfVCXUbvxH1RQjEe+larJ9jmlW1aMgA= -github.com/ipld/go-ipld-selector-text-lite v0.0.0/go.mod h1:U2CQmFb+uWzfIEF3I1arrDa5rwtj00PrpiwwCO+k1RM= +github.com/ipld/go-ipld-selector-text-lite v0.0.1 h1:lNqFsQpBHc3p5xHob2KvEg/iM5dIFn6iw4L/Hh+kS1Y= +github.com/ipld/go-ipld-selector-text-lite v0.0.1/go.mod h1:U2CQmFb+uWzfIEF3I1arrDa5rwtj00PrpiwwCO+k1RM= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= @@ -972,8 +976,9 @@ github.com/koron/go-ssdp v0.0.2/go.mod h1:XoLfkAiA2KeZsYh4DbHxD7h3nR2AZNqVQOa+LJ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod 
h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -1482,8 +1487,9 @@ github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPw github.com/multiformats/go-multicodec v0.2.0/go.mod h1:/y4YVwkfMyry5kFbMTbLJKErhycTIftytRV+llXdyS4= github.com/multiformats/go-multicodec v0.2.1-0.20210713081508-b421db6850ae/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= github.com/multiformats/go-multicodec v0.2.1-0.20210714093213-b2b5bd6fe68b/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= -github.com/multiformats/go-multicodec v0.3.0 h1:tstDwfIjiHbnIjeM5Lp+pMrSeN+LCMsEwOrkPmWm03A= github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= +github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61 h1:ZrUuMKNgJ52qHPoQ+bx0h0uBfcWmN7Px+4uKSZeesiI= +github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= @@ -1492,8 +1498,8 @@ github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpK github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= -github.com/multiformats/go-multihash v0.0.16 h1:D2qsyy1WVculJbGv69pWmQ36ehxFoA5NiIUr1OEs6qI= -github.com/multiformats/go-multihash v0.0.16/go.mod h1:zhfEIgVnB/rPMfxgFw15ZmGoNaKyNUIE4IWHG/kC+Ag= +github.com/multiformats/go-multihash v0.1.0 h1:CgAgwqk3//SVEw3T+6DqI4mWMyRuDwZtOWcJT0q9+EA= +github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84= github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= @@ -1670,6 +1676,8 @@ github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1/go.mod 
h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= @@ -1987,8 +1995,9 @@ golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5 golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210915214749-c084706c2272 h1:3erb+vDS8lU1sxfDHF4/hhWyaXnhIaO+7RgL4fDZORA= golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b h1:QAqMVf3pSa6eeTsuklijukjXBlj7Es2QQplab+/RbQ4= +golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2102,8 +2111,9 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210917221730-978cfadd31cf h1:R150MpwJIv1MpS0N/pc+NhTM8ajzvlmxlY5OYsrevXQ= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2225,8 +2235,9 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 h1:J27LZFQBFoihqXoegpscI10HpjZ7B5WQLLKL2FZXQKw= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211209171907-798191bca915 h1:P+8mCzuEpyszAT6T42q0sxU+eveBAF/cJ2Kp0x6/8+0= +golang.org/x/sys v0.0.0-20211209171907-798191bca915/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf 
h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M= @@ -2484,6 +2495,9 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= +lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= diff --git a/itests/deals_partial_retrieval_dm-level_test.go b/itests/deals_partial_retrieval_dm-level_test.go new file mode 100644 index 000000000..fd289a0ac --- /dev/null +++ b/itests/deals_partial_retrieval_dm-level_test.go @@ -0,0 +1,252 @@ +package itests + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "testing" + "time" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + api0 "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/itests/kit" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/ipld/go-car" + textselector "github.com/ipld/go-ipld-selector-text-lite" + "github.com/stretchr/testify/require" +) + +// please talk to @ribasushi or @mikeal before modifying these test: there are +// downstream dependencies on ADL-less operation +var ( + adlFixtureCar = "fixtures/adl_test.car" + adlFixtureRoot, _ = cid.Parse("bafybeiaigxwanoxyeuzyiknhrg6io6kobfbm37ozcips6qdwumub2gaomy") + adlFixtureCommp, _ = cid.Parse("baga6ea4seaqjnmnrv4qsfz2rnda54mvo5al22dwpguhn2pmep63gl7bbqqqraai") + adlFixturePieceSize = abi.PaddedPieceSize(1024) + dmSelector = api.Selector("Links/0/Hash") + dmTextSelector = textselector.Expression(dmSelector) + dmExpectedResult = "NO ADL" + dmExpectedCarBlockCount = 4 + dmDagSpec = []api.DagSpec{{DataSelector: &dmSelector, ExportMerkleProof: true}} +) + +func TestDMLevelPartialRetrieval(t *testing.T) { + + ctx := context.Background() + + policy.SetPreCommitChallengeDelay(2) + kit.QuietMiningLogs() + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.MockProofs()) + dh := kit.NewDealHarness(t, client, miner, miner) + ens.InterconnectAll().BeginMining(50 * time.Millisecond) + + _, err := client.ClientImport(ctx, api.FileRef{Path: adlFixtureCar, IsCAR: true}) + require.NoError(t, err) + + caddr, err := client.WalletDefaultAddress(ctx) + require.NoError(t, err) + + // + // test retrieval from local car 1st + require.NoError(t, testDMExportAsCar( + ctx, client, api.ExportRef{ + FromLocalCAR: adlFixtureCar, + Root: adlFixtureRoot, + DAGs: dmDagSpec, + }, t.TempDir(), + )) + require.NoError(t, testDMExportAsFile( + ctx, client, api.ExportRef{ + FromLocalCAR: adlFixtureCar, + Root: adlFixtureRoot, + DAGs: dmDagSpec, + }, t.TempDir(), + )) + + // + // ensure V0 continues functioning as expected + require.NoError(t, tesV0RetrievalAsCar( + ctx, client, api0.RetrievalOrder{ + FromLocalCAR: adlFixtureCar, + Root: adlFixtureRoot, + 
DatamodelPathSelector: &dmTextSelector, + }, t.TempDir(), + )) + require.NoError(t, testV0RetrievalAsFile( + ctx, client, api0.RetrievalOrder{ + FromLocalCAR: adlFixtureCar, + Root: adlFixtureRoot, + DatamodelPathSelector: &dmTextSelector, + }, t.TempDir(), + )) + + // + // now perform a storage/retrieval deal as well, and retest + dp := dh.DefaultStartDealParams() + dp.Data = &storagemarket.DataRef{ + Root: adlFixtureRoot, + PieceCid: &adlFixtureCommp, + PieceSize: adlFixturePieceSize.Unpadded(), + } + proposalCid := dh.StartDeal(ctx, dp) + + // Wait for the deal to reach StorageDealCheckForAcceptance on the client + cd, err := client.ClientGetDealInfo(ctx, *proposalCid) + require.NoError(t, err) + require.Eventually(t, func() bool { + cd, _ := client.ClientGetDealInfo(ctx, *proposalCid) + return cd.State == storagemarket.StorageDealCheckForAcceptance + }, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State]) + + dh.WaitDealSealed(ctx, proposalCid, false, false, nil) + + offers, err := client.ClientFindData(ctx, adlFixtureRoot, nil) + require.NoError(t, err) + require.NotEmpty(t, offers, "no offers") + + retOrder := offers[0].Order(caddr) + retOrder.DataSelector = &dmSelector + + rr, err := client.ClientRetrieve(ctx, retOrder) + require.NoError(t, err) + + err = client.ClientRetrieveWait(ctx, rr.DealID) + require.NoError(t, err) + + require.NoError(t, testDMExportAsCar( + ctx, client, api.ExportRef{ + DealID: rr.DealID, + Root: adlFixtureRoot, + DAGs: dmDagSpec, + }, t.TempDir(), + )) + require.NoError(t, testDMExportAsFile( + ctx, client, api.ExportRef{ + DealID: rr.DealID, + Root: adlFixtureRoot, + DAGs: dmDagSpec, + }, t.TempDir(), + )) + +} + +func testDMExportAsFile(ctx context.Context, client *kit.TestFullNode, expDirective api.ExportRef, tempDir string) error { + out, err := ioutil.TempFile(tempDir, "exp-test") + if err != nil { + return err + } + defer out.Close() //nolint:errcheck + + fileDest := api.FileRef{ + Path: out.Name(), + } + err = client.ClientExport(ctx, expDirective, fileDest) + if err != nil { + return err + } + return validateDMUnixFile(out) +} +func testV0RetrievalAsFile(ctx context.Context, client *kit.TestFullNode, retOrder api0.RetrievalOrder, tempDir string) error { + out, err := ioutil.TempFile(tempDir, "exp-test") + if err != nil { + return err + } + defer out.Close() //nolint:errcheck + + cv0 := &api0.WrapperV1Full{client.FullNode} //nolint:govet + err = cv0.ClientRetrieve(ctx, retOrder, &api.FileRef{ + Path: out.Name(), + }) + if err != nil { + return err + } + return validateDMUnixFile(out) +} +func validateDMUnixFile(r io.Reader) error { + data, err := io.ReadAll(r) + if err != nil { + return err + } + if string(data) != dmExpectedResult { + return fmt.Errorf("retrieved data mismatch: expected '%s' got '%s'", dmExpectedResult, data) + } + + return nil +} + +func testDMExportAsCar(ctx context.Context, client *kit.TestFullNode, expDirective api.ExportRef, tempDir string) error { + out, err := ioutil.TempFile(tempDir, "exp-test") + if err != nil { + return err + } + defer out.Close() //nolint:errcheck + + carDest := api.FileRef{ + IsCAR: true, + Path: out.Name(), + } + err = client.ClientExport(ctx, expDirective, carDest) + if err != nil { + return err + } + + return validateDMCar(out) +} +func tesV0RetrievalAsCar(ctx context.Context, client *kit.TestFullNode, retOrder api0.RetrievalOrder, tempDir string) error { + out, err := ioutil.TempFile(tempDir, "exp-test") + if err != nil { + return err + } + defer out.Close() 
//nolint:errcheck + + cv0 := &api0.WrapperV1Full{client.FullNode} //nolint:govet + err = cv0.ClientRetrieve(ctx, retOrder, &api.FileRef{ + Path: out.Name(), + IsCAR: true, + }) + if err != nil { + return err + } + + return validateDMCar(out) +} +func validateDMCar(r io.Reader) error { + cr, err := car.NewCarReader(r) + if err != nil { + return err + } + + if len(cr.Header.Roots) != 1 { + return fmt.Errorf("expected a single root in result car, got %d", len(cr.Header.Roots)) + } else if cr.Header.Roots[0].String() != adlFixtureRoot.String() { + return fmt.Errorf("expected root cid '%s', got '%s'", adlFixtureRoot.String(), cr.Header.Roots[0].String()) + } + + blks := make([]blocks.Block, 0) + for { + b, err := cr.Next() + if err == io.EOF { + break + } else if err != nil { + return err + } + + blks = append(blks, b) + } + + if len(blks) != dmExpectedCarBlockCount { + return fmt.Errorf("expected a car file with %d blocks, got one with %d instead", dmExpectedCarBlockCount, len(blks)) + } + + data := fmt.Sprintf("%s%s", blks[2].RawData(), blks[3].RawData()) + if data != dmExpectedResult { + return fmt.Errorf("retrieved data mismatch: expected '%s' got '%s'", dmExpectedResult, data) + } + + return nil +} diff --git a/itests/deals_partial_retrieval_test.go b/itests/deals_partial_retrieval_test.go index ffc8c5e2c..b164e70d0 100644 --- a/itests/deals_partial_retrieval_test.go +++ b/itests/deals_partial_retrieval_test.go @@ -9,6 +9,8 @@ import ( "testing" "time" + "golang.org/x/xerrors" + "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -18,7 +20,6 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" "github.com/ipld/go-car" - textselector "github.com/ipld/go-ipld-selector-text-lite" "github.com/stretchr/testify/require" ) @@ -28,10 +29,11 @@ var ( sourceCar = "../build/genesis/mainnet.car" carRoot, _ = cid.Parse("bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2") carCommp, _ = cid.Parse("baga6ea4seaqmrivgzei3fmx5qxtppwankmtou6zvigyjaveu3z2zzwhysgzuina") + selectedCid, _ = cid.Parse("bafkqaetgnfwc6mjpon2g64tbm5sxa33xmvza") carPieceSize = abi.PaddedPieceSize(2097152) - textSelector = textselector.Expression("8/1/8/1/0/1/0") - textSelectorNonLink = textselector.Expression("8/1/8/1/0/1") - textSelectorNonexistent = textselector.Expression("42") + textSelector = api.Selector("8/1/8/1/0/1/0") + textSelectorNonLink = api.Selector("8/1/8/1/0/1") + textSelectorNonexistent = api.Selector("42") expectedResult = "fil/1/storagepower" ) @@ -53,74 +55,79 @@ func TestPartialRetrieval(t *testing.T) { require.NoError(t, err) // first test retrieval from local car, then do an actual deal - for _, fullCycle := range []bool{false, true} { + for _, exportMerkleProof := range []bool{false, true} { + for _, fullCycle := range []bool{false, true} { - var retOrder api.RetrievalOrder + var retOrder api.RetrievalOrder + var eref api.ExportRef - if !fullCycle { + if !fullCycle { + eref.FromLocalCAR = sourceCar + } else { + dp := dh.DefaultStartDealParams() + dp.Data = &storagemarket.DataRef{ + // FIXME: figure out how to do this with an online partial transfer + TransferType: storagemarket.TTManual, + Root: carRoot, + PieceCid: &carCommp, + PieceSize: carPieceSize.Unpadded(), + } + proposalCid := dh.StartDeal(ctx, dp) - retOrder.FromLocalCAR = sourceCar - retOrder.Root = carRoot + // Wait for the deal to reach StorageDealCheckForAcceptance on the client + cd, err := 
client.ClientGetDealInfo(ctx, *proposalCid) + require.NoError(t, err) + require.Eventually(t, func() bool { + cd, _ := client.ClientGetDealInfo(ctx, *proposalCid) + return cd.State == storagemarket.StorageDealCheckForAcceptance + }, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State]) - } else { + err = miner.DealsImportData(ctx, *proposalCid, sourceCar) + require.NoError(t, err) - dp := dh.DefaultStartDealParams() - dp.Data = &storagemarket.DataRef{ - // FIXME: figure out how to do this with an online partial transfer - TransferType: storagemarket.TTManual, - Root: carRoot, - PieceCid: &carCommp, - PieceSize: carPieceSize.Unpadded(), + // Wait for the deal to be published, we should be able to start retrieval right away + dh.WaitDealPublished(ctx, proposalCid) + + offers, err := client.ClientFindData(ctx, carRoot, nil) + require.NoError(t, err) + require.NotEmpty(t, offers, "no offers") + + retOrder = offers[0].Order(caddr) } - proposalCid := dh.StartDeal(ctx, dp) - // Wait for the deal to reach StorageDealCheckForAcceptance on the client - cd, err := client.ClientGetDealInfo(ctx, *proposalCid) - require.NoError(t, err) - require.Eventually(t, func() bool { - cd, _ := client.ClientGetDealInfo(ctx, *proposalCid) - return cd.State == storagemarket.StorageDealCheckForAcceptance - }, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State]) + retOrder.DataSelector = &textSelector + eref.DAGs = append(eref.DAGs, api.DagSpec{ + DataSelector: &textSelector, + ExportMerkleProof: exportMerkleProof, + }) + eref.Root = carRoot - err = miner.DealsImportData(ctx, *proposalCid, sourceCar) - require.NoError(t, err) + // test retrieval of either data or constructing a partial selective-car + for _, retrieveAsCar := range []bool{false, true} { + outFile, err := ioutil.TempFile(t.TempDir(), "ret-file") + require.NoError(t, err) + defer outFile.Close() //nolint:errcheck - // Wait for the deal to be published, we should be able to start retrieval right away - dh.WaitDealPublished(ctx, proposalCid) + require.NoError(t, testGenesisRetrieval( + ctx, + client, + retOrder, + eref, + &api.FileRef{ + Path: outFile.Name(), + IsCAR: retrieveAsCar, + }, + outFile, + )) - offers, err := client.ClientFindData(ctx, carRoot, nil) - require.NoError(t, err) - require.NotEmpty(t, offers, "no offers") - - retOrder = offers[0].Order(caddr) - } - - retOrder.DatamodelPathSelector = &textSelector - - // test retrieval of either data or constructing a partial selective-car - for _, retrieveAsCar := range []bool{false, true} { - outFile, err := ioutil.TempFile(t.TempDir(), "ret-file") - require.NoError(t, err) - defer outFile.Close() //nolint:errcheck - - require.NoError(t, testGenesisRetrieval( - ctx, - client, - retOrder, - &api.FileRef{ - Path: outFile.Name(), - IsCAR: retrieveAsCar, - }, - outFile, - )) - - // UGH if I do not sleep here, I get things like: - /* - retrieval failed: Retrieve failed: there is an active retrieval deal with peer 12D3KooWK9fB9a3HZ4PQLVmEQ6pweMMn5CAyKtumB71CPTnuBDi6 for payload CID bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2 (retrieval deal ID 1631259332180384709, state DealStatusFinalizingBlockstore) - existing deal must be cancelled before starting a new retrieval deal: - github.com/filecoin-project/lotus/node/impl/client.(*API).ClientRetrieve - /home/circleci/project/node/impl/client/client.go:774 - */ - time.Sleep(time.Second) + // UGH if I do not sleep here, I get things like: + /* + retrieval 
failed: Retrieve failed: there is an active retrieval deal with peer 12D3KooWK9fB9a3HZ4PQLVmEQ6pweMMn5CAyKtumB71CPTnuBDi6 for payload CID bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2 (retrieval deal ID 1631259332180384709, state DealStatusFinalizingBlockstore) - existing deal must be cancelled before starting a new retrieval deal: + github.com/filecoin-project/lotus/node/impl/client.(*API).ClientRetrieve + /home/circleci/project/node/impl/client/client.go:774 + */ + time.Sleep(time.Second) + } } } @@ -131,14 +138,18 @@ func TestPartialRetrieval(t *testing.T) { ctx, client, api.RetrievalOrder{ - FromLocalCAR: sourceCar, - Root: carRoot, - DatamodelPathSelector: &textSelectorNonexistent, + Root: carRoot, + DataSelector: &textSelectorNonexistent, + }, + api.ExportRef{ + Root: carRoot, + FromLocalCAR: sourceCar, + DAGs: []api.DagSpec{{DataSelector: &textSelectorNonexistent}}, }, &api.FileRef{}, nil, ), - fmt.Sprintf("retrieval failed: path selection '%s' does not match a node within %s", textSelectorNonexistent, carRoot), + fmt.Sprintf("parsing dag spec: path selection does not match a node within %s", carRoot), ) // ensure non-boundary retrievals fail @@ -148,18 +159,22 @@ func TestPartialRetrieval(t *testing.T) { ctx, client, api.RetrievalOrder{ - FromLocalCAR: sourceCar, - Root: carRoot, - DatamodelPathSelector: &textSelectorNonLink, + Root: carRoot, + DataSelector: &textSelectorNonLink, + }, + api.ExportRef{ + Root: carRoot, + FromLocalCAR: sourceCar, + DAGs: []api.DagSpec{{DataSelector: &textSelectorNonLink}}, }, &api.FileRef{}, nil, ), - fmt.Sprintf("retrieval failed: error while locating partial retrieval sub-root: unsupported selection path '%s' does not correspond to a block boundary (a.k.a. CID link)", textSelectorNonLink), + fmt.Sprintf("parsing dag spec: error while locating partial retrieval sub-root: unsupported selection path '%s' does not correspond to a block boundary (a.k.a. 
CID link)", textSelectorNonLink), ) } -func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrder api.RetrievalOrder, retRef *api.FileRef, outFile *os.File) error { +func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrder api.RetrievalOrder, eref api.ExportRef, retRef *api.FileRef, outFile *os.File) error { if retOrder.Total.Nil() { retOrder.Total = big.Zero() @@ -168,7 +183,19 @@ func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrde retOrder.UnsealPrice = big.Zero() } - err := client.ClientRetrieve(ctx, retOrder, retRef) + if eref.FromLocalCAR == "" { + rr, err := client.ClientRetrieve(ctx, retOrder) + if err != nil { + return err + } + eref.DealID = rr.DealID + + if err := client.ClientRetrieveWait(ctx, rr.DealID); err != nil { + return xerrors.Errorf("retrieval wait: %w", err) + } + } + + err := client.ClientExport(ctx, eref, *retRef) if err != nil { return err } @@ -190,8 +217,10 @@ func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrde if len(cr.Header.Roots) != 1 { return fmt.Errorf("expected a single root in result car, got %d", len(cr.Header.Roots)) - } else if cr.Header.Roots[0].String() != carRoot.String() { + } else if eref.DAGs[0].ExportMerkleProof && cr.Header.Roots[0].String() != carRoot.String() { return fmt.Errorf("expected root cid '%s', got '%s'", carRoot.String(), cr.Header.Roots[0].String()) + } else if !eref.DAGs[0].ExportMerkleProof && cr.Header.Roots[0].String() != selectedCid.String() { + return fmt.Errorf("expected root cid '%s', got '%s'", selectedCid.String(), cr.Header.Roots[0].String()) } blks := make([]blocks.Block, 0) @@ -206,11 +235,11 @@ func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrde blks = append(blks, b) } - if len(blks) != 3 { - return fmt.Errorf("expected a car file with 3 blocks, got one with %d instead", len(blks)) + if (eref.DAGs[0].ExportMerkleProof && len(blks) != 3) || (!eref.DAGs[0].ExportMerkleProof && len(blks) != 1) { + return fmt.Errorf("expected a car file with 3/1 blocks, got one with %d instead", len(blks)) } - data = blks[2].RawData() + data = blks[len(blks)-1].RawData() } if string(data) != expectedResult { diff --git a/itests/fixtures/adl_test.car b/itests/fixtures/adl_test.car new file mode 100644 index 000000000..d00ca0915 Binary files /dev/null and b/itests/fixtures/adl_test.car differ diff --git a/itests/kit/deals.go b/itests/kit/deals.go index 4a9af69e6..651c15901 100644 --- a/itests/kit/deals.go +++ b/itests/kit/deals.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" @@ -320,17 +321,45 @@ func (dh *DealHarness) PerformRetrieval(ctx context.Context, deal *cid.Cid, root caddr, err := dh.client.WalletDefaultAddress(ctx) require.NoError(dh.t, err) - ref := &api.FileRef{ - Path: carFile.Name(), - IsCAR: carExport, - } - - updates, err := dh.client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref) + updatesCtx, cancel := context.WithCancel(ctx) + updates, err := dh.client.ClientGetRetrievalUpdates(updatesCtx) require.NoError(dh.t, err) - for update := range updates { - require.Emptyf(dh.t, update.Err, "retrieval failed: %s", update.Err) + retrievalRes, err := dh.client.ClientRetrieve(ctx, offers[0].Order(caddr)) + require.NoError(dh.t, err) +consumeEvents: + for { + var evt 
api.RetrievalInfo + select { + case <-updatesCtx.Done(): + dh.t.Fatal("Retrieval Timed Out") + case evt = <-updates: + if evt.ID != retrievalRes.DealID { + continue + } + } + switch evt.Status { + case retrievalmarket.DealStatusCompleted: + break consumeEvents + case retrievalmarket.DealStatusRejected: + dh.t.Fatalf("Retrieval Proposal Rejected: %s", evt.Message) + case + retrievalmarket.DealStatusDealNotFound, + retrievalmarket.DealStatusErrored: + dh.t.Fatalf("Retrieval Error: %s", evt.Message) + } } + cancel() + + require.NoError(dh.t, dh.client.ClientExport(ctx, + api.ExportRef{ + Root: root, + DealID: retrievalRes.DealID, + }, + api.FileRef{ + Path: carFile.Name(), + IsCAR: carExport, + })) ret := carFile.Name() if carExport { diff --git a/lotuspond/front/package-lock.json b/lotuspond/front/package-lock.json index 8df204f2e..252a42a6d 100644 --- a/lotuspond/front/package-lock.json +++ b/lotuspond/front/package-lock.json @@ -3569,9 +3569,9 @@ "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" }, "color-string": { - "version": "1.5.3", - "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.3.tgz", - "integrity": "sha512-dC2C5qeWoYkxki5UAXapdjqO672AM4vZuPGRQfO8b5HKuKGBbKWpITyDYN7TOFKvRW7kOgAn3746clDBMDJyQw==", + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.6.0.tgz", + "integrity": "sha512-c/hGS+kRWJutUBEngKKmk4iH3sD59MBkoxVapS/0wgpCz2u7XsNloxknyvBhzwEs1IbV36D9PwqLPJ2DTu3vMA==", "requires": { "color-name": "^1.0.0", "simple-swizzle": "^0.2.2" @@ -7814,9 +7814,9 @@ "integrity": "sha512-T/zvzYRfbVojPWahDsE5evJdHb3oJoQfFbsrKM7w5Zcs++Tr257tia3BmMP8XYVjp1S9RZXQMh7gao96BlqZOw==" }, "ws": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/ws/-/ws-5.2.2.tgz", - "integrity": "sha512-jaHFD6PFv6UgoIVda6qZllptQsMlDEJkTQcybzzXDYM1XO9Y8em691FGMPmM46WGyLU4z9KMgQN+qrux/nhlHA==", + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-5.2.3.tgz", + "integrity": "sha512-jZArVERrMsKUatIdnLzqvcfydI85dvd/Fp1u/VOpfdDWQ4c9qWXe+VIeAbQ5FrDwciAkr+lzofXLz3Kuf26AOA==", "requires": { "async-limiter": "~1.0.0" } @@ -9194,9 +9194,9 @@ "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=" }, "path-parse": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", - "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==" + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" }, "path-to-regexp": { "version": "0.1.7", @@ -9228,6 +9228,11 @@ "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" }, + "picocolors": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-0.2.1.tgz", + "integrity": "sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA==" + }, "pify": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", @@ -9354,27 +9359,18 @@ "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=" }, "postcss": { - "version": "7.0.17", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.17.tgz", - "integrity": "sha512-546ZowA+KZ3OasvQZHsbuEpysvwTZNGJv9EfyCQdsIDltPSWHAeTQ5fQy/Npi2ZDtLI3zs7Ps/p6wThErhm9fQ==", + "version": "7.0.39", + "resolved": 
"https://registry.npmjs.org/postcss/-/postcss-7.0.39.tgz", + "integrity": "sha512-yioayjNbHn6z1/Bywyb2Y4s3yvDAeXGOyxqD+LnVOinq6Mdmd++SW2wUNVzavyyHxd6+DxzWGIuosg6P1Rj8uA==", "requires": { - "chalk": "^2.4.2", - "source-map": "^0.6.1", - "supports-color": "^6.1.0" + "picocolors": "^0.2.1", + "source-map": "^0.6.1" }, "dependencies": { "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" - }, - "supports-color": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", - "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", - "requires": { - "has-flag": "^3.0.0" - } } } }, @@ -11057,9 +11053,9 @@ } }, "ws": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/ws/-/ws-5.2.2.tgz", - "integrity": "sha512-jaHFD6PFv6UgoIVda6qZllptQsMlDEJkTQcybzzXDYM1XO9Y8em691FGMPmM46WGyLU4z9KMgQN+qrux/nhlHA==", + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-5.2.3.tgz", + "integrity": "sha512-jZArVERrMsKUatIdnLzqvcfydI85dvd/Fp1u/VOpfdDWQ4c9qWXe+VIeAbQ5FrDwciAkr+lzofXLz3Kuf26AOA==", "requires": { "async-limiter": "~1.0.0" } @@ -12203,9 +12199,9 @@ } }, "tmpl": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.4.tgz", - "integrity": "sha1-I2QN17QtAEM5ERQIIOXPRA5SHdE=" + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", + "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==" }, "to-arraybuffer": { "version": "1.0.1", @@ -12523,9 +12519,9 @@ } }, "url-parse": { - "version": "1.4.7", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.4.7.tgz", - "integrity": "sha512-d3uaVyzDB9tQoSXFvuSUNFibTd9zxd2bkVrDRvF5TmvWWQwqE4lgYJ5m+x1DbecWkw+LK4RNl2CU1hHuOKPVlg==", + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.3.tgz", + "integrity": "sha512-IIORyIQD9rvj0A4CLWsHkBBJuNqWpFQe224b6j9t/ABmquIS0qDU2pY6kl6AuOrL5OkCXHMCFNe1jBcuAggjvQ==", "requires": { "querystringify": "^2.1.1", "requires-port": "^1.0.0" @@ -13164,9 +13160,9 @@ } }, "ws": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.1.tgz", - "integrity": "sha512-GIyAXC2cB7LjvpgMt9EKS2ldqr0MTrORaleiOno6TweZ6r3TKtoFQWay/2PceJ3RuBasOHzXNn5Lrw1X0bEjqA==", + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.2.tgz", + "integrity": "sha512-zmhltoSR8u1cnDsD43TX59mzoMZsLKqUweyYBAIvTngR3shc0W6aOZylZmq/7hqyVxPdi+5Ud2QInblgyE72fw==", "requires": { "async-limiter": "~1.0.0" } diff --git a/markets/dagstore/miner_api.go b/markets/dagstore/miner_api.go index afe623eb2..77b4b97bf 100644 --- a/markets/dagstore/miner_api.go +++ b/markets/dagstore/miner_api.go @@ -3,34 +3,43 @@ package dagstore import ( "context" "fmt" - "io" - "github.com/filecoin-project/dagstore/throttle" "github.com/ipfs/go-cid" "golang.org/x/xerrors" + "github.com/filecoin-project/dagstore/mount" + "github.com/filecoin-project/dagstore/throttle" "github.com/filecoin-project/go-fil-markets/piecestore" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/shared" + "github.com/filecoin-project/go-state-types/abi" ) +//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_lotus_accessor.go -package=mock_dagstore . 
MinerAPI + type MinerAPI interface { - FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error) + FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (mount.Reader, error) GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (uint64, error) IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, error) Start(ctx context.Context) error } +type SectorAccessor interface { + retrievalmarket.SectorAccessor + + UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) +} + type minerAPI struct { pieceStore piecestore.PieceStore - sa retrievalmarket.SectorAccessor + sa SectorAccessor throttle throttle.Throttler readyMgr *shared.ReadyManager } var _ MinerAPI = (*minerAPI)(nil) -func NewMinerAPI(store piecestore.PieceStore, sa retrievalmarket.SectorAccessor, concurrency int) MinerAPI { +func NewMinerAPI(store piecestore.PieceStore, sa SectorAccessor, concurrency int) MinerAPI { return &minerAPI{ pieceStore: store, sa: sa, @@ -91,7 +100,7 @@ func (m *minerAPI) IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, erro return false, nil } -func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error) { +func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (mount.Reader, error) { err := m.readyMgr.AwaitReady() if err != nil { return nil, err @@ -117,7 +126,7 @@ func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io deal := deal // Throttle this path to avoid flooding the storage subsystem. - var reader io.ReadCloser + var reader mount.Reader err := m.throttle.Do(ctx, func(ctx context.Context) (err error) { isUnsealed, err := m.sa.IsUnsealed(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) if err != nil { @@ -127,7 +136,7 @@ func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io return nil } // Because we know we have an unsealed copy, this UnsealSector call will actually not perform any unsealing. - reader, err = m.sa.UnsealSector(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) + reader, err = m.sa.UnsealSectorAt(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) return err }) @@ -149,7 +158,7 @@ func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io // block for a long time with the current PoRep // // This path is unthrottled. 
- reader, err := m.sa.UnsealSector(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) + reader, err := m.sa.UnsealSectorAt(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) if err != nil { lastErr = xerrors.Errorf("failed to unseal deal %d: %w", deal.DealID, err) log.Warn(lastErr.Error()) diff --git a/markets/dagstore/miner_api_test.go b/markets/dagstore/miner_api_test.go index 4a61c62a8..45cbf2461 100644 --- a/markets/dagstore/miner_api_test.go +++ b/markets/dagstore/miner_api_test.go @@ -15,6 +15,7 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" + "github.com/filecoin-project/dagstore/mount" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-actors/actors/builtin/paych" @@ -203,6 +204,10 @@ type mockRPN struct { } func (m *mockRPN) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) { + return m.UnsealSectorAt(ctx, sectorID, offset, length) +} + +func (m *mockRPN) UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) { atomic.AddInt32(&m.calls, 1) m.lk.RLock() defer m.lk.RUnlock() @@ -211,7 +216,13 @@ func (m *mockRPN) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, o if !ok { panic("sector not found") } - return io.NopCloser(bytes.NewBuffer([]byte(data))), nil + return struct { + io.ReadCloser + io.ReaderAt + io.Seeker + }{ + ReadCloser: io.NopCloser(bytes.NewBuffer([]byte(data[:]))), + }, nil } func (m *mockRPN) IsUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) { diff --git a/markets/dagstore/mocks/mock_lotus_accessor.go b/markets/dagstore/mocks/mock_lotus_accessor.go index 2e19b4482..19923cc2a 100644 --- a/markets/dagstore/mocks/mock_lotus_accessor.go +++ b/markets/dagstore/mocks/mock_lotus_accessor.go @@ -1,96 +1,96 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: lotusaccessor.go +// Source: github.com/filecoin-project/lotus/markets/dagstore (interfaces: MinerAPI) // Package mock_dagstore is a generated GoMock package. package mock_dagstore import ( context "context" - io "io" reflect "reflect" + mount "github.com/filecoin-project/dagstore/mount" gomock "github.com/golang/mock/gomock" cid "github.com/ipfs/go-cid" ) -// MockLotusAccessor is a mock of LotusAccessor interface. -type MockLotusAccessor struct { +// MockMinerAPI is a mock of MinerAPI interface. +type MockMinerAPI struct { ctrl *gomock.Controller - recorder *MockLotusAccessorMockRecorder + recorder *MockMinerAPIMockRecorder } -// MockLotusAccessorMockRecorder is the mock recorder for MockLotusAccessor. -type MockLotusAccessorMockRecorder struct { - mock *MockLotusAccessor +// MockMinerAPIMockRecorder is the mock recorder for MockMinerAPI. +type MockMinerAPIMockRecorder struct { + mock *MockMinerAPI } -// NewMockLotusAccessor creates a new mock instance. -func NewMockLotusAccessor(ctrl *gomock.Controller) *MockLotusAccessor { - mock := &MockLotusAccessor{ctrl: ctrl} - mock.recorder = &MockLotusAccessorMockRecorder{mock} +// NewMockMinerAPI creates a new mock instance. 
+func NewMockMinerAPI(ctrl *gomock.Controller) *MockMinerAPI { + mock := &MockMinerAPI{ctrl: ctrl} + mock.recorder = &MockMinerAPIMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockLotusAccessor) EXPECT() *MockLotusAccessorMockRecorder { +func (m *MockMinerAPI) EXPECT() *MockMinerAPIMockRecorder { return m.recorder } // FetchUnsealedPiece mocks base method. -func (m *MockLotusAccessor) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error) { +func (m *MockMinerAPI) FetchUnsealedPiece(arg0 context.Context, arg1 cid.Cid) (mount.Reader, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchUnsealedPiece", ctx, pieceCid) - ret0, _ := ret[0].(io.ReadCloser) + ret := m.ctrl.Call(m, "FetchUnsealedPiece", arg0, arg1) + ret0, _ := ret[0].(mount.Reader) ret1, _ := ret[1].(error) return ret0, ret1 } // FetchUnsealedPiece indicates an expected call of FetchUnsealedPiece. -func (mr *MockLotusAccessorMockRecorder) FetchUnsealedPiece(ctx, pieceCid interface{}) *gomock.Call { +func (mr *MockMinerAPIMockRecorder) FetchUnsealedPiece(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchUnsealedPiece", reflect.TypeOf((*MockLotusAccessor)(nil).FetchUnsealedPiece), ctx, pieceCid) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchUnsealedPiece", reflect.TypeOf((*MockMinerAPI)(nil).FetchUnsealedPiece), arg0, arg1) } // GetUnpaddedCARSize mocks base method. -func (m *MockLotusAccessor) GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (uint64, error) { +func (m *MockMinerAPI) GetUnpaddedCARSize(arg0 context.Context, arg1 cid.Cid) (uint64, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetUnpaddedCARSize", ctx, pieceCid) + ret := m.ctrl.Call(m, "GetUnpaddedCARSize", arg0, arg1) ret0, _ := ret[0].(uint64) ret1, _ := ret[1].(error) return ret0, ret1 } // GetUnpaddedCARSize indicates an expected call of GetUnpaddedCARSize. -func (mr *MockLotusAccessorMockRecorder) GetUnpaddedCARSize(ctx, pieceCid interface{}) *gomock.Call { +func (mr *MockMinerAPIMockRecorder) GetUnpaddedCARSize(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUnpaddedCARSize", reflect.TypeOf((*MockLotusAccessor)(nil).GetUnpaddedCARSize), ctx, pieceCid) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUnpaddedCARSize", reflect.TypeOf((*MockMinerAPI)(nil).GetUnpaddedCARSize), arg0, arg1) } // IsUnsealed mocks base method. -func (m *MockLotusAccessor) IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, error) { +func (m *MockMinerAPI) IsUnsealed(arg0 context.Context, arg1 cid.Cid) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsUnsealed", ctx, pieceCid) + ret := m.ctrl.Call(m, "IsUnsealed", arg0, arg1) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } // IsUnsealed indicates an expected call of IsUnsealed. -func (mr *MockLotusAccessorMockRecorder) IsUnsealed(ctx, pieceCid interface{}) *gomock.Call { +func (mr *MockMinerAPIMockRecorder) IsUnsealed(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnsealed", reflect.TypeOf((*MockLotusAccessor)(nil).IsUnsealed), ctx, pieceCid) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnsealed", reflect.TypeOf((*MockMinerAPI)(nil).IsUnsealed), arg0, arg1) } // Start mocks base method. 
-func (m *MockLotusAccessor) Start(ctx context.Context) error { +func (m *MockMinerAPI) Start(arg0 context.Context) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Start", ctx) + ret := m.ctrl.Call(m, "Start", arg0) ret0, _ := ret[0].(error) return ret0 } // Start indicates an expected call of Start. -func (mr *MockLotusAccessorMockRecorder) Start(ctx interface{}) *gomock.Call { +func (mr *MockMinerAPIMockRecorder) Start(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockLotusAccessor)(nil).Start), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockMinerAPI)(nil).Start), arg0) } diff --git a/markets/dagstore/mount.go b/markets/dagstore/mount.go index c97dcbf86..0ecdc9808 100644 --- a/markets/dagstore/mount.go +++ b/markets/dagstore/mount.go @@ -2,7 +2,6 @@ package dagstore import ( "context" - "io" "net/url" "github.com/ipfs/go-cid" @@ -57,19 +56,15 @@ func (l *LotusMount) Deserialize(u *url.URL) error { } func (l *LotusMount) Fetch(ctx context.Context) (mount.Reader, error) { - r, err := l.API.FetchUnsealedPiece(ctx, l.PieceCid) - if err != nil { - return nil, xerrors.Errorf("failed to fetch unsealed piece %s: %w", l.PieceCid, err) - } - return &readCloser{r}, nil + return l.API.FetchUnsealedPiece(ctx, l.PieceCid) } func (l *LotusMount) Info() mount.Info { return mount.Info{ Kind: mount.KindRemote, AccessSequential: true, - AccessSeek: false, - AccessRandom: false, + AccessSeek: true, + AccessRandom: true, } } @@ -94,17 +89,3 @@ func (l *LotusMount) Stat(ctx context.Context) (mount.Stat, error) { Ready: isUnsealed, }, nil } - -type readCloser struct { - io.ReadCloser -} - -var _ mount.Reader = (*readCloser)(nil) - -func (r *readCloser) ReadAt(p []byte, off int64) (n int, err error) { - return 0, xerrors.Errorf("ReadAt called but not implemented") -} - -func (r *readCloser) Seek(offset int64, whence int) (int64, error) { - return 0, xerrors.Errorf("Seek called but not implemented") -} diff --git a/markets/dagstore/mount_test.go b/markets/dagstore/mount_test.go index 09b255d6a..d6ea54964 100644 --- a/markets/dagstore/mount_test.go +++ b/markets/dagstore/mount_test.go @@ -2,6 +2,7 @@ package dagstore import ( "context" + "io" "io/ioutil" "net/url" "strings" @@ -12,7 +13,6 @@ import ( "github.com/stretchr/testify/require" "github.com/filecoin-project/dagstore/mount" - mock_dagstore "github.com/filecoin-project/lotus/markets/dagstore/mocks" ) @@ -26,12 +26,31 @@ func TestLotusMount(t *testing.T) { defer mockCtrl.Finish() // create a mock lotus api that returns the reader we want - mockLotusMountAPI := mock_dagstore.NewMockLotusAccessor(mockCtrl) + mockLotusMountAPI := mock_dagstore.NewMockMinerAPI(mockCtrl) mockLotusMountAPI.EXPECT().IsUnsealed(gomock.Any(), cid).Return(true, nil).Times(1) - mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(&readCloser{ioutil.NopCloser(strings.NewReader("testing"))}, nil).Times(1) - mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(&readCloser{ioutil.NopCloser(strings.NewReader("testing"))}, nil).Times(1) + mr1 := struct { + io.ReadCloser + io.ReaderAt + io.Seeker + }{ + ReadCloser: ioutil.NopCloser(strings.NewReader("testing")), + ReaderAt: nil, + Seeker: nil, + } + mr2 := struct { + io.ReadCloser + io.ReaderAt + io.Seeker + }{ + ReadCloser: ioutil.NopCloser(strings.NewReader("testing")), + ReaderAt: nil, + Seeker: nil, + } + + 
mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(mr1, nil).Times(1) + mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(mr2, nil).Times(1) mockLotusMountAPI.EXPECT().GetUnpaddedCARSize(ctx, cid).Return(uint64(100), nil).Times(1) mnt, err := NewLotusMount(cid, mockLotusMountAPI) @@ -109,7 +128,7 @@ func TestLotusMountRegistration(t *testing.T) { // when test is done, assert expectations on all mock objects. defer mockCtrl.Finish() - mockLotusMountAPI := mock_dagstore.NewMockLotusAccessor(mockCtrl) + mockLotusMountAPI := mock_dagstore.NewMockMinerAPI(mockCtrl) registry := mount.NewRegistry() err = registry.Register(lotusScheme, mountTemplate(mockLotusMountAPI)) require.NoError(t, err) diff --git a/markets/dagstore/wrapper_migration_test.go b/markets/dagstore/wrapper_migration_test.go index 13d8db876..e46f8779b 100644 --- a/markets/dagstore/wrapper_migration_test.go +++ b/markets/dagstore/wrapper_migration_test.go @@ -2,13 +2,16 @@ package dagstore import ( "context" + "io" "testing" - "github.com/filecoin-project/dagstore" "github.com/stretchr/testify/require" + "github.com/filecoin-project/dagstore" + "github.com/filecoin-project/dagstore/mount" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes" tut "github.com/filecoin-project/go-fil-markets/shared_testutil" "github.com/filecoin-project/go-fil-markets/storagemarket" @@ -93,7 +96,7 @@ func TestShardRegistration(t *testing.T) { cfg := config.DefaultStorageMiner().DAGStore cfg.RootDir = t.TempDir() - mapi := NewMinerAPI(ps, sa, 10) + mapi := NewMinerAPI(ps, &wrappedSA{sa}, 10) dagst, w, err := NewDAGStore(cfg, mapi) require.NoError(t, err) require.NotNil(t, dagst) @@ -119,3 +122,25 @@ func TestShardRegistration(t *testing.T) { // ps.VerifyExpectations(t) } + +type wrappedSA struct { + retrievalmarket.SectorAccessor +} + +func (w *wrappedSA) UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) { + r, err := w.UnsealSector(ctx, sectorID, pieceOffset, length) + if err != nil { + return nil, err + } + return struct { + io.ReadCloser + io.Seeker + io.ReaderAt + }{ + ReadCloser: r, + Seeker: nil, + ReaderAt: nil, + }, err +} + +var _ SectorAccessor = &wrappedSA{} diff --git a/markets/dagstore/wrapper_test.go b/markets/dagstore/wrapper_test.go index 9d3e6939e..48e01100b 100644 --- a/markets/dagstore/wrapper_test.go +++ b/markets/dagstore/wrapper_test.go @@ -3,21 +3,19 @@ package dagstore import ( "bytes" "context" - "io" "os" "testing" "time" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/dagstore" "github.com/filecoin-project/dagstore/mount" "github.com/filecoin-project/dagstore/shard" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" + "github.com/filecoin-project/lotus/node/config" ) // TestWrapperAcquireRecovery verifies that if acquire shard returns a "not found" @@ -191,7 +189,7 @@ func (m mockLotusMount) Start(ctx context.Context) error { return nil } -func (m mockLotusMount) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error) { +func (m mockLotusMount) FetchUnsealedPiece(context.Context, cid.Cid) (mount.Reader, error) { panic("implement me") } diff --git a/markets/sectoraccessor/sectoraccessor.go 
b/markets/sectoraccessor/sectoraccessor.go index 1304a3a00..4320e3fb1 100644 --- a/markets/sectoraccessor/sectoraccessor.go +++ b/markets/sectoraccessor/sectoraccessor.go @@ -4,23 +4,24 @@ import ( "context" "io" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" + "github.com/filecoin-project/dagstore/mount" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-state-types/abi" + specstorage "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/chain/types" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/lotus/markets/dagstore" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/storage/sectorblocks" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-state-types/abi" - specstorage "github.com/filecoin-project/specs-storage/storage" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("sectoraccessor") @@ -34,12 +35,16 @@ type sectorAccessor struct { var _ retrievalmarket.SectorAccessor = (*sectorAccessor)(nil) -func NewSectorAccessor(maddr dtypes.MinerAddress, secb sectorblocks.SectorBuilder, pp sectorstorage.PieceProvider, full v1api.FullNode) retrievalmarket.SectorAccessor { +func NewSectorAccessor(maddr dtypes.MinerAddress, secb sectorblocks.SectorBuilder, pp sectorstorage.PieceProvider, full v1api.FullNode) dagstore.SectorAccessor { return §orAccessor{address.Address(maddr), secb, pp, full} } -func (sa *sectorAccessor) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) { - log.Debugf("get sector %d, offset %d, length %d", sectorID, offset, length) +func (sa *sectorAccessor) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) { + return sa.UnsealSectorAt(ctx, sectorID, pieceOffset, length) +} + +func (sa *sectorAccessor) UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) { + log.Debugf("get sector %d, pieceOffset %d, length %d", sectorID, pieceOffset, length) si, err := sa.sectorsStatus(ctx, sectorID, false) if err != nil { return nil, err @@ -64,8 +69,8 @@ func (sa *sectorAccessor) UnsealSector(ctx context.Context, sectorID abi.SectorN } // Get a reader for the piece, unsealing the piece if necessary - log.Debugf("read piece in sector %d, offset %d, length %d from miner %d", sectorID, offset, length, mid) - r, unsealed, err := sa.pp.ReadPiece(ctx, ref, storiface.UnpaddedByteIndex(offset), length, si.Ticket.Value, commD) + log.Debugf("read piece in sector %d, pieceOffset %d, length %d from miner %d", sectorID, pieceOffset, length, mid) + r, unsealed, err := sa.pp.ReadPiece(ctx, ref, storiface.UnpaddedByteIndex(pieceOffset), length, si.Ticket.Value, commD) if err != nil { return nil, xerrors.Errorf("failed to unseal piece from sector %d: %w", sectorID, err) } diff --git a/metrics/metrics.go b/metrics/metrics.go index b969a4422..b4032bb1d 100644 --- a/metrics/metrics.go +++ 
b/metrics/metrics.go @@ -128,6 +128,15 @@ var ( StorageLimitUsedBytes = stats.Int64("storage/path_limit_used_bytes", "used optional storage limit bytes", stats.UnitBytes) StorageLimitMaxBytes = stats.Int64("storage/path_limit_max_bytes", "optional storage limit", stats.UnitBytes) + DagStorePRInitCount = stats.Int64("dagstore/pr_init_count", "PieceReader init count", stats.UnitDimensionless) + DagStorePRBytesRequested = stats.Int64("dagstore/pr_requested_bytes", "PieceReader requested bytes", stats.UnitBytes) + DagStorePRBytesDiscarded = stats.Int64("dagstore/pr_discarded_bytes", "PieceReader discarded bytes", stats.UnitBytes) + DagStorePRDiscardCount = stats.Int64("dagstore/pr_discard_count", "PieceReader discard count", stats.UnitDimensionless) + DagStorePRSeekBackCount = stats.Int64("dagstore/pr_seek_back_count", "PieceReader seek back count", stats.UnitDimensionless) + DagStorePRSeekForwardCount = stats.Int64("dagstore/pr_seek_forward_count", "PieceReader seek forward count", stats.UnitDimensionless) + DagStorePRSeekBackBytes = stats.Int64("dagstore/pr_seek_back_bytes", "PieceReader seek back bytes", stats.UnitBytes) + DagStorePRSeekForwardBytes = stats.Int64("dagstore/pr_seek_forward_bytes", "PieceReader seek forward bytes", stats.UnitBytes) + // splitstore SplitstoreMiss = stats.Int64("splitstore/miss", "Number of misses in hotstre access", stats.UnitDimensionless) SplitstoreCompactionTimeSeconds = stats.Float64("splitstore/compaction_time", "Compaction time in seconds", stats.UnitSeconds) @@ -142,7 +151,7 @@ var ( Description: "Lotus node information", Measure: LotusInfo, Aggregation: view.LastValue(), - TagKeys: []tag.Key{Version, Commit}, + TagKeys: []tag.Key{Version, Commit, NodeType}, } ChainNodeHeightView = &view.View{ Measure: ChainNodeHeight, @@ -383,6 +392,39 @@ var ( TagKeys: []tag.Key{StorageID}, } + DagStorePRInitCountView = &view.View{ + Measure: DagStorePRInitCount, + Aggregation: view.Count(), + } + DagStorePRBytesRequestedView = &view.View{ + Measure: DagStorePRBytesRequested, + Aggregation: view.Sum(), + } + DagStorePRBytesDiscardedView = &view.View{ + Measure: DagStorePRBytesDiscarded, + Aggregation: view.Sum(), + } + DagStorePRDiscardCountView = &view.View{ + Measure: DagStorePRDiscardCount, + Aggregation: view.Count(), + } + DagStorePRSeekBackCountView = &view.View{ + Measure: DagStorePRSeekBackCount, + Aggregation: view.Count(), + } + DagStorePRSeekForwardCountView = &view.View{ + Measure: DagStorePRSeekForwardCount, + Aggregation: view.Count(), + } + DagStorePRSeekBackBytesView = &view.View{ + Measure: DagStorePRSeekBackBytes, + Aggregation: view.Sum(), + } + DagStorePRSeekForwardBytesView = &view.View{ + Measure: DagStorePRSeekForwardBytes, + Aggregation: view.Sum(), + } + // splitstore SplitstoreMissView = &view.View{ Measure: SplitstoreMiss, @@ -539,6 +581,14 @@ var MinerNodeViews = append([]*view.View{ StorageReservedBytesView, StorageLimitUsedBytesView, StorageLimitMaxBytesView, + DagStorePRInitCountView, + DagStorePRBytesRequestedView, + DagStorePRBytesDiscardedView, + DagStorePRDiscardCountView, + DagStorePRSeekBackCountView, + DagStorePRSeekForwardCountView, + DagStorePRSeekBackBytesView, + DagStorePRSeekForwardBytesView, }, DefaultViews...) // SinceInMilliseconds returns the duration of time since the provide time as a float64. 
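Note on the new dagstore/pr_* metrics added in the hunk above: they follow the standard OpenCensus pattern already used throughout metrics.go (declare a measure, wrap it in a view, register the view, record measurements). The sketch below is an illustrative, self-contained example of that pattern only; the measure/view names mirror DagStorePRBytesRequested for readability, and the recordRequested helper is hypothetical, not the actual PieceReader wiring in the dagstore package.

package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

// Measure and view shaped like DagStorePRBytesRequested /
// DagStorePRBytesRequestedView in the patch (names here are illustrative).
var (
	prBytesRequested = stats.Int64("dagstore/pr_requested_bytes",
		"PieceReader requested bytes", stats.UnitBytes)

	prBytesRequestedView = &view.View{
		Measure:     prBytesRequested,
		Aggregation: view.Sum(), // total bytes requested across all reads
	}
)

func main() {
	// Views must be registered before recorded measurements are exported.
	if err := view.Register(prBytesRequestedView); err != nil {
		log.Fatalf("registering view: %v", err)
	}

	// A reader implementation would call something like this on every
	// Read/ReadAt, passing the number of bytes the caller asked for.
	recordRequested(context.Background(), 4096)
}

// recordRequested records n bytes against the requested-bytes measure.
func recordRequested(ctx context.Context, n int64) {
	stats.Record(ctx, prBytesRequested.M(n))
}

In the patch itself the corresponding views are appended to MinerNodeViews (see the hunk above), so once registered they are exported alongside the existing miner metrics.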
diff --git a/node/builder.go b/node/builder.go index 3f2e59503..0520d62dd 100644 --- a/node/builder.go +++ b/node/builder.go @@ -177,7 +177,7 @@ var LibP2P = Options( // Host settings Override(DefaultTransportsKey, lp2p.DefaultTransports), Override(AddrsFactoryKey, lp2p.AddrsFactory(nil, nil)), - Override(SmuxTransportKey, lp2p.SmuxTransport(true)), + Override(SmuxTransportKey, lp2p.SmuxTransport()), Override(RelayKey, lp2p.NoRelay()), Override(SecurityKey, lp2p.Security(true, false)), diff --git a/node/builder_miner.go b/node/builder_miner.go index 3447eb3e6..74b0c5558 100644 --- a/node/builder_miner.go +++ b/node/builder_miner.go @@ -155,7 +155,8 @@ func ConfigStorageMiner(c interface{}) Option { Override(DAGStoreKey, modules.DAGStore), // Markets (retrieval) - Override(new(retrievalmarket.SectorAccessor), sectoraccessor.NewSectorAccessor), + Override(new(dagstore.SectorAccessor), sectoraccessor.NewSectorAccessor), + Override(new(retrievalmarket.SectorAccessor), From(new(dagstore.SectorAccessor))), Override(new(retrievalmarket.RetrievalProviderNode), retrievaladapter.NewRetrievalProviderNode), Override(new(rmnet.RetrievalMarketNetwork), modules.RetrievalNetwork), Override(new(retrievalmarket.RetrievalProvider), modules.RetrievalProvider), diff --git a/node/impl/client/client.go b/node/impl/client/client.go index 199a2122d..4b6903d94 100644 --- a/node/impl/client/client.go +++ b/node/impl/client/client.go @@ -4,17 +4,22 @@ import ( "bufio" "bytes" "context" + "errors" "fmt" "io" "os" "sort" + "strings" "time" bstore "github.com/ipfs/go-ipfs-blockstore" + format "github.com/ipfs/go-ipld-format" unixfile "github.com/ipfs/go-unixfs/file" "github.com/ipld/go-car" + "github.com/ipld/go-car/util" carv2 "github.com/ipld/go-car/v2" carv2bs "github.com/ipld/go-car/v2/blockstore" + "github.com/ipld/go-ipld-prime/datamodel" "golang.org/x/xerrors" "github.com/filecoin-project/go-padreader" @@ -58,7 +63,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-actors/v3/actors/builtin/market" - marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/repo/imports" @@ -760,325 +764,405 @@ func (a *API) ClientCancelRetrievalDeal(ctx context.Context, dealID rm.DealID) e } } -func (a *API) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error { - events := make(chan marketevents.RetrievalEvent) - go a.clientRetrieve(ctx, order, ref, events) - - for { - select { - case evt, ok := <-events: - if !ok { // done successfully - return nil - } - - if evt.Err != "" { - return xerrors.Errorf("retrieval failed: %s", evt.Err) - } - case <-ctx.Done(): - return xerrors.Errorf("retrieval timed out") - } - } -} - -func (a *API) ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { - events := make(chan marketevents.RetrievalEvent) - go a.clientRetrieve(ctx, order, ref, events) - return events, nil -} - -type retrievalSubscribeEvent struct { - event rm.ClientEvent - state rm.ClientDealState -} - -func consumeAllEvents(ctx context.Context, dealID rm.DealID, subscribeEvents chan retrievalSubscribeEvent, events chan marketevents.RetrievalEvent) error { - for { - var subscribeEvent retrievalSubscribeEvent - select { - case <-ctx.Done(): - return xerrors.New("Retrieval Timed Out") - case subscribeEvent = <-subscribeEvents: - if subscribeEvent.state.ID != dealID { - // we can't 
check the deal ID ahead of time because: - // 1. We need to subscribe before retrieving. - // 2. We won't know the deal ID until after retrieving. - continue - } - } - - select { - case <-ctx.Done(): - return xerrors.New("Retrieval Timed Out") - case events <- marketevents.RetrievalEvent{ - Event: subscribeEvent.event, - Status: subscribeEvent.state.Status, - BytesReceived: subscribeEvent.state.TotalReceived, - FundsSpent: subscribeEvent.state.FundsSpent, - }: - } - - state := subscribeEvent.state - switch state.Status { - case rm.DealStatusCompleted: - return nil - case rm.DealStatusRejected: - return xerrors.Errorf("Retrieval Proposal Rejected: %s", state.Message) - case rm.DealStatusCancelled: - return xerrors.Errorf("Retrieval was cancelled externally: %s", state.Message) - case - rm.DealStatusDealNotFound, - rm.DealStatusErrored: - return xerrors.Errorf("Retrieval Error: %s", state.Message) - } - } -} - -func (a *API) clientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef, events chan marketevents.RetrievalEvent) { - defer close(events) - - finish := func(e error) { - if e != nil { - events <- marketevents.RetrievalEvent{Err: e.Error(), FundsSpent: big.Zero()} - } - } - +func getDataSelector(dps *api.Selector, matchPath bool) (datamodel.Node, error) { sel := selectorparse.CommonSelector_ExploreAllRecursively - if order.DatamodelPathSelector != nil { + if dps != nil { - ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) + if strings.HasPrefix(string(*dps), "{") { + var err error + sel, err = selectorparse.ParseJSONSelector(string(*dps)) + if err != nil { + return nil, xerrors.Errorf("failed to parse json-selector '%s': %w", *dps, err) + } + } else { + ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) - selspec, err := textselector.SelectorSpecFromPath( + selspec, err := textselector.SelectorSpecFromPath( + textselector.Expression(*dps), matchPath, - *order.DatamodelPathSelector, + ssb.ExploreRecursive( + selector.RecursionLimitNone(), + ssb.ExploreUnion(ssb.Matcher(), ssb.ExploreAll(ssb.ExploreRecursiveEdge())), + ), + ) + if err != nil { + return nil, xerrors.Errorf("failed to parse text-selector '%s': %w", *dps, err) + } - // URGH - this is a direct copy from https://github.com/filecoin-project/go-fil-markets/blob/v1.12.0/shared/selectors.go#L10-L16 - // Unable to use it because we need the SelectorSpec, and markets exposes just a reified node - ssb.ExploreRecursive( - selector.RecursionLimitNone(), - ssb.ExploreAll(ssb.ExploreRecursiveEdge()), - ), - ) - if err != nil { - finish(xerrors.Errorf("failed to parse text-selector '%s': %w", *order.DatamodelPathSelector, err)) - return + sel = selspec.Node() + log.Infof("partial retrieval of datamodel-path-selector %s/*", *dps) } - - sel = selspec.Node() - log.Infof("partial retrieval of datamodel-path-selector %s/*", *order.DatamodelPathSelector) } - // summary: - // 1. if we're retrieving from an import, FromLocalCAR will be set. - // Skip the retrieval itself, and use the provided car as a blockstore further down - // to extract a CAR or UnixFS export from. - // 2. if we're using an IPFS blockstore for retrieval, retrieve into it, - // then use the virtual blockstore to extract a CAR or UnixFS export from it. - // 3. if we have to retrieve, perform a CARv2 retrieval, then either - // extract the CARv1 (with ExtractV1File) or use it as a blockstore further down. + return sel, nil +} - // this indicates we're proxying to IPFS. 
+func (a *API) ClientRetrieve(ctx context.Context, params api.RetrievalOrder) (*api.RestrievalRes, error) { + sel, err := getDataSelector(params.DataSelector, false) + if err != nil { + return nil, err + } + + di, err := a.doRetrieval(ctx, params, sel) + if err != nil { + return nil, err + } + + return &api.RestrievalRes{ + DealID: di, + }, nil +} + +func (a *API) doRetrieval(ctx context.Context, order api.RetrievalOrder, sel datamodel.Node) (rm.DealID, error) { + if order.MinerPeer == nil || order.MinerPeer.ID == "" { + mi, err := a.StateMinerInfo(ctx, order.Miner, types.EmptyTSK) + if err != nil { + return 0, err + } + + order.MinerPeer = &rm.RetrievalPeer{ + ID: *mi.PeerId, + Address: order.Miner, + } + } + + if order.Total.Int == nil { + return 0, xerrors.Errorf("cannot make retrieval deal for null total") + } + + if order.Size == 0 { + return 0, xerrors.Errorf("cannot make retrieval deal for zero bytes") + } + + ppb := types.BigDiv(order.Total, types.NewInt(order.Size)) + + params, err := rm.NewParamsV1(ppb, order.PaymentInterval, order.PaymentIntervalIncrease, sel, order.Piece, order.UnsealPrice) + if err != nil { + return 0, xerrors.Errorf("Error in retrieval params: %s", err) + } + + id := a.Retrieval.NextID() + id, err = a.Retrieval.Retrieve( + ctx, + id, + order.Root, + params, + order.Total, + *order.MinerPeer, + order.Client, + order.Miner, + ) + + if err != nil { + return 0, xerrors.Errorf("Retrieve failed: %w", err) + } + + return id, nil +} + +func (a *API) ClientRetrieveWait(ctx context.Context, deal rm.DealID) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + subscribeEvents := make(chan rm.ClientDealState, 1) + + unsubscribe := a.Retrieval.SubscribeToEvents(func(event rm.ClientEvent, state rm.ClientDealState) { + // We'll check the deal IDs inside consumeAllEvents. 
+ if state.ID != deal { + return + } + select { + case <-ctx.Done(): + case subscribeEvents <- state: + } + }) + defer unsubscribe() + + { + state, err := a.Retrieval.GetDeal(deal) + if err != nil { + return xerrors.Errorf("getting deal state: %w", err) + } + select { + case subscribeEvents <- state: + default: // already have an event queued from the subscription + } + } + + for { + select { + case <-ctx.Done(): + return xerrors.New("Retrieval Timed Out") + case state := <-subscribeEvents: + switch state.Status { + case rm.DealStatusCompleted: + return nil + case rm.DealStatusRejected: + return xerrors.Errorf("Retrieval Proposal Rejected: %s", state.Message) + case rm.DealStatusCancelled: + return xerrors.Errorf("Retrieval was cancelled externally: %s", state.Message) + case + rm.DealStatusDealNotFound, + rm.DealStatusErrored: + return xerrors.Errorf("Retrieval Error: %s", state.Message) + } + } + } +} + +type ExportDest struct { + Writer io.Writer + Path string +} + +func (ed *ExportDest) doWrite(cb func(io.Writer) error) error { + if ed.Writer != nil { + return cb(ed.Writer) + } + + f, err := os.OpenFile(ed.Path, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + + if err := cb(f); err != nil { + _ = f.Close() + return err + } + + return f.Close() +} + +func (a *API) ClientExport(ctx context.Context, exportRef api.ExportRef, ref api.FileRef) error { + return a.ClientExportInto(ctx, exportRef, ref.IsCAR, ExportDest{Path: ref.Path}) +} + +func (a *API) ClientExportInto(ctx context.Context, exportRef api.ExportRef, car bool, dest ExportDest) error { proxyBss, retrieveIntoIPFS := a.RtvlBlockstoreAccessor.(*retrievaladapter.ProxyBlockstoreAccessor) - carBss, retrieveIntoCAR := a.RtvlBlockstoreAccessor.(*retrievaladapter.CARBlockstoreAccessor) + carPath := exportRef.FromLocalCAR - carPath := order.FromLocalCAR - - // we actually need to retrieve from the network if carPath == "" { - if !retrieveIntoIPFS && !retrieveIntoCAR { - // we don't recognize the blockstore accessor. - finish(xerrors.Errorf("unsupported retrieval blockstore accessor")) - return - } - - if order.MinerPeer == nil || order.MinerPeer.ID == "" { - mi, err := a.StateMinerInfo(ctx, order.Miner, types.EmptyTSK) - if err != nil { - finish(err) - return - } - - order.MinerPeer = &rm.RetrievalPeer{ - ID: *mi.PeerId, - Address: order.Miner, - } - } - - if order.Total.Int == nil { - finish(xerrors.Errorf("cannot make retrieval deal for null total")) - return - } - - if order.Size == 0 { - finish(xerrors.Errorf("cannot make retrieval deal for zero bytes")) - return - } - - ppb := types.BigDiv(order.Total, types.NewInt(order.Size)) - - params, err := rm.NewParamsV1(ppb, order.PaymentInterval, order.PaymentIntervalIncrease, sel, order.Piece, order.UnsealPrice) - if err != nil { - finish(xerrors.Errorf("Error in retrieval params: %s", err)) - return - } - - // Subscribe to events before retrieving to avoid losing events. - subscribeEvents := make(chan retrievalSubscribeEvent, 1) - subscribeCtx, cancel := context.WithCancel(ctx) - defer cancel() - unsubscribe := a.Retrieval.SubscribeToEvents(func(event rm.ClientEvent, state rm.ClientDealState) { - // We'll check the deal IDs inside consumeAllEvents. 
- if state.PayloadCID.Equals(order.Root) { - select { - case <-subscribeCtx.Done(): - case subscribeEvents <- retrievalSubscribeEvent{event, state}: - } - } - }) - - id := a.Retrieval.NextID() - id, err = a.Retrieval.Retrieve( - ctx, - id, - order.Root, - params, - order.Total, - *order.MinerPeer, - order.Client, - order.Miner, - ) - - if err != nil { - unsubscribe() - finish(xerrors.Errorf("Retrieve failed: %w", err)) - return - } - - err = consumeAllEvents(ctx, id, subscribeEvents, events) - - unsubscribe() - if err != nil { - finish(xerrors.Errorf("Retrieve: %w", err)) - return + return xerrors.Errorf("unsupported retrieval blockstore accessor") } if retrieveIntoCAR { - carPath = carBss.PathFor(id) + carPath = carBss.PathFor(exportRef.DealID) } } - if ref == nil { - // If ref is nil, it only fetches the data into the configured blockstore - // (if fetching from network). - finish(nil) - return - } - - // determine where did the retrieval go var retrievalBs bstore.Blockstore if retrieveIntoIPFS { retrievalBs = proxyBss.Blockstore } else { cbs, err := stores.ReadOnlyFilestore(carPath) if err != nil { - finish(err) - return + return err } defer cbs.Close() //nolint:errcheck retrievalBs = cbs } + dserv := merkledag.NewDAGService(blockservice.New(retrievalBs, offline.Exchange(retrievalBs))) + // Are we outputting a CAR? - if ref.IsCAR { - + if car { // not IPFS and we do full selection - just extract the CARv1 from the CARv2 we stored the retrieval in - if !retrieveIntoIPFS && order.DatamodelPathSelector == nil { - finish(carv2.ExtractV1File(carPath, ref.Path)) - return + if !retrieveIntoIPFS && len(exportRef.DAGs) == 0 && dest.Writer == nil { + return carv2.ExtractV1File(carPath, dest.Path) } - - // generating a CARv1 from the configured blockstore - f, err := os.OpenFile(ref.Path, os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - finish(err) - return - } - - err = car.NewSelectiveCar( - ctx, - retrievalBs, - []car.Dag{{ - Root: order.Root, - Selector: sel, - }}, - car.MaxTraversalLinks(config.MaxTraversalLinks), - ).Write(f) - if err != nil { - finish(err) - return - } - - finish(f.Close()) - return } - // we are extracting a UnixFS file. 
- ds := merkledag.NewDAGService(blockservice.New(retrievalBs, offline.Exchange(retrievalBs))) - root := order.Root + roots, err := parseDagSpec(ctx, exportRef.Root, exportRef.DAGs, dserv, car) + if err != nil { + return xerrors.Errorf("parsing dag spec: %w", err) + } + if car { + return a.outputCAR(ctx, dserv, retrievalBs, exportRef.Root, roots, dest) + } - // if we used a selector - need to find the sub-root the user actually wanted to retrieve - if order.DatamodelPathSelector != nil { + if len(roots) != 1 { + return xerrors.Errorf("unixfs retrieval requires one root node, got %d", len(roots)) + } - var subRootFound bool + return a.outputUnixFS(ctx, roots[0].root, dserv, dest) +} - // no err check - we just compiled this before starting, but now we do not wrap a `*` - selspec, _ := textselector.SelectorSpecFromPath(*order.DatamodelPathSelector, nil) //nolint:errcheck +func (a *API) outputCAR(ctx context.Context, ds format.DAGService, bs bstore.Blockstore, root cid.Cid, dags []dagSpec, dest ExportDest) error { + // generating a CARv1 from the configured blockstore + roots := make([]cid.Cid, len(dags)) + for i, dag := range dags { + roots[i] = dag.root + } + + return dest.doWrite(func(w io.Writer) error { + + if err := car.WriteHeader(&car.CarHeader{ + Roots: roots, + Version: 1, + }, w); err != nil { + return fmt.Errorf("failed to write car header: %s", err) + } + + cs := cid.NewSet() + + for _, dagSpec := range dags { + if err := utils.TraverseDag( + ctx, + ds, + root, + dagSpec.selector, + func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { + if r == traversal.VisitReason_SelectionMatch { + var c cid.Cid + if p.LastBlock.Link == nil { + c = root + } else { + cidLnk, castOK := p.LastBlock.Link.(cidlink.Link) + if !castOK { + return xerrors.Errorf("cidlink cast unexpectedly failed on '%s'", p.LastBlock.Link) + } + + c = cidLnk.Cid + } + + if cs.Visit(c) { + nb, err := bs.Get(c) + if err != nil { + return xerrors.Errorf("getting block data: %w", err) + } + + err = util.LdWrite(w, c.Bytes(), nb.RawData()) + if err != nil { + return xerrors.Errorf("writing block data: %w", err) + } + } + + return nil + } + return nil + }, + ); err != nil { + return xerrors.Errorf("error while traversing car dag: %w", err) + } + } + + return nil + }) +} + +func (a *API) outputUnixFS(ctx context.Context, root cid.Cid, ds format.DAGService, dest ExportDest) error { + nd, err := ds.Get(ctx, root) + if err != nil { + return xerrors.Errorf("ClientRetrieve: %w", err) + } + file, err := unixfile.NewUnixfsFile(ctx, ds, nd) + if err != nil { + return xerrors.Errorf("ClientRetrieve: %w", err) + } + + if dest.Writer == nil { + return files.WriteTo(file, dest.Path) + } + + switch f := file.(type) { + case files.File: + _, err = io.Copy(dest.Writer, f) + if err != nil { + return err + } + return nil + default: + return fmt.Errorf("file type %T is not supported", nd) + } +} + +type dagSpec struct { + root cid.Cid + selector ipld.Node +} + +func parseDagSpec(ctx context.Context, root cid.Cid, dsp []api.DagSpec, ds format.DAGService, car bool) ([]dagSpec, error) { + if len(dsp) == 0 { + return []dagSpec{ + { + root: root, + selector: nil, + }, + }, nil + } + + out := make([]dagSpec, len(dsp)) + for i, spec := range dsp { + + if spec.DataSelector == nil { + return nil, xerrors.Errorf("invalid DagSpec at position %d: `DataSelector` can not be nil", i) + } + + // reify selector + var err error + out[i].selector, err = getDataSelector(spec.DataSelector, car && spec.ExportMerkleProof) + if err != nil { + 
return nil, err + } + + // find the pointed-at root node within the containing ds + var rsn ipld.Node + + if strings.HasPrefix(string(*spec.DataSelector), "{") { + var err error + rsn, err = selectorparse.ParseJSONSelector(string(*spec.DataSelector)) + if err != nil { + return nil, xerrors.Errorf("failed to parse json-selector '%s': %w", *spec.DataSelector, err) + } + } else { + selspec, _ := textselector.SelectorSpecFromPath(textselector.Expression(*spec.DataSelector), car && spec.ExportMerkleProof, nil) //nolint:errcheck + rsn = selspec.Node() + } + + var newRoot cid.Cid + var errHalt = errors.New("halt walk") if err := utils.TraverseDag( ctx, ds, root, - selspec.Node(), + rsn, func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { if r == traversal.VisitReason_SelectionMatch { - - if p.LastBlock.Path.String() != p.Path.String() { + if !car && p.LastBlock.Path.String() != p.Path.String() { return xerrors.Errorf("unsupported selection path '%s' does not correspond to a block boundary (a.k.a. CID link)", p.Path.String()) } + if p.LastBlock.Link == nil { + // this is likely the root node that we've matched here + newRoot = root + return errHalt + } + cidLnk, castOK := p.LastBlock.Link.(cidlink.Link) if !castOK { - return xerrors.Errorf("cidlink cast unexpectedly failed on '%s'", p.LastBlock.Link.String()) + return xerrors.Errorf("cidlink cast unexpectedly failed on '%s'", p.LastBlock.Link) } - root = cidLnk.Cid - subRootFound = true + newRoot = cidLnk.Cid + + return errHalt } return nil }, - ); err != nil { - finish(xerrors.Errorf("error while locating partial retrieval sub-root: %w", err)) - return + ); err != nil && err != errHalt { + return nil, xerrors.Errorf("error while locating partial retrieval sub-root: %w", err) } - if !subRootFound { - finish(xerrors.Errorf("path selection '%s' does not match a node within %s", *order.DatamodelPathSelector, root)) - return + if newRoot == cid.Undef { + return nil, xerrors.Errorf("path selection does not match a node within %s", root) } + + out[i].root = newRoot } - nd, err := ds.Get(ctx, root) - if err != nil { - finish(xerrors.Errorf("ClientRetrieve: %w", err)) - return - } - file, err := unixfile.NewUnixfsFile(ctx, ds, nd) - if err != nil { - finish(xerrors.Errorf("ClientRetrieve: %w", err)) - return - } - - finish(files.WriteTo(file, ref.Path)) + return out, nil } func (a *API) ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) { @@ -1110,8 +1194,13 @@ func (a *API) ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, er func (a *API) ClientGetRetrievalUpdates(ctx context.Context) (<-chan api.RetrievalInfo, error) { updates := make(chan api.RetrievalInfo) - unsub := a.Retrieval.SubscribeToEvents(func(_ rm.ClientEvent, deal rm.ClientDealState) { - updates <- a.newRetrievalInfo(ctx, deal) + unsub := a.Retrieval.SubscribeToEvents(func(evt rm.ClientEvent, deal rm.ClientDealState) { + update := a.newRetrievalInfo(ctx, deal) + update.Event = &evt + select { + case updates <- update: + case <-ctx.Done(): + } }) go func() { diff --git a/node/impl/client/client_test.go b/node/impl/client/client_test.go index 834c980ab..bf7ff7735 100644 --- a/node/impl/client/client_test.go +++ b/node/impl/client/client_test.go @@ -60,7 +60,7 @@ func TestImportLocal(t *testing.T) { require.NoError(t, err) require.True(t, local) - order := api.RetrievalOrder{ + order := api.ExportRef{ Root: root, FromLocalCAR: it.CARPath, } @@ -68,7 +68,7 @@ func TestImportLocal(t *testing.T) { // retrieve as UnixFS. 
out1 := filepath.Join(dir, "retrieval1.data") // as unixfs out2 := filepath.Join(dir, "retrieval2.data") // as car - err = a.ClientRetrieve(ctx, order, &api.FileRef{ + err = a.ClientExport(ctx, order, api.FileRef{ Path: out1, }) require.NoError(t, err) @@ -77,7 +77,7 @@ func TestImportLocal(t *testing.T) { require.NoError(t, err) require.Equal(t, b, outBytes) - err = a.ClientRetrieve(ctx, order, &api.FileRef{ + err = a.ClientExport(ctx, order, api.FileRef{ Path: out2, IsCAR: true, }) diff --git a/node/impl/full/multisig.go b/node/impl/full/multisig.go index 0d20c3f03..edc67ec9e 100644 --- a/node/impl/full/multisig.go +++ b/node/impl/full/multisig.go @@ -100,7 +100,7 @@ func (a *MsigAPI) MsigAddCancel(ctx context.Context, msig address.Address, src a return nil, actErr } - return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc) + return a.MsigCancelTxnHash(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc) } func (a *MsigAPI) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (*api.MessagePrototype, error) { @@ -127,7 +127,7 @@ func (a *MsigAPI) MsigSwapCancel(ctx context.Context, msig address.Address, src return nil, actErr } - return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc) + return a.MsigCancelTxnHash(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc) } func (a *MsigAPI) MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (*api.MessagePrototype, error) { @@ -138,7 +138,11 @@ func (a *MsigAPI) MsigApproveTxnHash(ctx context.Context, msig address.Address, return a.msigApproveOrCancelTxnHash(ctx, api.MsigApprove, msig, txID, proposer, to, amt, src, method, params) } -func (a *MsigAPI) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*api.MessagePrototype, error) { +func (a *MsigAPI) MsigCancel(ctx context.Context, msig address.Address, txID uint64, src address.Address) (*api.MessagePrototype, error) { + return a.msigApproveOrCancelSimple(ctx, api.MsigCancel, msig, txID, src) +} + +func (a *MsigAPI) MsigCancelTxnHash(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*api.MessagePrototype, error) { return a.msigApproveOrCancelTxnHash(ctx, api.MsigCancel, msig, txID, src, to, amt, src, method, params) } diff --git a/node/modules/lp2p/smux.go b/node/modules/lp2p/smux.go index f5c74e18b..608467255 100644 --- a/node/modules/lp2p/smux.go +++ b/node/modules/lp2p/smux.go @@ -2,17 +2,13 @@ package lp2p import ( "os" - "strings" "github.com/libp2p/go-libp2p" - smux "github.com/libp2p/go-libp2p-core/mux" - mplex "github.com/libp2p/go-libp2p-mplex" yamux "github.com/libp2p/go-libp2p-yamux" ) -func makeSmuxTransportOption(mplexExp bool) libp2p.Option { +func makeSmuxTransportOption() libp2p.Option { const yamuxID = "/yamux/1.0.0" - const mplexID = "/mplex/6.7.0" ymxtpt := *yamux.DefaultTransport ymxtpt.AcceptBacklog = 512 @@ -21,34 +17,12 @@ func makeSmuxTransportOption(mplexExp bool) libp2p.Option { ymxtpt.LogOutput = os.Stderr } - muxers := map[string]smux.Multiplexer{yamuxID: &ymxtpt} - if mplexExp { - muxers[mplexID] = mplex.DefaultTransport - } - - // Allow muxer preference order overriding - order := []string{yamuxID, mplexID} - 
if prefs := os.Getenv("LIBP2P_MUX_PREFS"); prefs != "" { - order = strings.Fields(prefs) - } - - opts := make([]libp2p.Option, 0, len(order)) - for _, id := range order { - tpt, ok := muxers[id] - if !ok { - log.Warnf("unknown or duplicate muxer in LIBP2P_MUX_PREFS: %s", id) - continue - } - delete(muxers, id) - opts = append(opts, libp2p.Muxer(id, tpt)) - } - - return libp2p.ChainOptions(opts...) + return libp2p.Muxer(yamuxID, &ymxtpt) } -func SmuxTransport(mplex bool) func() (opts Libp2pOpts, err error) { +func SmuxTransport() func() (opts Libp2pOpts, err error) { return func() (opts Libp2pOpts, err error) { - opts.Opts = append(opts.Opts, makeSmuxTransportOption(mplex)) + opts.Opts = append(opts.Opts, makeSmuxTransportOption()) return } } diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index 1a2dfc19f..f4d00606f 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -32,6 +32,7 @@ import ( "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-paramfetch" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-statestore" "github.com/filecoin-project/go-storedcounter" "github.com/ipfs/go-cid" @@ -683,6 +684,9 @@ func RetrievalProvider( dagStore *dagstore.Wrapper, ) (retrievalmarket.RetrievalProvider, error) { opt := retrievalimpl.DealDeciderOpt(retrievalimpl.DealDecider(userFilter)) + + retrievalmarket.DefaultPricePerByte = big.Zero() // todo: for whatever reason this is a global var in markets + return retrievalimpl.NewProvider( address.Address(maddr), adapter, diff --git a/node/modules/storageminer_dagstore.go b/node/modules/storageminer_dagstore.go index 1f72a49b9..b4f5d3535 100644 --- a/node/modules/storageminer_dagstore.go +++ b/node/modules/storageminer_dagstore.go @@ -11,8 +11,6 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - mdagstore "github.com/filecoin-project/lotus/markets/dagstore" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -25,7 +23,7 @@ const ( ) // NewMinerAPI creates a new MinerAPI adaptor for the dagstore mounts. 
-func NewMinerAPI(lc fx.Lifecycle, r repo.LockedRepo, pieceStore dtypes.ProviderPieceStore, sa retrievalmarket.SectorAccessor) (mdagstore.MinerAPI, error) { +func NewMinerAPI(lc fx.Lifecycle, r repo.LockedRepo, pieceStore dtypes.ProviderPieceStore, sa mdagstore.SectorAccessor) (mdagstore.MinerAPI, error) { cfg, err := extractDAGStoreConfig(r) if err != nil { return nil, err diff --git a/node/rpc.go b/node/rpc.go index 9bcdb7388..6a3e55115 100644 --- a/node/rpc.go +++ b/node/rpc.go @@ -27,6 +27,7 @@ import ( "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/metrics/proxy" "github.com/filecoin-project/lotus/node/impl" + "github.com/filecoin-project/lotus/node/impl/client" ) var rpclog = logging.Logger("rpc") @@ -89,14 +90,22 @@ func FullNodeHandler(a v1api.FullNode, permissioned bool, opts ...jsonrpc.Server // Import handler handleImportFunc := handleImport(a.(*impl.FullNodeAPI)) + handleExportFunc := handleExport(a.(*impl.FullNodeAPI)) if permissioned { importAH := &auth.Handler{ Verify: a.AuthVerify, Next: handleImportFunc, } m.Handle("/rest/v0/import", importAH) + + exportAH := &auth.Handler{ + Verify: a.AuthVerify, + Next: handleExportFunc, + } + m.Handle("/rest/v0/export", exportAH) } else { m.HandleFunc("/rest/v0/import", handleImportFunc) + m.HandleFunc("/rest/v0/export", handleExportFunc) } // debugging @@ -169,6 +178,34 @@ func handleImport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Reque } } +func handleExport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + w.WriteHeader(404) + return + } + if !auth.HasPerm(r.Context(), nil, api.PermWrite) { + w.WriteHeader(401) + _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"}) + return + } + + var eref api.ExportRef + if err := json.Unmarshal([]byte(r.FormValue("export")), &eref); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + car := r.FormValue("car") == "true" + + err := a.ClientExportInto(r.Context(), eref, car, client.ExportDest{Writer: w}) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } +} + func handleFractionOpt(name string, setter func(int)) http.HandlerFunc { return func(rw http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { diff --git a/scripts/archive-branches.sh b/scripts/archive-branches.sh index 98fdfaeb8..7eb680641 100755 --- a/scripts/archive-branches.sh +++ b/scripts/archive-branches.sh @@ -9,10 +9,12 @@ api_repo="repos/$org/$repo" exclusions=( 'master' + 'main' + 'releases' ) gh_api_next() { - links=$(grep '^Link:' | sed -e 's/Link: //' -e 's/, /\n/g') + links=$(grep '^link:' | sed -e 's/link: //' -e 's/, /\n/g') echo "$links" | grep '; rel="next"' >/dev/null || return link=$(echo "$links" | grep '; rel="next"' | sed -e 's/^.*//') @@ -43,7 +45,7 @@ active_branches() { git remote add archived "git@github.com:$arch_repo.git" || true -branches_to_move="$(cat <(active_branches) <(pr_branches) <((IFS=$'\n'; echo "${exclusions[*]}")) | sort -u | comm - <(origin_refs | sort) -13)" +branches_to_move="$(cat <(active_branches) <(pr_branches) <((IFS=$'\n'; echo "${exclusions[*]}")) | sort -u | comm - <(origin_refs | sort) -13 | grep -v -e '^release/' -e '^ntwk-')" echo "================" printf "%s\n" "$branches_to_move" diff --git a/scripts/build-bundle.sh b/scripts/build-bundle.sh index fe1c88611..550c80554 100755 --- a/scripts/build-bundle.sh 
+++ b/scripts/build-bundle.sh @@ -49,7 +49,4 @@ do ipfs add -q "lotus_${CIRCLE_TAG}_${ARCH}-amd64.tar.gz" > "lotus_${CIRCLE_TAG}_${ARCH}-amd64.tar.gz.cid" done -cp "../appimage/Lotus-${CIRCLE_TAG}-x86_64.AppImage" . -sha512sum "Lotus-${CIRCLE_TAG}-x86_64.AppImage" > "Lotus-${CIRCLE_TAG}-x86_64.AppImage.sha512" -ipfs add -q "Lotus-${CIRCLE_TAG}-x86_64.AppImage" > "Lotus-${CIRCLE_TAG}-x86_64.AppImage.cid" -popd +popd \ No newline at end of file diff --git a/scripts/docker-lotus-miner-entrypoint.sh b/scripts/docker-lotus-miner-entrypoint.sh index 8cdbaecce..a8f2a5540 100755 --- a/scripts/docker-lotus-miner-entrypoint.sh +++ b/scripts/docker-lotus-miner-entrypoint.sh @@ -1,19 +1,24 @@ #!/usr/bin/env bash if [ ! -z $DOCKER_LOTUS_MINER_INIT ]; then - GATE="$LOTUS_PATH"/date_initialized + GATE="${LOTUS_MINER_PATH}/date_initialized" # Don't init if already initialized. - if [ -f "$GATE" ]; then + if [ ! -f "${GATE}" ]; then + echo starting init + eval "/usr/local/bin/lotus-miner init ${DOCKER_LOTUS_MINER_INIT_ARGS}" + if [ $? == 0 ] + then + echo lotus-miner init successful + date > "$GATE" + else + echo lotus-miner init unsuccessful + exit 1 + fi + else echo lotus-miner already initialized. - exit 0 fi - echo starting init - /usr/local/bin/lotus-miner init - - # Block future inits - date > "$GATE" fi exec /usr/local/bin/lotus-miner $@ diff --git a/scripts/generate-lotus-cli.py b/scripts/generate-lotus-cli.py index 1d33687ae..7999603b2 100644 --- a/scripts/generate-lotus-cli.py +++ b/scripts/generate-lotus-cli.py @@ -31,12 +31,11 @@ def generate_lotus_cli(prog): if cmd_flag is True and line == '': cmd_flag = False if cmd_flag is True and line[-1] != ':' and 'help, h' not in line: - gap_pos = 0 + gap_pos = None sub_cmd = line if ' ' in line: gap_pos = sub_cmd.index(' ') - if gap_pos: - sub_cmd = cur_cmd + ' ' + sub_cmd[:gap_pos] + sub_cmd = cur_cmd + ' ' + sub_cmd[:gap_pos] get_cmd_recursively(sub_cmd) except Exception as e: print('Fail to deal with "%s" with error:\n%s' % (line, e)) diff --git a/scripts/publish-release.sh b/scripts/publish-release.sh index 22572de60..ad2a52dcf 100755 --- a/scripts/publish-release.sh +++ b/scripts/publish-release.sh @@ -68,9 +68,6 @@ artifacts=( "lotus_${CIRCLE_TAG}_darwin-amd64.tar.gz" "lotus_${CIRCLE_TAG}_darwin-amd64.tar.gz.cid" "lotus_${CIRCLE_TAG}_darwin-amd64.tar.gz.sha512" - "Lotus-${CIRCLE_TAG}-x86_64.AppImage" - "Lotus-${CIRCLE_TAG}-x86_64.AppImage.cid" - "Lotus-${CIRCLE_TAG}-x86_64.AppImage.sha512" ) for RELEASE_FILE in "${artifacts[@]}" diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 472621c2a..25b84058d 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -17,7 +17,6 @@ description: | https://github.com/filecoin-project/lotus -grade: devel confinement: strict parts: diff --git a/storage/wdpost_changehandler.go b/storage/wdpost_changehandler.go index 7b80f2744..9540182b5 100644 --- a/storage/wdpost_changehandler.go +++ b/storage/wdpost_changehandler.go @@ -15,7 +15,7 @@ import ( const ( SubmitConfidence = 4 - ChallengeConfidence = 10 + ChallengeConfidence = 1 ) type CompleteGeneratePoSTCb func(posts []miner.SubmitWindowedPoStParams, err error) diff --git a/storage/wdpost_sched.go b/storage/wdpost_sched.go index 88357c5b3..53801e362 100644 --- a/storage/wdpost_sched.go +++ b/storage/wdpost_sched.go @@ -115,6 +115,7 @@ func (s *WindowPoStScheduler) Run(ctx context.Context) { } gotCur = false + log.Info("restarting window post scheduler") } select { diff --git a/testplans/lotus-soup/go.mod b/testplans/lotus-soup/go.mod index 
7f4a53630..9c0dc8136 100644 --- a/testplans/lotus-soup/go.mod +++ b/testplans/lotus-soup/go.mod @@ -3,14 +3,14 @@ module github.com/filecoin-project/lotus/testplans/lotus-soup go 1.16 require ( - contrib.go.opencensus.io/exporter/prometheus v0.1.0 + contrib.go.opencensus.io/exporter/prometheus v0.4.0 github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe github.com/davecgh/go-spew v1.1.1 github.com/drand/drand v1.2.1 - github.com/filecoin-project/go-address v0.0.5 - github.com/filecoin-project/go-data-transfer v1.10.1 - github.com/filecoin-project/go-fil-markets v1.12.0 - github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec + github.com/filecoin-project/go-address v0.0.6 + github.com/filecoin-project/go-data-transfer v1.11.4 + github.com/filecoin-project/go-fil-markets v1.13.3 + github.com/filecoin-project/go-jsonrpc v0.1.5 github.com/filecoin-project/go-state-types v0.1.1-0.20210915140513-d354ccf10379 github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b github.com/filecoin-project/lotus v0.0.0-00010101000000-000000000000 @@ -21,17 +21,17 @@ require ( github.com/influxdata/influxdb v1.9.4 // indirect github.com/ipfs/go-cid v0.1.0 github.com/ipfs/go-datastore v0.4.6 - github.com/ipfs/go-ipfs-files v0.0.8 + github.com/ipfs/go-ipfs-files v0.0.9 github.com/ipfs/go-ipld-format v0.2.0 github.com/ipfs/go-log/v2 v2.3.0 - github.com/ipfs/go-merkledag v0.3.2 + github.com/ipfs/go-merkledag v0.4.1 github.com/ipfs/go-unixfs v0.2.6 - github.com/ipld/go-car v0.3.1-null-padded-files + github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823 github.com/kpacha/opencensus-influxdb v0.0.0-20181102202715-663e2683a27c github.com/libp2p/go-libp2p v0.15.0 github.com/libp2p/go-libp2p-core v0.9.0 github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6 - github.com/multiformats/go-multiaddr v0.4.0 + github.com/multiformats/go-multiaddr v0.4.1 github.com/testground/sdk-go v0.2.6 go.opencensus.io v0.23.0 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c diff --git a/testplans/lotus-soup/go.sum b/testplans/lotus-soup/go.sum index b6246d634..70da04be2 100644 --- a/testplans/lotus-soup/go.sum +++ b/testplans/lotus-soup/go.sum @@ -37,9 +37,9 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= -contrib.go.opencensus.io/exporter/jaeger v0.1.0/go.mod h1:VYianECmuFPwU37O699Vc1GOcy+y8kOsfaxHRImmjbA= -contrib.go.opencensus.io/exporter/prometheus v0.1.0 h1:SByaIoWwNgMdPSgl5sMqM2KDE5H/ukPWBRo314xiDvg= -contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A= +contrib.go.opencensus.io/exporter/jaeger v0.2.1/go.mod h1:Y8IsLgdxqh1QxYxPC5IgXVmBaeLUeQFfBeBi9PbeZd0= +contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= +contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod 
h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -78,8 +78,9 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= @@ -88,12 +89,13 @@ github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/GeertJohan/go.incremental v1.0.0 h1:7AH+pY1XUgQE4Y1HcXYaMqAI0m9yrFqo/jt0CW30vsg= github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= -github.com/GeertJohan/go.rice v1.0.0 h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voiMLQ= -github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= +github.com/GeertJohan/go.rice v1.0.2 h1:PtRw+Tg3oa3HYwiDBZyvOJ8LdIyf6lAovJJtr7YOAYk= +github.com/GeertJohan/go.rice v1.0.2/go.mod h1:af5vUNlDNkCjOZeSGFgIJxDje9qdjsO6hshx0gTmZt4= github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee h1:8doiS7ib3zi6/K172oDhSKU0dJ/miJramo9NITOMyZQ= github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee/go.mod h1:W0GbEAA4uFNYOGG2cJpmFJ04E6SD1NLELPYZB57/7AY= -github.com/HdrHistogram/hdrhistogram-go v1.1.0 h1:6dpdDPTRoo78HxAJ6T1HfMiKSnqhgRRqzCuPshRkQ7I= github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa h1:1PPxEyGdIGVkX/kqMvLJ95a1dGS1Sz7tpNEgehEYYt0= @@ -113,8 +115,8 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod 
h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo= github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= @@ -146,7 +148,9 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -159,12 +163,15 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.32.11/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.3.2/go.mod h1:7OaACgj2SX3XGWnrIjGlJM22h6yD6MEWKvm7levnnM8= +github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2/config v1.1.5/go.mod h1:P3F1hku7qzC81txjwXnwOM6Ex6ezkU6+/557Teyb64E= github.com/aws/aws-sdk-go-v2/credentials v1.1.5/go.mod h1:Ir1R6tPiR1/2y1hes8yOijFMz54hzSmgcmCDo6F45Qc= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6/go.mod h1:0+fWMitrmIpENiY8/1DyhdYPUCAPvd9UNz9mtCsEoLQ= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2/go.mod h1:Azf567f5wBUfUbwpyJJnLM/geFFIzEulGR30L+nQZOE= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4/go.mod h1:BCfU3Uo2fhKcMZFp9zU5QQGQxqWCOYmZ/27Dju3S/do= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6/go.mod h1:L0KWr0ASo83PRZu9NaZaDsw3koS6PspKv137DMDZjHo= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2/go.mod h1:nnutjMLuna0s3GVY/MAkpLX03thyNER06gXvnMAPj5g= @@ -172,6 +179,7 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0/go.mod h1:uwA7gs93Qcss43astPUb1eq github.com/aws/aws-sdk-go-v2/service/sso v1.1.5/go.mod h1:bpGz0tidC4y39sZkQSkpO/J0tzWCMXHbw6FZ0j1GkWM= github.com/aws/aws-sdk-go-v2/service/sts v1.2.2/go.mod h1:ssRzzJ2RZOVuKj2Vx1YE7ypfil/BIlgmQnCSW4DistU= github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/beevik/ntp 
v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= github.com/benbjohnson/clock v1.0.1/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= @@ -212,12 +220,13 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129 h1:gfAMKE626QEuKG3si0pdTRcr/YEbBoxY+3GOH3gWvl4= -github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129/go.mod h1:u9UyCz2eTrSGy6fbupqJ54eY5c4IC8gREQ1053dK12U= +github.com/buger/goterm v1.0.3 h1:7V/HeAQHrzPk/U4BvyH2g9u+xbUW9nr4yRPyG59W4fM= +github.com/buger/goterm v1.0.3/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= @@ -226,8 +235,9 @@ github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894/go.mod h1:sGbDF6 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= @@ -239,6 +249,7 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -269,8 +280,9 @@ github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= @@ -344,12 +356,13 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elastic/go-sysinfo v1.3.0 h1:eb2XFGTMlSwG/yyU9Y8jVAYLIzU2sFzWXwo2gmetyrE= -github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-sysinfo v1.7.0 h1:4vVvcfi255+8+TyQ7TYUTEK3A+G8v5FLE+ZKYL1z1Dg= +github.com/elastic/go-sysinfo v1.7.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= -github.com/elastic/gosigar v0.12.0 h1:AsdhYCJlTudhfOYQyFNgx+fIVTfrDO0V1ST0vHgiapU= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/gosigar v0.14.1 h1:T0aQ7n/n2ZA9W7DmAnj60v+qzqKERdBgJBO1CG2W6rc= +github.com/elastic/gosigar v0.14.1/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -358,6 +371,7 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod 
h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etclabscore/go-jsonschema-walk v0.0.6/go.mod h1:VdfDY72AFAiUhy0ZXEaWSpveGjMT5JcDIm903NGqFwQ= @@ -367,14 +381,17 @@ github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/filecoin-project/dagstore v0.4.2/go.mod h1:WY5OoLfnwISCk6eASSF927KKPqLPIlTwmG1qHpA08KY= github.com/filecoin-project/dagstore v0.4.3 h1:yeFl6+2BRY1gOVp/hrZuFa24s7LY0Qqkqx/Gh8lidZs= github.com/filecoin-project/dagstore v0.4.3/go.mod h1:dm/91AO5UaDd3bABFjg/5fmRH99vvpS7g1mykqvz6KQ= github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= -github.com/filecoin-project/go-address v0.0.5 h1:SSaFT/5aLfPXycUlFyemoHYhRgdyXClXCyDdNJKPlDM= github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= +github.com/filecoin-project/go-address v0.0.6 h1:DWQtj38ax+ogHwyH3VULRIoT8E6loyXqsk/p81xoY7M= +github.com/filecoin-project/go-address v0.0.6/go.mod h1:7B0/5DA13n6nHkB8bbGx1gWzG/dbTsZ0fgOJVGsM3TE= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o= @@ -384,16 +401,18 @@ github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQj github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW8p9au0C68JPgk= github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= -github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= -github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7 h1:U9Z+76pHCKBmtdxFV7JFZJj7OVm12I6dEKwtMVbq5p0= +github.com/filecoin-project/go-cbor-util v0.0.1 h1:E1LYZYTtjfAQwCReho0VXvbu8t3CYAVPiMx8EiV/VAs= +github.com/filecoin-project/go-cbor-util v0.0.1/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= -github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= +github.com/filecoin-project/go-commp-utils v0.1.2 h1:SKLRuGdx/6WlolaWKaUzzUYWGGePuARyO4guxOPxvt4= +github.com/filecoin-project/go-commp-utils 
v0.1.2/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= +github.com/filecoin-project/go-crypto v0.0.1 h1:AcvpSGGCgjaY8y1az6AMfKQWreF/pWO2JJGLl6gCq6o= +github.com/filecoin-project/go-crypto v0.0.1/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= -github.com/filecoin-project/go-data-transfer v1.10.0/go.mod h1:uQtqy6vUAY5v70ZHdkF5mJ8CjVtjj/JA3aOoaqzWTVw= -github.com/filecoin-project/go-data-transfer v1.10.1 h1:YQNLwhizxkdfFxegAyrnn3l7WjgMjqDlqFzr18iWiYI= -github.com/filecoin-project/go-data-transfer v1.10.1/go.mod h1:CSDMCrPK2lVGodNB1wPEogjFvM9nVGyiL1GNbBRTSdw= +github.com/filecoin-project/go-data-transfer v1.11.4 h1:jKvlx0/C8HSyLRn/G1P9TjtfBtFU9jbCvCVFmWbyYVQ= +github.com/filecoin-project/go-data-transfer v1.11.4/go.mod h1:2MitLI0ebCkLlPKM7NRggP/t9d+gCcREUKkCKqWRCwU= github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= @@ -403,8 +422,8 @@ github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+ github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo= github.com/filecoin-project/go-fil-commp-hashhash v0.1.0/go.mod h1:73S8WSEWh9vr0fDJVnKADhfIv/d6dCbAGaAGWbdJEI8= github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= -github.com/filecoin-project/go-fil-markets v1.12.0 h1:RpU5bLaMADVrU4CgLxKMGHC2ZUocNV35uINxogQCf00= -github.com/filecoin-project/go-fil-markets v1.12.0/go.mod h1:XuuZFaFujI47nrgfQJiq7jWB+6rRya6nm7Sj6uXQ80U= +github.com/filecoin-project/go-fil-markets v1.13.3 h1:iMCpG7I4fb+YLcgDnMaqZiZiyFZWNvrwHqiFPHB0/tQ= +github.com/filecoin-project/go-fil-markets v1.13.3/go.mod h1:38zuj8AgDvOfdakFLpC/syYIYgXTzkq7xqBJ6T1AuG4= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= @@ -412,12 +431,13 @@ github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+ github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1/go.mod h1:gXpNmr3oQx8l3o7qkGyDjJjYSRX7hp/FGOStdqrWyDI= github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 h1:rVVNq0x6RGQIzCo1iiJlGFm9AGIZzeifggxtKMU7zmI= github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= -github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec h1:rGI5I7fdU4viManxmDdbk5deZO7afe6L1Wc04dAmlOM= -github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= +github.com/filecoin-project/go-jsonrpc v0.1.5 h1:ckxqZ09ivBAVf5CSmxxrqqNHC7PJm3GYGtYKiNQ+vGk= +github.com/filecoin-project/go-jsonrpc v0.1.5/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= github.com/filecoin-project/go-padreader 
v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= -github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1 h1:0BogtftbcgyBx4lP2JWM00ZK7/pXmgnrDqKp9aLTgVs= github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= +github.com/filecoin-project/go-padreader v0.0.1 h1:8h2tVy5HpoNbr2gBRr+WD6zV6VD6XHig+ynSGJg8ZOs= +github.com/filecoin-project/go-padreader v0.0.1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= github.com/filecoin-project/go-paramfetch v0.0.2 h1:a6W3Ij6CKhwHYYlx+5mqvBIyw4CabZH2ojdEaoAZ6/g= github.com/filecoin-project/go-paramfetch v0.0.2/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= @@ -426,8 +446,9 @@ github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= -github.com/filecoin-project/go-state-types v0.1.1-0.20210810190654-139e0e79e69e h1:XAgb6HmgXaGRklNjhZoNMSIYriKLqjWXIqYMotg6iSs= github.com/filecoin-project/go-state-types v0.1.1-0.20210810190654-139e0e79e69e/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.1-0.20210915140513-d354ccf10379 h1:UmKkt13NrtulubqfNXhG7SQ7Pjza8BeKdNBxngqAo64= +github.com/filecoin-project/go-state-types v0.1.1-0.20210915140513-d354ccf10379/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statemachine v1.0.1 h1:LQ60+JDVjMdLxXmVFM2jjontzOYnfVE7u02CXV3WKSw= github.com/filecoin-project/go-statemachine v1.0.1/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54= @@ -454,6 +475,8 @@ github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIP github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI= github.com/filecoin-project/specs-actors/v5 v5.0.4 h1:OY7BdxJWlUfUFXWV/kpNBYGXNPasDIedf42T3sGx08s= github.com/filecoin-project/specs-actors/v5 v5.0.4/go.mod h1:5BAKRAMsOOlD8+qCw4UvT/lTLInCJ3JwOWZbX8Ipwq4= +github.com/filecoin-project/specs-actors/v6 v6.0.0 h1:i+16MFE8GScWWUF0kG7x2RZ5Hqpz0CeyBHTpnijCJ6I= +github.com/filecoin-project/specs-actors/v6 v6.0.0/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk= github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw= github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= @@ -467,14 +490,15 @@ github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin 
v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 h1:EzDjxMg43q1tA2c0MV3tNbaontnHLplHyFF6M5KiVP0= -github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1/go.mod h1:0eHX/BVySxPc6SE2mZRoppGq7qcEagxdmQnA3dzork8= +github.com/gbrlsnchs/jwt/v3 v3.0.1 h1:lbUmgAKpxnClrKloyIwpxm4OuWeDl5wLk52G91ODPw4= +github.com/gbrlsnchs/jwt/v3 v3.0.1/go.mod h1:AncDcjXz18xetI3A6STfXq2w+LuTx8pQ8bGEwRN8zVM= github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= github.com/gdamore/tcell/v2 v2.2.0 h1:vSyEgKwraXPSOkvCk7IwOSyX+Pv3V2cV9CikJMXg4U4= @@ -498,17 +522,21 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= -github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis 
v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -577,10 +605,10 @@ github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRf github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= @@ -607,8 +635,9 @@ github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/V github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968 h1:s+PDl6lozQ+dEUtUtQnO7+A2iPG3sK1pI4liU+jxn90= github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= @@ -627,6 +656,7 @@ github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRs github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA= github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= @@ -635,8 +665,9 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -668,8 +699,9 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws= github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -685,8 +717,9 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -759,16 +792,21 @@ github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4n github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= +github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk 
v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= @@ -796,12 +834,15 @@ github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI= github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= @@ -810,6 +851,10 @@ github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0 github.com/iancoleman/orderedmap v0.1.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= +github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94 
h1:9tcYMdi+7Rb1y0E9Del1DRHui7Ne3za5lLw6CjMJv/M= +github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94/go.mod h1:GYeBD1CF7AqnKZK+UCytLcY3G+UKo0ByXX/3xfdNyqQ= +github.com/icza/mighty v0.0.0-20180919140131-cfd07d671de6 h1:8UsGZ2rr2ksmEru6lToqnXgA8Mz1DP11X4zSJ159C3k= +github.com/icza/mighty v0.0.0-20180919140131-cfd07d671de6/go.mod h1:xQig96I1VNBDIWGCdTt54nHt6EeI639SmHycLYL7FkA= github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -821,8 +866,9 @@ github.com/influxdata/influxdb v1.9.4 h1:hZMq5fd4enVnruYHd7qCHsqG7kWQ/msA6x+kCvG github.com/influxdata/influxdb v1.9.4/go.mod h1:dR0WCHqaHPpJLaqWnRSl/QHsbXJR+QpofbZXyTc8ccw= github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/influxdata/influxdb1-client v0.0.0-20200515024757-02f0bf5dbca3 h1:k3/6a1Shi7GGCp9QpyYuXsMM6ncTOjCzOE9Fd6CDA+Q= github.com/influxdata/influxdb1-client v0.0.0-20200515024757-02f0bf5dbca3/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab h1:HqW4xhhynfjrtEiiSGcQUd6vrK23iMam1FO8rI7mwig= +github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= github.com/influxdata/influxql v1.1.1-0.20210223160523-b6ab99450c93/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= @@ -850,8 +896,9 @@ github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbR github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= -github.com/ipfs/go-blockservice v0.1.5 h1:euqZu96CCbToPyYVwVshu8ENURi8BhFd7FUFfTLi+fQ= github.com/ipfs/go-blockservice v0.1.5/go.mod h1:yLk8lBJCBRWRqerqCSVi3cE/Dncdt3vGC/PJMVKhLTY= +github.com/ipfs/go-blockservice v0.1.7 h1:yVe9te0M7ow8i+PPkx03YFSpxqzXx594d6h+34D6qMg= +github.com/ipfs/go-blockservice v0.1.7/go.mod h1:GmS+BAt4hrwBKkzE11AFDQUrnvqjwFatGS2MY7wOjEM= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= @@ -907,13 +954,14 @@ github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28 github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= -github.com/ipfs/go-graphsync v0.9.0/go.mod h1:J62ahWT9JbPsFL2UWsUM5rOu0lZJ0LOIH1chHdxGGcw= -github.com/ipfs/go-graphsync v0.9.1 h1:jo7ZaAZ3lal89RhKxKoRkPzIO8lmOY6KUWA1mDRZ2+U= 
-github.com/ipfs/go-graphsync v0.9.1/go.mod h1:J62ahWT9JbPsFL2UWsUM5rOu0lZJ0LOIH1chHdxGGcw= +github.com/ipfs/go-graphsync v0.10.0/go.mod h1:cKIshzTaa5rCZjryH5xmSKZVGX9uk1wvwGvz2WEha5Y= +github.com/ipfs/go-graphsync v0.10.4 h1:1WZhyOPxgxLvHTIC2GoLltaBrjZ+JuXC2oKAEiX8f3Y= +github.com/ipfs/go-graphsync v0.10.4/go.mod h1:oei4tnWAKnZ6LPnapZGPYVVbyiKV1UP3f8BeLU7Z4JQ= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= +github.com/ipfs/go-ipfs-blockstore v0.1.6/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU= github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= github.com/ipfs/go-ipfs-blockstore v1.0.3/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= @@ -941,8 +989,9 @@ github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= -github.com/ipfs/go-ipfs-files v0.0.8 h1:8o0oFJkJ8UkO/ABl8T6ac6tKF3+NIpj67aAB6ZpusRg= github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= +github.com/ipfs/go-ipfs-files v0.0.9 h1:OFyOfmuVDu9c5YtjSDORmwXzE6fmZikzZpzsnNkgFEg= +github.com/ipfs/go-ipfs-files v0.0.9/go.mod h1:aFv2uQ/qxWpL/6lidWvnSQmaVqCrf0TBGoUr+C1Fo84= github.com/ipfs/go-ipfs-http-client v0.0.6 h1:k2QllZyP7Fz5hMgsX5hvHfn1WPG9Ngdy5WknQ7JNhBM= github.com/ipfs/go-ipfs-http-client v0.0.6/go.mod h1:8e2dQbntMZKxLfny+tyXJ7bJHZFERp/2vyzZdvkeLMc= github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= @@ -966,6 +1015,8 @@ github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dC github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= +github.com/ipfs/go-ipld-legacy v0.1.0 h1:wxkkc4k8cnvIGIjPO0waJCe7SHEyFgl+yQdafdjGrpA= +github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI= github.com/ipfs/go-ipns v0.1.2 h1:O/s/0ht+4Jl9+VoxoUo0zaHjnZUS+aBQIKTuzdZ/ucI= github.com/ipfs/go-ipns v0.1.2/go.mod h1:ioQ0j02o6jdIVW+bmi18f4k2gRf0AV3kZ9KeHYHICnQ= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= @@ -991,8 +1042,9 @@ github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKy github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= -github.com/ipfs/go-merkledag v0.3.2 h1:MRqj40QkrWkvPswXs4EfSslhZ4RVPRbxwX11js0t1xY= github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-merkledag v0.4.1 
h1:CEEQZnwRkszN06oezuasHwDD823Xcr4p4zluUN9vXqs= +github.com/ipfs/go-merkledag v0.4.1/go.mod h1:56biPaS6e+IS0eXkEt6A8tG+BUQaEIFqDqJuFfQDBoE= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= github.com/ipfs/go-metrics-prometheus v0.0.2/go.mod h1:ELLU99AQQNi+zX6GCGm2lAgnzdSH3u5UVlCdqSXnEks= @@ -1001,8 +1053,9 @@ github.com/ipfs/go-path v0.0.7/go.mod h1:6KTKmeRnBXgqrTvzFrPV3CamxcgvXX/4z79tfAd github.com/ipfs/go-peertaskqueue v0.0.4/go.mod h1:03H8fhyeMfKNFWqzYEVyMbcPUeYrqP1MX6Kd+aN+rMQ= github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.2.0 h1:2cSr7exUGKYyDeUyQ7P/nHPs9P7Ht/B+ROrpN1EJOjc= github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= +github.com/ipfs/go-peertaskqueue v0.6.0 h1:BT1/PuNViVomiz1PnnP5+WmKsTNHrxIDvkZrkj4JhOg= +github.com/ipfs/go-peertaskqueue v0.6.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4= github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= @@ -1019,9 +1072,8 @@ github.com/ipfs/iptb-plugins v0.3.0/go.mod h1:5QtOvckeIw4bY86gSH4fgh3p3gCSMn3FmI github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBHl3g= github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw= github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d/go.mod h1:2Gys8L8MJ6zkh1gktTSXreY63t4UbyvNp5JaudTyxHQ= -github.com/ipld/go-car v0.3.1-0.20210601190600-f512dac51e8e/go.mod h1:wUxBdwOLA9/0HZBi3fnTBzla0MuwlqgJLyrhOg1XaKI= -github.com/ipld/go-car v0.3.1-null-padded-files h1:FMD0Ce4tAM9P5aq7yklw2jnVK3ZuoJ4xK6vkL9VLmxs= -github.com/ipld/go-car v0.3.1-null-padded-files/go.mod h1:wUxBdwOLA9/0HZBi3fnTBzla0MuwlqgJLyrhOg1XaKI= +github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823 h1:8JMSJ0k71fU9lIUrpVwEdoX4KoxiTEX8cZG97v/hTDw= +github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823/go.mod h1:jSlTph+i/q1jLFoiKKeN69KGG0fXpwrcD0izu5C1Tpo= github.com/ipld/go-car/v2 v2.0.0-beta1.0.20210721090610-5a9d1b217d25/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= github.com/ipld/go-car/v2 v2.0.2/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7 h1:6Z0beJSZNsRY+7udoqUl4gQ/tqtrPuRvDySrlsvbqZA= @@ -1034,13 +1086,18 @@ github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVI github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= +github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= +github.com/ipld/go-ipld-prime v0.10.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= -github.com/ipld/go-ipld-prime 
v0.12.0 h1:JapyKWTsJgmhrPI7hfx4V798c/RClr85sXfBZnH1VIw= -github.com/ipld/go-ipld-prime v0.12.0/go.mod h1:hy8b93WleDMRKumOJnTIrr0MbbFbx9GD6Kzxa53Xppc= +github.com/ipld/go-ipld-prime v0.12.3-0.20210930132912-0b3aef3ca569/go.mod h1:PaeLYq8k6dJLmDUSLrzkEpoGV4PEfe/1OtFN/eALOc8= +github.com/ipld/go-ipld-prime v0.12.3 h1:furVobw7UBLQZwlEwfE26tYORy3PAK8VYSgZOSr3JMQ= +github.com/ipld/go-ipld-prime v0.12.3/go.mod h1:PaeLYq8k6dJLmDUSLrzkEpoGV4PEfe/1OtFN/eALOc8= github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= github.com/ipld/go-ipld-prime-proto v0.1.0/go.mod h1:11zp8f3sHVgIqtb/c9Kr5ZGqpnCLF1IVTNOez9TopzE= +github.com/ipld/go-ipld-selector-text-lite v0.0.0 h1:MLU1YUAgd3Z+RfVCXUbvxH1RQjEe+larJ9jmlW1aMgA= +github.com/ipld/go-ipld-selector-text-lite v0.0.0/go.mod h1:U2CQmFb+uWzfIEF3I1arrDa5rwtj00PrpiwwCO+k1RM= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= @@ -1086,6 +1143,7 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= @@ -1115,8 +1173,10 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= @@ -1322,8 +1382,8 @@ github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuD 
github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.8/go.mod h1:gGiPlXdz7mIHd2vfAsHzBNAMqSDkt2UBFwgcITgw1lA= -github.com/libp2p/go-libp2p-peerstore v0.2.9 h1:tVa7siDymmzOl3b3+SxPYpQUCnicmK13y6Re1PqWK+g= -github.com/libp2p/go-libp2p-peerstore v0.2.9/go.mod h1:zhBaLzxiWpNGQ3+uI17G/OIjmOD8GxKyFuHbrZbgs0w= +github.com/libp2p/go-libp2p-peerstore v0.3.0 h1:wp/G0+37+GLr7tu+wE+4GWNrA3uxKg6IPRigIMSS5oQ= +github.com/libp2p/go-libp2p-peerstore v0.3.0/go.mod h1:fNX9WlOENMvdx/YD7YO/5Hkrn8+lQIk5A39BHa1HIrM= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= @@ -1507,6 +1567,8 @@ github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magefile/mage v1.9.0 h1:t3AU2wNwehMCW97vuqQLtw6puppWXHO+O2MHo5a50XE= +github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1540,8 +1602,9 @@ github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -1551,8 +1614,9 @@ github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2y github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod 
h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -1590,6 +1654,8 @@ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4S github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5/go.mod h1:JWhYAp2EXqUtsxTKdeGlY8Wp44M7VxThC9FEoNGi2IE= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= @@ -1598,6 +1664,7 @@ github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -1608,10 +1675,12 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= @@ -1620,8 +1689,9 @@ github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjW github.com/mr-tron/base58 v1.2.0 
h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base32 v0.0.4 h1:+qMh4a2f37b4xTNs6mqitDinryCI+tfO2dRVMN9mjSE= +github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM= github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= @@ -1635,8 +1705,9 @@ github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= -github.com/multiformats/go-multiaddr v0.4.0 h1:hL/K4ZJhJ5PTw3nwylq9lGU5yArzcAroZmex1ghSEkQ= github.com/multiformats/go-multiaddr v0.4.0/go.mod h1:YcpyLH8ZPudLxQlemYBPhSm0/oCXAT8Z4mzFpyoPyRc= +github.com/multiformats/go-multiaddr v0.4.1 h1:Pq37uLx3hsyNlTDir7FZyU8+cFCTqd5y1KiM2IzOutI= +github.com/multiformats/go-multiaddr v0.4.1/go.mod h1:3afI9HfVW8csiF8UZqtpYRiDyew8pRX7qLIGHu9FLuM= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= @@ -1671,8 +1742,9 @@ github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= -github.com/multiformats/go-multihash v0.0.15 h1:hWOPdrNqDjwHDx82vsYGSDZNyktOJJ2dzZJzFkOV1jM= github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= +github.com/multiformats/go-multihash v0.0.16 h1:D2qsyy1WVculJbGv69pWmQ36ehxFoA5NiIUr1OEs6qI= +github.com/multiformats/go-multihash v0.0.16/go.mod h1:zhfEIgVnB/rPMfxgFw15ZmGoNaKyNUIE4IWHG/kC+Ag= github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= @@ -1692,10 +1764,16 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/jwt v1.2.2/go.mod 
h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= +github.com/nats-io/jwt/v2 v2.0.3/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats-server/v2 v2.5.0/go.mod h1:Kj86UtrXAL6LwYRA6H4RqzkHhK0Vcv2ZnKD5WbQ1t3g= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nats.go v1.12.1/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= @@ -1703,8 +1781,8 @@ github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c h1:5bFTChQxSKNwy8ALwOebjekYExl9HTT9urdawqC95tA= github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c/go.mod h1:7qN3Y0BvzRUf4LofcoJplQL10lsFDb4PYlePTVwrP28= -github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg= -github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= +github.com/nkovacs/streamquote v1.0.0 h1:PmVIV08Zlx2lZK5fFZlMZ04eHcDTIFJCv/5/0twVUow= +github.com/nkovacs/streamquote v1.0.0/go.mod h1:BN+NaZ2CmdKqUuTUXUEm9j95B2TRbpOWpxbJYzzgUsc= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -1758,6 +1836,7 @@ github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTm github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -1766,6 +1845,7 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/performancecopilot/speed/v4 v4.0.0/go.mod 
h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -1789,6 +1869,7 @@ github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXx github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls= github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -1825,6 +1906,7 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= @@ -1845,6 +1927,8 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/prometheus v0.0.0-20200609090129-a6600f564e3c/go.mod h1:S5n0C6tSgdnwWshBUceRx5G1OsjLv/EeZ9t3wIfEtsY= +github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf33yKI+pyX6T8= +github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= github.com/raulk/go-watchdog v1.0.1 h1:qgm3DIJAeb+2byneLrQJ7kvmDLGxN2vy3apXyGaDKN4= @@ -1957,7 +2041,9 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= 
github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -1993,6 +2079,7 @@ github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixU github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.23.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.28.0+incompatible h1:G4QSBfvPKvg5ZM2j9MrJFdfI5iSljY/WnJqOGFao6HI= github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v1.5.1-0.20181102163054-1fc5c315e03c/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= @@ -2045,6 +2132,7 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:f github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210303213153-67a261a1d291/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20210713220151-be142a5ae1a8 h1:TEv7MId88TyIqIUL4hbf9otOookIolMxlEbN0ro671Y= github.com/whyrusleeping/cbor-gen v0.0.0-20210713220151-be142a5ae1a8/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= @@ -2103,6 +2191,10 @@ go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -2138,8 +2230,9 @@ go.uber.org/dig v1.10.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= go.uber.org/fx v1.9.0 h1:7OAz8ucp35AU8eydejpYG7QrbE8rLKzGhHbZlJi5LYY= go.uber.org/fx v1.9.0/go.mod h1:mFdUyAUuJ3w4jAckiKSKbldsxy1ojpAMJ+dVZg5Y0Aw= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod 
h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -2155,9 +2248,11 @@ go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= @@ -2187,6 +2282,7 @@ golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -2195,6 +2291,7 @@ golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -2206,12 +2303,15 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e h1:VvfwVmMH40bpMeizC9/K7ipM5Qjucuu16RWfneFPyhQ= golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210915214749-c084706c2272 h1:3erb+vDS8lU1sxfDHF4/hhWyaXnhIaO+7RgL4fDZORA= +golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2245,8 +2345,9 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= @@ -2329,8 +2430,10 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210917221730-978cfadd31cf h1:R150MpwJIv1MpS0N/pc+NhTM8ajzvlmxlY5OYsrevXQ= +golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2367,6 +2470,7 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2399,6 +2503,7 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2454,6 +2559,8 @@ golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2463,8 +2570,9 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf h1:2ucpDCmfkl8Bd/FsLtiD653Wf96cW37s+iGx93zsu4k= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 h1:J27LZFQBFoihqXoegpscI10HpjZ7B5WQLLKL2FZXQKw= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M= @@ -2486,6 +2594,7 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2518,6 +2627,7 @@ golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2566,6 +2676,7 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1-0.20210225150353-54dc8c5edb56/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2584,7 +2695,6 @@ google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+ google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod 
h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -2649,8 +2759,10 @@ google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 h1:PDIOdWxZ8eRizhKa1AAvY53xsvLB1cWorMjslvY3VA8= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4 h1:ysnBoUyeL/H6RCvNRhWHjKoDEmguI+mPU+qHgK8qv/w= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= @@ -2678,6 +2790,7 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -2774,6 +2887,7 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/testplans/lotus-soup/rfwp/chain_state.go b/testplans/lotus-soup/rfwp/chain_state.go index d91acdff9..38b8b504e 100644 --- a/testplans/lotus-soup/rfwp/chain_state.go +++ 
b/testplans/lotus-soup/rfwp/chain_state.go @@ -31,7 +31,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - tstats "github.com/filecoin-project/lotus/tools/stats" + tsync "github.com/filecoin-project/lotus/tools/stats/sync" ) func UpdateChainState(t *testkit.TestEnvironment, m *testkit.LotusMiner) error { @@ -40,7 +40,7 @@ func UpdateChainState(t *testkit.TestEnvironment, m *testkit.LotusMiner) error { ctx := context.Background() - tipsetsCh, err := tstats.GetTips(ctx, &v0api.WrapperV1Full{FullNode: m.FullApi}, abi.ChainEpoch(height), headlag) + tipsetsCh, err := tsync.BufferedTipsetChannel(ctx, &v0api.WrapperV1Full{FullNode: m.FullApi}, abi.ChainEpoch(height), headlag) if err != nil { return err } diff --git a/testplans/lotus-soup/rfwp/html_chain_state.go b/testplans/lotus-soup/rfwp/html_chain_state.go index 7a3d56be4..3c840facd 100644 --- a/testplans/lotus-soup/rfwp/html_chain_state.go +++ b/testplans/lotus-soup/rfwp/html_chain_state.go @@ -11,7 +11,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/cli" - tstats "github.com/filecoin-project/lotus/tools/stats" + tsync "github.com/filecoin-project/lotus/tools/stats/sync" "github.com/ipfs/go-cid" ) @@ -22,8 +22,9 @@ func FetchChainState(t *testkit.TestEnvironment, m *testkit.LotusMiner) error { ctx := context.Background() api := m.FullApi - tipsetsCh, err := tstats.GetTips(ctx, &v0api.WrapperV1Full{FullNode: m.FullApi}, abi.ChainEpoch(height), headlag) + tipsetsCh, err := tsync.BufferedTipsetChannel(ctx, &v0api.WrapperV1Full{FullNode: m.FullApi}, abi.ChainEpoch(height), headlag) if err != nil { + return err } diff --git a/testplans/lotus-soup/testkit/deals.go b/testplans/lotus-soup/testkit/deals.go index f0910537d..703e6888a 100644 --- a/testplans/lotus-soup/testkit/deals.go +++ b/testplans/lotus-soup/testkit/deals.go @@ -12,7 +12,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/ipfs/go-cid" - tstats "github.com/filecoin-project/lotus/tools/stats" + tsync "github.com/filecoin-project/lotus/tools/stats/sync" ) func StartDeal(ctx context.Context, minerActorAddr address.Address, client api.FullNode, fcid cid.Cid, fastRetrieval bool) *cid.Cid { @@ -46,7 +46,7 @@ func WaitDealSealed(t *TestEnvironment, ctx context.Context, client api.FullNode cctx, cancel := context.WithCancel(ctx) defer cancel() - tipsetsCh, err := tstats.GetTips(cctx, &v0api.WrapperV1Full{FullNode: client}, abi.ChainEpoch(height), headlag) + tipsetsCh, err := tsync.BufferedTipsetChannel(cctx, &v0api.WrapperV1Full{FullNode: client}, abi.ChainEpoch(height), headlag) if err != nil { panic(err) } diff --git a/testplans/lotus-soup/testkit/node.go b/testplans/lotus-soup/testkit/node.go index e70f58e38..9506c4bf4 100644 --- a/testplans/lotus-soup/testkit/node.go +++ b/testplans/lotus-soup/testkit/node.go @@ -8,8 +8,8 @@ import ( "sort" "time" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/chain/beacon" "github.com/filecoin-project/lotus/chain/wallet" "github.com/filecoin-project/lotus/metrics" @@ -17,7 +17,11 @@ import ( "github.com/filecoin-project/lotus/node" "github.com/filecoin-project/lotus/node/modules/dtypes" modtest "github.com/filecoin-project/lotus/node/modules/testing" - tstats "github.com/filecoin-project/lotus/tools/stats" + + tinflux 
"github.com/filecoin-project/lotus/tools/stats/influx" + tipldstore "github.com/filecoin-project/lotus/tools/stats/ipldstore" + tpoints "github.com/filecoin-project/lotus/tools/stats/points" + tsync "github.com/filecoin-project/lotus/tools/stats/sync" influxdb "github.com/kpacha/opencensus-influxdb" ma "github.com/multiformats/go-multiaddr" @@ -234,7 +238,7 @@ func collectStats(t *TestEnvironment, ctx context.Context, api api.FullNode) err influxPass := "" influxDb := "testground" - influx, err := tstats.InfluxClient(influxAddr, influxUser, influxPass) + influxClient, err := tinflux.NewClient(influxAddr, influxUser, influxPass) if err != nil { t.RecordMessage(err.Error()) return err @@ -246,7 +250,38 @@ func collectStats(t *TestEnvironment, ctx context.Context, api api.FullNode) err go func() { time.Sleep(15 * time.Second) t.RecordMessage("calling tstats.Collect") - tstats.Collect(context.Background(), &v0api.WrapperV1Full{FullNode: api}, influx, influxDb, height, headlag) + + store, err := tipldstore.NewApiIpldStore(ctx, api, 1024) + if err != nil { + t.RecordMessage(err.Error()) + return + } + + collector, err := tpoints.NewChainPointCollector(ctx, store, api) + if err != nil { + t.RecordMessage(err.Error()) + return + } + + tipsets, err := tsync.BufferedTipsetChannel(ctx, api, abi.ChainEpoch(height), headlag) + if err != nil { + t.RecordMessage(err.Error()) + return + } + + wq := tinflux.NewWriteQueue(ctx, influxClient) + defer wq.Close() + + for tipset := range tipsets { + if nb, err := collector.Collect(ctx, tipset); err != nil { + t.RecordMessage(err.Error()) + return + } else { + nb.SetDatabase(influxDb) + wq.AddBatch(nb) + } + } + }() return nil diff --git a/testplans/lotus-soup/testkit/retrieval.go b/testplans/lotus-soup/testkit/retrieval.go index de3dee6be..3d6683d00 100644 --- a/testplans/lotus-soup/testkit/retrieval.go +++ b/testplans/lotus-soup/testkit/retrieval.go @@ -11,6 +11,7 @@ import ( "time" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" "github.com/ipfs/go-cid" files "github.com/ipfs/go-ipfs-files" ipld "github.com/ipfs/go-ipld-format" @@ -51,7 +52,7 @@ func RetrieveData(t *TestEnvironment, ctx context.Context, client api.FullNode, IsCAR: carExport, } t1 = time.Now() - err = client.ClientRetrieve(ctx, offers[0].Order(caddr), ref) + err = (&v0api.WrapperV1Full{FullNode: client}).ClientRetrieve(ctx, v0api.OfferOrder(offers[0], caddr), ref) if err != nil { return err } diff --git a/tools/stats/collect.go b/tools/stats/collect.go deleted file mode 100644 index e33ec994b..000000000 --- a/tools/stats/collect.go +++ /dev/null @@ -1,63 +0,0 @@ -package stats - -import ( - "context" - "time" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/api/v0api" - client "github.com/influxdata/influxdb1-client/v2" -) - -func Collect(ctx context.Context, api v0api.FullNode, influx client.Client, database string, height int64, headlag int) { - tipsetsCh, err := GetTips(ctx, api, abi.ChainEpoch(height), headlag) - if err != nil { - log.Fatal(err) - } - - wq := NewInfluxWriteQueue(ctx, influx) - defer wq.Close() - - for tipset := range tipsetsCh { - log.Infow("Collect stats", "height", tipset.Height()) - pl := NewPointList() - height := tipset.Height() - - if err := RecordTipsetPoints(ctx, api, pl, tipset); err != nil { - log.Warnw("Failed to record tipset", "height", height, "error", err) - continue - } - - if err := RecordTipsetMessagesPoints(ctx, api, pl, tipset); err != nil { - log.Warnw("Failed to record 
messages", "height", height, "error", err) - continue - } - - if err := RecordTipsetStatePoints(ctx, api, pl, tipset); err != nil { - log.Warnw("Failed to record state", "height", height, "error", err) - continue - } - - // Instead of having to pass around a bunch of generic stuff we want for each point - // we will just add them at the end. - - tsTimestamp := time.Unix(int64(tipset.MinTimestamp()), int64(0)) - - nb, err := InfluxNewBatch() - if err != nil { - log.Fatal(err) - } - - for _, pt := range pl.Points() { - pt.SetTime(tsTimestamp) - - nb.AddPoint(NewPointFrom(pt)) - } - - nb.SetDatabase(database) - - log.Infow("Adding points", "count", len(nb.Points()), "height", tipset.Height()) - - wq.AddBatch(nb) - } -} diff --git a/tools/stats/head_buffer.go b/tools/stats/head_buffer.go deleted file mode 100644 index 0a7c63e6e..000000000 --- a/tools/stats/head_buffer.go +++ /dev/null @@ -1,47 +0,0 @@ -package stats - -import ( - "container/list" - - "github.com/filecoin-project/lotus/api" -) - -type headBuffer struct { - buffer *list.List - size int -} - -func newHeadBuffer(size int) *headBuffer { - buffer := list.New() - buffer.Init() - - return &headBuffer{ - buffer: buffer, - size: size, - } -} - -func (h *headBuffer) push(hc *api.HeadChange) (rethc *api.HeadChange) { - if h.buffer.Len() == h.size { - var ok bool - - el := h.buffer.Front() - rethc, ok = el.Value.(*api.HeadChange) - if !ok { - panic("Value from list is not the correct type") - } - - h.buffer.Remove(el) - } - - h.buffer.PushBack(hc) - - return -} - -func (h *headBuffer) pop() { - el := h.buffer.Back() - if el != nil { - h.buffer.Remove(el) - } -} diff --git a/tools/stats/head_buffer_test.go b/tools/stats/head_buffer_test.go deleted file mode 100644 index 4059f730e..000000000 --- a/tools/stats/head_buffer_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package stats - -import ( - "testing" - - "github.com/filecoin-project/lotus/api" - "github.com/stretchr/testify/require" -) - -func TestHeadBuffer(t *testing.T) { - - t.Run("Straight push through", func(t *testing.T) { - hb := newHeadBuffer(5) - require.Nil(t, hb.push(&api.HeadChange{Type: "1"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "2"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "3"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "4"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "5"})) - - hc := hb.push(&api.HeadChange{Type: "6"}) - require.Equal(t, hc.Type, "1") - }) - - t.Run("Reverts", func(t *testing.T) { - hb := newHeadBuffer(5) - require.Nil(t, hb.push(&api.HeadChange{Type: "1"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "2"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "3"})) - hb.pop() - require.Nil(t, hb.push(&api.HeadChange{Type: "3a"})) - hb.pop() - require.Nil(t, hb.push(&api.HeadChange{Type: "3b"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "4"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "5"})) - - hc := hb.push(&api.HeadChange{Type: "6"}) - require.Equal(t, hc.Type, "1") - hc = hb.push(&api.HeadChange{Type: "7"}) - require.Equal(t, hc.Type, "2") - hc = hb.push(&api.HeadChange{Type: "8"}) - require.Equal(t, hc.Type, "3b") - }) -} diff --git a/tools/stats/headbuffer/head_buffer.go b/tools/stats/headbuffer/head_buffer.go new file mode 100644 index 000000000..5f668ab6e --- /dev/null +++ b/tools/stats/headbuffer/head_buffer.go @@ -0,0 +1,56 @@ +package headbuffer + +import ( + "container/list" + + "github.com/filecoin-project/lotus/api" +) + +type HeadChangeStackBuffer struct { + buffer *list.List + size int +} + +// 
NewHeadChangeStackBuffer buffers HeadChange events to avoid having to +// deal with revert changes. Initialized size should be the average reorg +// size + 1 +func NewHeadChangeStackBuffer(size int) *HeadChangeStackBuffer { + buffer := list.New() + buffer.Init() + + return &HeadChangeStackBuffer{ + buffer: buffer, + size: size, + } +} + +// Push adds a HeadChange to the stack buffer. If the length of +// the stack buffer would grow larger than the initialized size, the +// oldest HeadChange is returned. +func (h *HeadChangeStackBuffer) Push(hc *api.HeadChange) (rethc *api.HeadChange) { + if h.buffer.Len() >= h.size { + var ok bool + + el := h.buffer.Front() + rethc, ok = el.Value.(*api.HeadChange) + if !ok { + // This shouldn't be possible; this method is typed and is the only place data is + // pushed to the buffer. + panic("A cosmic ray made me do it") + } + + h.buffer.Remove(el) + } + + h.buffer.PushBack(hc) + + return +} + +// Pop removes the last added HeadChange +func (h *HeadChangeStackBuffer) Pop() { + el := h.buffer.Back() + if el != nil { + h.buffer.Remove(el) + } +} diff --git a/tools/stats/headbuffer/head_buffer_test.go b/tools/stats/headbuffer/head_buffer_test.go new file mode 100644 index 000000000..8a748c714 --- /dev/null +++ b/tools/stats/headbuffer/head_buffer_test.go @@ -0,0 +1,42 @@ +package headbuffer + +import ( + "testing" + + "github.com/filecoin-project/lotus/api" + "github.com/stretchr/testify/require" +) + +func TestHeadBuffer(t *testing.T) { + t.Run("Straight Push through", func(t *testing.T) { + hb := NewHeadChangeStackBuffer(5) + require.Nil(t, hb.Push(&api.HeadChange{Type: "1"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "2"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "3"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "4"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "5"})) + + hc := hb.Push(&api.HeadChange{Type: "6"}) + require.Equal(t, hc.Type, "1") + }) + + t.Run("Reverts", func(t *testing.T) { + hb := NewHeadChangeStackBuffer(5) + require.Nil(t, hb.Push(&api.HeadChange{Type: "1"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "2"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "3"})) + hb.Pop() + require.Nil(t, hb.Push(&api.HeadChange{Type: "3a"})) + hb.Pop() + require.Nil(t, hb.Push(&api.HeadChange{Type: "3b"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "4"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "5"})) + + hc := hb.Push(&api.HeadChange{Type: "6"}) + require.Equal(t, hc.Type, "1") + hc = hb.Push(&api.HeadChange{Type: "7"}) + require.Equal(t, hc.Type, "2") + hc = hb.Push(&api.HeadChange{Type: "8"}) + require.Equal(t, hc.Type, "3b") + }) +} diff --git a/tools/stats/influx/influx.go b/tools/stats/influx/influx.go new file mode 100644 index 000000000..65fb4c0b9 --- /dev/null +++ b/tools/stats/influx/influx.go @@ -0,0 +1,133 @@ +package influx + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/filecoin-project/lotus/build" + + _ "github.com/influxdata/influxdb1-client" + models "github.com/influxdata/influxdb1-client/models" + client "github.com/influxdata/influxdb1-client/v2" +) + +type PointList struct { + points []models.Point +} + +func NewPointList() *PointList { + return &PointList{} +} + +func (pl *PointList) AddPoint(p models.Point) { + pl.points = append(pl.points, p) +} + +func (pl *PointList) Points() []models.Point { + return pl.points +} + +type WriteQueue struct { + ch chan client.BatchPoints +} + +func NewWriteQueue(ctx context.Context, influx client.Client) *WriteQueue { + ch := make(chan
client.BatchPoints, 128) + + maxRetries := 10 + + go func() { + main: + for { + select { + case <-ctx.Done(): + return + case batch := <-ch: + for i := 0; i < maxRetries; i++ { + if err := influx.Write(batch); err != nil { + log.Warnw("Failed to write batch", "error", err) + build.Clock.Sleep(3 * time.Second) + continue + } + + continue main + } + + log.Error("dropping batch due to failure to write") + } + } + }() + + return &WriteQueue{ + ch: ch, + } +} + +func (i *WriteQueue) AddBatch(bp client.BatchPoints) { + i.ch <- bp +} + +func (i *WriteQueue) Close() { + close(i.ch) +} + +func NewClient(addr, user, pass string) (client.Client, error) { + return client.NewHTTPClient(client.HTTPConfig{ + Addr: addr, + Username: user, + Password: pass, + }) +} + +func NewBatch() (client.BatchPoints, error) { + return client.NewBatchPoints(client.BatchPointsConfig{}) +} + +func NewPoint(name string, value interface{}) models.Point { + pt, _ := models.NewPoint(name, models.Tags{}, + map[string]interface{}{"value": value}, build.Clock.Now().UTC()) + return pt +} + +func NewPointFrom(p models.Point) *client.Point { + return client.NewPointFrom(p) +} + +func ResetDatabase(influx client.Client, database string) error { + log.Debug("resetting database") + q := client.NewQuery(fmt.Sprintf(`DROP DATABASE "%s"; CREATE DATABASE "%s";`, database, database), "", "") + _, err := influx.Query(q) + if err != nil { + return err + } + log.Infow("database reset", "database", database) + return nil +} + +func GetLastRecordedHeight(influx client.Client, database string) (int64, error) { + log.Debug("retrieving last record height") + q := client.NewQuery(`SELECT "value" FROM "chain.height" ORDER BY time DESC LIMIT 1`, database, "") + res, err := influx.Query(q) + if err != nil { + return 0, err + } + + if len(res.Results) == 0 { + return 0, fmt.Errorf("No results found for last recorded height") + } + + if len(res.Results[0].Series) == 0 { + return 0, fmt.Errorf("No results found for last recorded height") + } + + height, err := (res.Results[0].Series[0].Values[0][1].(json.Number)).Int64() + if err != nil { + return 0, err + } + + log.Infow("last record height", "height", height) + + return height, nil +} diff --git a/tools/stats/influx/log.go b/tools/stats/influx/log.go new file mode 100644 index 000000000..b3637d6b0 --- /dev/null +++ b/tools/stats/influx/log.go @@ -0,0 +1,7 @@ +package influx + +import ( + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("stats/influx") diff --git a/tools/stats/ipldstore/ipldstore.go b/tools/stats/ipldstore/ipldstore.go new file mode 100644 index 000000000..9adc599fd --- /dev/null +++ b/tools/stats/ipldstore/ipldstore.go @@ -0,0 +1,92 @@ +package ipldstore + +import ( + "bytes" + "context" + "fmt" + + "github.com/filecoin-project/lotus/tools/stats/metrics" + + lru "github.com/hashicorp/golang-lru" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "go.opencensus.io/stats" +) + +type ApiIpldStore struct { + ctx context.Context + api apiIpldStoreApi + cache *lru.TwoQueueCache + cacheSize int +} + +type apiIpldStoreApi interface { + ChainReadObj(context.Context, cid.Cid) ([]byte, error) +} + +func NewApiIpldStore(ctx context.Context, api apiIpldStoreApi, cacheSize int) (*ApiIpldStore, error) { + store := &ApiIpldStore{ + ctx: ctx, + api: api, + cacheSize: cacheSize, + } + + cache, err := lru.New2Q(store.cacheSize) + if err != nil { + return nil, err + } + + store.cache = cache + + return store, nil +} + +func (ht *ApiIpldStore) Context() context.Context 
{ + return ht.ctx +} + +func (ht *ApiIpldStore) read(ctx context.Context, c cid.Cid) ([]byte, error) { + stats.Record(ctx, metrics.IpldStoreCacheMiss.M(1)) + done := metrics.Timer(ctx, metrics.IpldStoreReadDuration) + defer done() + return ht.api.ChainReadObj(ctx, c) +} + +func (ht *ApiIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error { + done := metrics.Timer(ctx, metrics.IpldStoreGetDuration) + defer done() + defer func() { + stats.Record(ctx, metrics.IpldStoreCacheSize.M(int64(ht.cacheSize))) + stats.Record(ctx, metrics.IpldStoreCacheLength.M(int64(ht.cache.Len()))) + }() + + var raw []byte + + if a, ok := ht.cache.Get(c); ok { + stats.Record(ctx, metrics.IpldStoreCacheHit.M(1)) + raw = a.([]byte) + } else { + bs, err := ht.read(ctx, c) + if err != nil { + return err + } + + raw = bs + } + + cu, ok := out.(cbg.CBORUnmarshaler) + if ok { + if err := cu.UnmarshalCBOR(bytes.NewReader(raw)); err != nil { + return err + } + + ht.cache.Add(c, raw) + return nil + } + + return fmt.Errorf("Object does not implement CBORUnmarshaler") +} + +func (ht *ApiIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) { + return cid.Undef, fmt.Errorf("Put is not implemented on ApiIpldStore") +} diff --git a/tools/stats/metrics.go b/tools/stats/metrics.go deleted file mode 100644 index ca3f26336..000000000 --- a/tools/stats/metrics.go +++ /dev/null @@ -1,418 +0,0 @@ -package stats - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "math" - "math/big" - "strings" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/api/v0api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/power" - "github.com/filecoin-project/lotus/chain/actors/builtin/reward" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - - "github.com/ipfs/go-cid" - "github.com/multiformats/go-multihash" - "golang.org/x/xerrors" - - cbg "github.com/whyrusleeping/cbor-gen" - - _ "github.com/influxdata/influxdb1-client" - models "github.com/influxdata/influxdb1-client/models" - client "github.com/influxdata/influxdb1-client/v2" - - logging "github.com/ipfs/go-log/v2" -) - -var log = logging.Logger("stats") - -type PointList struct { - points []models.Point -} - -func NewPointList() *PointList { - return &PointList{} -} - -func (pl *PointList) AddPoint(p models.Point) { - pl.points = append(pl.points, p) -} - -func (pl *PointList) Points() []models.Point { - return pl.points -} - -type InfluxWriteQueue struct { - ch chan client.BatchPoints -} - -func NewInfluxWriteQueue(ctx context.Context, influx client.Client) *InfluxWriteQueue { - ch := make(chan client.BatchPoints, 128) - - maxRetries := 10 - - go func() { - main: - for { - select { - case <-ctx.Done(): - return - case batch := <-ch: - for i := 0; i < maxRetries; i++ { - if err := influx.Write(batch); err != nil { - log.Warnw("Failed to write batch", "error", err) - build.Clock.Sleep(15 * time.Second) - continue - } - - continue main - } - - log.Error("Dropping batch due to failure to write") - } - } - }() - - return &InfluxWriteQueue{ - ch: ch, - } -} - -func (i *InfluxWriteQueue) AddBatch(bp client.BatchPoints) { - i.ch <- bp -} - -func (i *InfluxWriteQueue) Close() { - close(i.ch) -} - -func InfluxClient(addr, user, pass string) (client.Client, error) { - return client.NewHTTPClient(client.HTTPConfig{ - Addr: addr, - Username: user, - Password: pass, - }) -} - -func InfluxNewBatch() (client.BatchPoints, error) { - 
return client.NewBatchPoints(client.BatchPointsConfig{}) -} - -func NewPoint(name string, value interface{}) models.Point { - pt, _ := models.NewPoint(name, models.Tags{}, - map[string]interface{}{"value": value}, build.Clock.Now().UTC()) - return pt -} - -func NewPointFrom(p models.Point) *client.Point { - return client.NewPointFrom(p) -} - -func RecordTipsetPoints(ctx context.Context, api v0api.FullNode, pl *PointList, tipset *types.TipSet) error { - cids := []string{} - for _, cid := range tipset.Cids() { - cids = append(cids, cid.String()) - } - - p := NewPoint("chain.height", int64(tipset.Height())) - p.AddTag("tipset", strings.Join(cids, " ")) - pl.AddPoint(p) - - p = NewPoint("chain.block_count", len(cids)) - pl.AddPoint(p) - - tsTime := time.Unix(int64(tipset.MinTimestamp()), int64(0)) - p = NewPoint("chain.blocktime", tsTime.Unix()) - pl.AddPoint(p) - - totalGasLimit := int64(0) - totalUniqGasLimit := int64(0) - seen := make(map[cid.Cid]struct{}) - for _, blockheader := range tipset.Blocks() { - bs, err := blockheader.Serialize() - if err != nil { - return err - } - p := NewPoint("chain.election", blockheader.ElectionProof.WinCount) - p.AddTag("miner", blockheader.Miner.String()) - pl.AddPoint(p) - - p = NewPoint("chain.blockheader_size", len(bs)) - pl.AddPoint(p) - - msgs, err := api.ChainGetBlockMessages(ctx, blockheader.Cid()) - if err != nil { - return xerrors.Errorf("ChainGetBlockMessages failed: %w", msgs) - } - for _, m := range msgs.BlsMessages { - c := m.Cid() - totalGasLimit += m.GasLimit - if _, ok := seen[c]; !ok { - totalUniqGasLimit += m.GasLimit - seen[c] = struct{}{} - } - } - for _, m := range msgs.SecpkMessages { - c := m.Cid() - totalGasLimit += m.Message.GasLimit - if _, ok := seen[c]; !ok { - totalUniqGasLimit += m.Message.GasLimit - seen[c] = struct{}{} - } - } - } - p = NewPoint("chain.gas_limit_total", totalGasLimit) - pl.AddPoint(p) - p = NewPoint("chain.gas_limit_uniq_total", totalUniqGasLimit) - pl.AddPoint(p) - - { - baseFeeIn := tipset.Blocks()[0].ParentBaseFee - newBaseFee := store.ComputeNextBaseFee(baseFeeIn, totalUniqGasLimit, len(tipset.Blocks()), tipset.Height()) - - baseFeeRat := new(big.Rat).SetFrac(newBaseFee.Int, new(big.Int).SetUint64(build.FilecoinPrecision)) - baseFeeFloat, _ := baseFeeRat.Float64() - p = NewPoint("chain.basefee", baseFeeFloat) - pl.AddPoint(p) - - baseFeeChange := new(big.Rat).SetFrac(newBaseFee.Int, baseFeeIn.Int) - baseFeeChangeF, _ := baseFeeChange.Float64() - p = NewPoint("chain.basefee_change_log", math.Log(baseFeeChangeF)/math.Log(1.125)) - pl.AddPoint(p) - } - { - blks := int64(len(cids)) - p = NewPoint("chain.gas_fill_ratio", float64(totalGasLimit)/float64(blks*build.BlockGasTarget)) - pl.AddPoint(p) - p = NewPoint("chain.gas_capacity_ratio", float64(totalUniqGasLimit)/float64(blks*build.BlockGasTarget)) - pl.AddPoint(p) - p = NewPoint("chain.gas_waste_ratio", float64(totalGasLimit-totalUniqGasLimit)/float64(blks*build.BlockGasTarget)) - pl.AddPoint(p) - } - - return nil -} - -type ApiIpldStore struct { - ctx context.Context - api apiIpldStoreApi -} - -type apiIpldStoreApi interface { - ChainReadObj(context.Context, cid.Cid) ([]byte, error) -} - -func NewApiIpldStore(ctx context.Context, api apiIpldStoreApi) *ApiIpldStore { - return &ApiIpldStore{ctx, api} -} - -func (ht *ApiIpldStore) Context() context.Context { - return ht.ctx -} - -func (ht *ApiIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error { - raw, err := ht.api.ChainReadObj(ctx, c) - if err != nil { - return err - } - - cu, ok := 
out.(cbg.CBORUnmarshaler) - if ok { - if err := cu.UnmarshalCBOR(bytes.NewReader(raw)); err != nil { - return err - } - return nil - } - - return fmt.Errorf("Object does not implement CBORUnmarshaler") -} - -func (ht *ApiIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) { - return cid.Undef, fmt.Errorf("Put is not implemented on ApiIpldStore") -} - -func RecordTipsetStatePoints(ctx context.Context, api v0api.FullNode, pl *PointList, tipset *types.TipSet) error { - attoFil := types.NewInt(build.FilecoinPrecision).Int - - //TODO: StatePledgeCollateral API is not implemented and is commented out - re-enable this block once the API is implemented again. - //pc, err := api.StatePledgeCollateral(ctx, tipset.Key()) - //if err != nil { - //return err - //} - - //pcFil := new(big.Rat).SetFrac(pc.Int, attoFil) - //pcFilFloat, _ := pcFil.Float64() - //p := NewPoint("chain.pledge_collateral", pcFilFloat) - //pl.AddPoint(p) - - netBal, err := api.WalletBalance(ctx, reward.Address) - if err != nil { - return err - } - - netBalFil := new(big.Rat).SetFrac(netBal.Int, attoFil) - netBalFilFloat, _ := netBalFil.Float64() - p := NewPoint("network.balance", netBalFilFloat) - pl.AddPoint(p) - - totalPower, err := api.StateMinerPower(ctx, address.Address{}, tipset.Key()) - if err != nil { - return err - } - - // We divide the power into gibibytes because 2^63 bytes is 8 exbibytes which is smaller than the Filecoin Mainnet. - // Dividing by a gibibyte gives us more room to work with. This will allow the dashboard to report network and miner - // sizes up to 8192 yobibytes. - gibi := types.NewInt(1024 * 1024 * 1024) - p = NewPoint("chain.power", types.BigDiv(totalPower.TotalPower.QualityAdjPower, gibi).Int64()) - pl.AddPoint(p) - - powerActor, err := api.StateGetActor(ctx, power.Address, tipset.Key()) - if err != nil { - return err - } - - powerActorState, err := power.Load(&ApiIpldStore{ctx, api}, powerActor) - if err != nil { - return err - } - - return powerActorState.ForEachClaim(func(addr address.Address, claim power.Claim) error { - // BigCmp returns 0 if values are equal - if types.BigCmp(claim.QualityAdjPower, types.NewInt(0)) == 0 { - return nil - } - - p = NewPoint("chain.miner_power", types.BigDiv(claim.QualityAdjPower, gibi).Int64()) - p.AddTag("miner", addr.String()) - pl.AddPoint(p) - - return nil - }) -} - -type msgTag struct { - actor string - method uint64 - exitcode uint8 -} - -func RecordTipsetMessagesPoints(ctx context.Context, api v0api.FullNode, pl *PointList, tipset *types.TipSet) error { - cids := tipset.Cids() - if len(cids) == 0 { - return fmt.Errorf("no cids in tipset") - } - - msgs, err := api.ChainGetParentMessages(ctx, cids[0]) - if err != nil { - return err - } - - recp, err := api.ChainGetParentReceipts(ctx, cids[0]) - if err != nil { - return err - } - - msgn := make(map[msgTag][]cid.Cid) - - totalGasUsed := int64(0) - for _, r := range recp { - totalGasUsed += r.GasUsed - } - p := NewPoint("chain.gas_used_total", totalGasUsed) - pl.AddPoint(p) - - for i, msg := range msgs { - // FIXME: use float so this doesn't overflow - // FIXME: this doesn't work as time points get overridden - p := NewPoint("chain.message_gaspremium", msg.Message.GasPremium.Int64()) - pl.AddPoint(p) - p = NewPoint("chain.message_gasfeecap", msg.Message.GasFeeCap.Int64()) - pl.AddPoint(p) - - bs, err := msg.Message.Serialize() - if err != nil { - return err - } - - p = NewPoint("chain.message_size", len(bs)) - pl.AddPoint(p) - - actor, err := api.StateGetActor(ctx, msg.Message.To, 
tipset.Key()) - if err != nil { - return err - } - - dm, err := multihash.Decode(actor.Code.Hash()) - if err != nil { - continue - } - tag := msgTag{ - actor: string(dm.Digest), - method: uint64(msg.Message.Method), - exitcode: uint8(recp[i].ExitCode), - } - - found := false - for _, c := range msgn[tag] { - if c.Equals(msg.Cid) { - found = true - break - } - } - if !found { - msgn[tag] = append(msgn[tag], msg.Cid) - } - } - - for t, m := range msgn { - p := NewPoint("chain.message_count", len(m)) - p.AddTag("actor", t.actor) - p.AddTag("method", fmt.Sprintf("%d", t.method)) - p.AddTag("exitcode", fmt.Sprintf("%d", t.exitcode)) - pl.AddPoint(p) - - } - - return nil -} - -func ResetDatabase(influx client.Client, database string) error { - log.Info("Resetting database") - q := client.NewQuery(fmt.Sprintf(`DROP DATABASE "%s"; CREATE DATABASE "%s";`, database, database), "", "") - _, err := influx.Query(q) - return err -} - -func GetLastRecordedHeight(influx client.Client, database string) (int64, error) { - log.Info("Retrieving last record height") - q := client.NewQuery(`SELECT "value" FROM "chain.height" ORDER BY time DESC LIMIT 1`, database, "") - res, err := influx.Query(q) - if err != nil { - return 0, err - } - - if len(res.Results) == 0 { - return 0, fmt.Errorf("No results found for last recorded height") - } - - if len(res.Results[0].Series) == 0 { - return 0, fmt.Errorf("No results found for last recorded height") - } - - height, err := (res.Results[0].Series[0].Values[0][1].(json.Number)).Int64() - if err != nil { - return 0, err - } - - log.Infow("Last record height", "height", height) - - return height, nil -} diff --git a/tools/stats/metrics/metrics.go b/tools/stats/metrics/metrics.go new file mode 100644 index 000000000..e5178def1 --- /dev/null +++ b/tools/stats/metrics/metrics.go @@ -0,0 +1,110 @@ +package metrics + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + + "github.com/filecoin-project/lotus/metrics" +) + +var Timer = metrics.Timer +var SinceInMilliseconds = metrics.SinceInMilliseconds + +// Distribution +var ( + defaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 32, 64, 128, 256, 500, 1000, 2000, 3000, 5000, 10000, 20000, 30000, 40000, 50000, 60000) +) + +// Global Tags +var () + +// Measures +var ( + TipsetCollectionHeight = stats.Int64("tipset_collection/height", "Current Height of the node", stats.UnitDimensionless) + TipsetCollectionHeightExpected = stats.Int64("tipset_collection/height_expected", "Current Height of the node", stats.UnitDimensionless) + TipsetCollectionPoints = stats.Int64("tipset_collection/points", "Counter for total number of points collected", stats.UnitDimensionless) + TipsetCollectionDuration = stats.Float64("tipset_collection/total_ms", "Duration of tipset point collection", stats.UnitMilliseconds) + TipsetCollectionBlockHeaderDuration = stats.Float64("tipset_collection/block_header_ms", "Duration of block header point collection", stats.UnitMilliseconds) + TipsetCollectionMessageDuration = stats.Float64("tipset_collection/message_ms", "Duration of message point collection", stats.UnitMilliseconds) + TipsetCollectionStaterootDuration = stats.Float64("tipset_collection/stateroot_ms", "Duration of stateroot point collection", stats.UnitMilliseconds) + IpldStoreCacheSize = stats.Int64("ipld_store/cache_size", "Initialized size of the object read cache", stats.UnitDimensionless) + IpldStoreCacheLength = stats.Int64("ipld_store/cache_length", "Current length 
of object read cache", stats.UnitDimensionless) + IpldStoreCacheHit = stats.Int64("ipld_store/cache_hit", "Counter for total cache hits", stats.UnitDimensionless) + IpldStoreCacheMiss = stats.Int64("ipld_store/cache_miss", "Counter for total cache misses", stats.UnitDimensionless) + IpldStoreReadDuration = stats.Float64("ipld_store/read_ms", "Duration of object read request to lotus", stats.UnitMilliseconds) + IpldStoreGetDuration = stats.Float64("ipld_store/get_ms", "Duration of object get from store", stats.UnitMilliseconds) + WriteQueueSize = stats.Int64("write_queue/length", "Current length of the write queue", stats.UnitDimensionless) +) + +// Views +var ( + TipsetCollectionHeightView = &view.View{ + Measure: TipsetCollectionHeight, + Aggregation: view.LastValue(), + } + TipsetCollectionHeightExpectedView = &view.View{ + Measure: TipsetCollectionHeightExpected, + Aggregation: view.LastValue(), + } + TipsetCollectionPointsView = &view.View{ + Measure: TipsetCollectionPoints, + Aggregation: view.Sum(), + } + TipsetCollectionDurationView = &view.View{ + Measure: TipsetCollectionDuration, + Aggregation: defaultMillisecondsDistribution, + } + TipsetCollectionBlockHeaderDurationView = &view.View{ + Measure: TipsetCollectionBlockHeaderDuration, + Aggregation: defaultMillisecondsDistribution, + } + TipsetCollectionMessageDurationView = &view.View{ + Measure: TipsetCollectionMessageDuration, + Aggregation: defaultMillisecondsDistribution, + } + TipsetCollectionStaterootDurationView = &view.View{ + Measure: TipsetCollectionStaterootDuration, + Aggregation: defaultMillisecondsDistribution, + } + IpldStoreCacheSizeView = &view.View{ + Measure: IpldStoreCacheSize, + Aggregation: view.LastValue(), + } + IpldStoreCacheLengthView = &view.View{ + Measure: IpldStoreCacheLength, + Aggregation: view.LastValue(), + } + IpldStoreCacheHitView = &view.View{ + Measure: IpldStoreCacheHit, + Aggregation: view.Count(), + } + IpldStoreCacheMissView = &view.View{ + Measure: IpldStoreCacheMiss, + Aggregation: view.Count(), + } + IpldStoreReadDurationView = &view.View{ + Measure: IpldStoreReadDuration, + Aggregation: defaultMillisecondsDistribution, + } + IpldStoreGetDurationView = &view.View{ + Measure: IpldStoreGetDuration, + Aggregation: defaultMillisecondsDistribution, + } +) + +// DefaultViews is an array of OpenCensus views for metric gathering purposes +var DefaultViews = []*view.View{ + TipsetCollectionHeightView, + TipsetCollectionHeightExpectedView, + TipsetCollectionPointsView, + TipsetCollectionDurationView, + TipsetCollectionBlockHeaderDurationView, + TipsetCollectionMessageDurationView, + TipsetCollectionStaterootDurationView, + IpldStoreCacheSizeView, + IpldStoreCacheLengthView, + IpldStoreCacheHitView, + IpldStoreCacheMissView, + IpldStoreReadDurationView, + IpldStoreGetDurationView, +} diff --git a/tools/stats/points/collect.go b/tools/stats/points/collect.go new file mode 100644 index 000000000..a7c37fcd9 --- /dev/null +++ b/tools/stats/points/collect.go @@ -0,0 +1,363 @@ +package points + +import ( + "context" + "fmt" + "math" + "math/big" + "strings" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/builtin/reward" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + 
"github.com/filecoin-project/lotus/tools/stats/influx" + "github.com/filecoin-project/lotus/tools/stats/metrics" + + lru "github.com/hashicorp/golang-lru" + client "github.com/influxdata/influxdb1-client/v2" + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" + "go.opencensus.io/stats" + "golang.org/x/xerrors" +) + +type LotusApi interface { + WalletBalance(context.Context, address.Address) (types.BigInt, error) + StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) + StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) + ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]api.Message, error) + ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error) + ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*api.BlockMessages, error) +} + +type ChainPointCollector struct { + ctx context.Context + api LotusApi + store adt.Store + actorDigestCache *lru.TwoQueueCache +} + +func NewChainPointCollector(ctx context.Context, store adt.Store, api LotusApi) (*ChainPointCollector, error) { + actorDigestCache, err := lru.New2Q(2 << 15) + if err != nil { + return nil, err + } + + collector := &ChainPointCollector{ + ctx: ctx, + store: store, + actorDigestCache: actorDigestCache, + api: api, + } + + return collector, nil +} + +func (c *ChainPointCollector) actorDigest(ctx context.Context, addr address.Address, tipset *types.TipSet) (string, error) { + if code, ok := c.actorDigestCache.Get(addr); ok { + return code.(string), nil + } + + actor, err := c.api.StateGetActor(ctx, addr, tipset.Key()) + if err != nil { + return "", err + } + + dm, err := multihash.Decode(actor.Code.Hash()) + if err != nil { + return "", err + } + + digest := string(dm.Digest) + c.actorDigestCache.Add(addr, digest) + + return digest, nil +} + +func (c *ChainPointCollector) Collect(ctx context.Context, tipset *types.TipSet) (client.BatchPoints, error) { + start := time.Now() + done := metrics.Timer(ctx, metrics.TipsetCollectionDuration) + defer func() { + log.Infow("record tipset", "elapsed", time.Now().Sub(start).Seconds()) + done() + }() + + pl := influx.NewPointList() + height := tipset.Height() + + log.Debugw("collecting tipset points", "height", tipset.Height()) + stats.Record(ctx, metrics.TipsetCollectionHeight.M(int64(height))) + + if err := c.collectBlockheaderPoints(ctx, pl, tipset); err != nil { + log.Errorw("failed to record tipset", "height", height, "error", err, "tipset", tipset.Key()) + } + + if err := c.collectMessagePoints(ctx, pl, tipset); err != nil { + log.Errorw("failed to record messages", "height", height, "error", err, "tipset", tipset.Key()) + } + + if err := c.collectStaterootPoints(ctx, pl, tipset); err != nil { + log.Errorw("failed to record state", "height", height, "error", err, "tipset", tipset.Key()) + } + + tsTimestamp := time.Unix(int64(tipset.MinTimestamp()), int64(0)) + + nb, err := influx.NewBatch() + if err != nil { + return nil, err + } + + for _, pt := range pl.Points() { + pt.SetTime(tsTimestamp) + nb.AddPoint(influx.NewPointFrom(pt)) + } + + log.Infow("collected tipset points", "count", len(nb.Points()), "height", tipset.Height()) + + stats.Record(ctx, metrics.TipsetCollectionPoints.M(int64(len(nb.Points())))) + + return nb, nil +} + +func (c *ChainPointCollector) collectBlockheaderPoints(ctx context.Context, pl *influx.PointList, tipset *types.TipSet) error { + start := time.Now() + done := metrics.Timer(ctx, 
metrics.TipsetCollectionBlockHeaderDuration) + defer func() { + log.Infow("collect blockheader points", "elapsed", time.Now().Sub(start).Seconds()) + done() + }() + + cids := []string{} + for _, cid := range tipset.Cids() { + cids = append(cids, cid.String()) + } + + p := influx.NewPoint("chain.height", int64(tipset.Height())) + p.AddTag("tipset", strings.Join(cids, " ")) + pl.AddPoint(p) + + p = influx.NewPoint("chain.block_count", len(cids)) + pl.AddPoint(p) + + tsTime := time.Unix(int64(tipset.MinTimestamp()), int64(0)) + p = influx.NewPoint("chain.blocktime", tsTime.Unix()) + pl.AddPoint(p) + + totalGasLimit := int64(0) + totalUniqGasLimit := int64(0) + seen := make(map[cid.Cid]struct{}) + for _, blockheader := range tipset.Blocks() { + bs, err := blockheader.Serialize() + if err != nil { + return err + } + p := influx.NewPoint("chain.election", blockheader.ElectionProof.WinCount) + p.AddTag("miner", blockheader.Miner.String()) + pl.AddPoint(p) + + p = influx.NewPoint("chain.blockheader_size", len(bs)) + pl.AddPoint(p) + + msgs, err := c.api.ChainGetBlockMessages(ctx, blockheader.Cid()) + if err != nil { + return xerrors.Errorf("ChainGetBlockMessages failed: %w", err) + } + for _, m := range msgs.BlsMessages { + c := m.Cid() + totalGasLimit += m.GasLimit + if _, ok := seen[c]; !ok { + totalUniqGasLimit += m.GasLimit + seen[c] = struct{}{} + } + } + for _, m := range msgs.SecpkMessages { + c := m.Cid() + totalGasLimit += m.Message.GasLimit + if _, ok := seen[c]; !ok { + totalUniqGasLimit += m.Message.GasLimit + seen[c] = struct{}{} + } + } + } + p = influx.NewPoint("chain.gas_limit_total", totalGasLimit) + pl.AddPoint(p) + p = influx.NewPoint("chain.gas_limit_uniq_total", totalUniqGasLimit) + pl.AddPoint(p) + + { + baseFeeIn := tipset.Blocks()[0].ParentBaseFee + newBaseFee := store.ComputeNextBaseFee(baseFeeIn, totalUniqGasLimit, len(tipset.Blocks()), tipset.Height()) + + baseFeeRat := new(big.Rat).SetFrac(newBaseFee.Int, new(big.Int).SetUint64(build.FilecoinPrecision)) + baseFeeFloat, _ := baseFeeRat.Float64() + p = influx.NewPoint("chain.basefee", baseFeeFloat) + pl.AddPoint(p) + + baseFeeChange := new(big.Rat).SetFrac(newBaseFee.Int, baseFeeIn.Int) + baseFeeChangeF, _ := baseFeeChange.Float64() + p = influx.NewPoint("chain.basefee_change_log", math.Log(baseFeeChangeF)/math.Log(1.125)) + pl.AddPoint(p) + } + { + blks := int64(len(cids)) + p = influx.NewPoint("chain.gas_fill_ratio", float64(totalGasLimit)/float64(blks*build.BlockGasTarget)) + pl.AddPoint(p) + p = influx.NewPoint("chain.gas_capacity_ratio", float64(totalUniqGasLimit)/float64(blks*build.BlockGasTarget)) + pl.AddPoint(p) + p = influx.NewPoint("chain.gas_waste_ratio", float64(totalGasLimit-totalUniqGasLimit)/float64(blks*build.BlockGasTarget)) + pl.AddPoint(p) + } + + return nil +} + +func (c *ChainPointCollector) collectStaterootPoints(ctx context.Context, pl *influx.PointList, tipset *types.TipSet) error { + start := time.Now() + done := metrics.Timer(ctx, metrics.TipsetCollectionStaterootDuration) + defer func() { + log.Infow("collect stateroot points", "elapsed", time.Now().Sub(start).Seconds()) + done() + }() + + attoFil := types.NewInt(build.FilecoinPrecision).Int + + netBal, err := c.api.WalletBalance(ctx, reward.Address) + if err != nil { + return err + } + + netBalFil := new(big.Rat).SetFrac(netBal.Int, attoFil) + netBalFilFloat, _ := netBalFil.Float64() + p := influx.NewPoint("network.balance", netBalFilFloat) + pl.AddPoint(p) + + totalPower, err := c.api.StateMinerPower(ctx, address.Address{}, tipset.Key()) +
if err != nil { + return err + } + + // We divide the power into gibibytes because 2^63 bytes is 8 exbibytes which is smaller than the Filecoin Mainnet. + // Dividing by a gibibyte gives us more room to work with. This will allow the dashboard to report network and miner + // sizes up to 8192 yobibytes. + gibi := types.NewInt(1024 * 1024 * 1024) + p = influx.NewPoint("chain.power", types.BigDiv(totalPower.TotalPower.QualityAdjPower, gibi).Int64()) + pl.AddPoint(p) + + powerActor, err := c.api.StateGetActor(ctx, power.Address, tipset.Key()) + if err != nil { + return err + } + + powerActorState, err := power.Load(c.store, powerActor) + if err != nil { + return err + } + + return powerActorState.ForEachClaim(func(addr address.Address, claim power.Claim) error { + // BigCmp returns 0 if values are equal + if types.BigCmp(claim.QualityAdjPower, types.NewInt(0)) == 0 { + return nil + } + + p = influx.NewPoint("chain.miner_power", types.BigDiv(claim.QualityAdjPower, gibi).Int64()) + p.AddTag("miner", addr.String()) + pl.AddPoint(p) + + return nil + }) +} + +type msgTag struct { + actor string + method uint64 + exitcode uint8 +} + +func (c *ChainPointCollector) collectMessagePoints(ctx context.Context, pl *influx.PointList, tipset *types.TipSet) error { + start := time.Now() + done := metrics.Timer(ctx, metrics.TipsetCollectionMessageDuration) + defer func() { + log.Infow("collect message points", "elapsed", time.Now().Sub(start).Seconds()) + done() + }() + + cids := tipset.Cids() + if len(cids) == 0 { + return fmt.Errorf("no cids in tipset") + } + + msgs, err := c.api.ChainGetParentMessages(ctx, cids[0]) + if err != nil { + return err + } + + recp, err := c.api.ChainGetParentReceipts(ctx, cids[0]) + if err != nil { + return err + } + + msgn := make(map[msgTag][]cid.Cid) + + totalGasUsed := int64(0) + for _, r := range recp { + totalGasUsed += r.GasUsed + } + p := influx.NewPoint("chain.gas_used_total", totalGasUsed) + pl.AddPoint(p) + + for i, msg := range msgs { + digest, err := c.actorDigest(ctx, msg.Message.To, tipset) + if err != nil { + continue + } + + // FIXME: use float so this doesn't overflow + // FIXME: this doesn't work as time points get overridden + p := influx.NewPoint("chain.message_gaspremium", msg.Message.GasPremium.Int64()) + pl.AddPoint(p) + p = influx.NewPoint("chain.message_gasfeecap", msg.Message.GasFeeCap.Int64()) + pl.AddPoint(p) + + bs, err := msg.Message.Serialize() + if err != nil { + return err + } + + p = influx.NewPoint("chain.message_size", len(bs)) + pl.AddPoint(p) + + tag := msgTag{ + actor: digest, + method: uint64(msg.Message.Method), + exitcode: uint8(recp[i].ExitCode), + } + + found := false + for _, c := range msgn[tag] { + if c.Equals(msg.Cid) { + found = true + break + } + } + if !found { + msgn[tag] = append(msgn[tag], msg.Cid) + } + } + + for t, m := range msgn { + p := influx.NewPoint("chain.message_count", len(m)) + p.AddTag("actor", t.actor) + p.AddTag("method", fmt.Sprintf("%d", t.method)) + p.AddTag("exitcode", fmt.Sprintf("%d", t.exitcode)) + pl.AddPoint(p) + + } + + return nil +} diff --git a/tools/stats/points/log.go b/tools/stats/points/log.go new file mode 100644 index 000000000..e0cb795c0 --- /dev/null +++ b/tools/stats/points/log.go @@ -0,0 +1,7 @@ +package points + +import ( + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("stats/points") diff --git a/tools/stats/rpc.go b/tools/stats/rpc.go deleted file mode 100644 index 4e503cb39..000000000 --- a/tools/stats/rpc.go +++ /dev/null @@ -1,228 +0,0 @@ -package stats - 
-import ( - "context" - "net/http" - "time" - - "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/go-state-types/abi" - manet "github.com/multiformats/go-multiaddr/net" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/client" - "github.com/filecoin-project/lotus/api/v0api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/repo" -) - -func getAPI(path string) (string, http.Header, error) { - r, err := repo.NewFS(path) - if err != nil { - return "", nil, err - } - - ma, err := r.APIEndpoint() - if err != nil { - return "", nil, xerrors.Errorf("failed to get api endpoint: %w", err) - } - _, addr, err := manet.DialArgs(ma) - if err != nil { - return "", nil, err - } - var headers http.Header - token, err := r.APIToken() - if err != nil { - log.Warnw("Couldn't load CLI token, capabilities may be limited", "error", err) - } else { - headers = http.Header{} - headers.Add("Authorization", "Bearer "+string(token)) - } - - return "ws://" + addr + "/rpc/v0", headers, nil -} - -func WaitForSyncComplete(ctx context.Context, napi v0api.FullNode) error { -sync_complete: - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-build.Clock.After(5 * time.Second): - state, err := napi.SyncState(ctx) - if err != nil { - return err - } - - for i, w := range state.ActiveSyncs { - if w.Target == nil { - continue - } - - if w.Stage == api.StageSyncErrored { - log.Errorw( - "Syncing", - "worker", i, - "base", w.Base.Key(), - "target", w.Target.Key(), - "target_height", w.Target.Height(), - "height", w.Height, - "error", w.Message, - "stage", w.Stage.String(), - ) - } else { - log.Infow( - "Syncing", - "worker", i, - "base", w.Base.Key(), - "target", w.Target.Key(), - "target_height", w.Target.Height(), - "height", w.Height, - "stage", w.Stage.String(), - ) - } - - if w.Stage == api.StageSyncComplete { - break sync_complete - } - } - } - } - - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-build.Clock.After(5 * time.Second): - head, err := napi.ChainHead(ctx) - if err != nil { - return err - } - - timestampDelta := build.Clock.Now().Unix() - int64(head.MinTimestamp()) - - log.Infow( - "Waiting for reasonable head height", - "height", head.Height(), - "timestamp_delta", timestampDelta, - ) - - // If we get within 20 blocks of the current exected block height we - // consider sync complete. 
Block propagation is not always great but we still - // want to be recording stats as soon as we can - if timestampDelta < int64(build.BlockDelaySecs)*20 { - return nil - } - } - } -} - -func GetTips(ctx context.Context, api v0api.FullNode, lastHeight abi.ChainEpoch, headlag int) (<-chan *types.TipSet, error) { - chmain := make(chan *types.TipSet) - - hb := newHeadBuffer(headlag) - - notif, err := api.ChainNotify(ctx) - if err != nil { - return nil, err - } - - go func() { - defer close(chmain) - - ticker := time.NewTicker(30 * time.Second) - defer ticker.Stop() - - for { - select { - case changes, ok := <-notif: - if !ok { - return - } - for _, change := range changes { - log.Infow("Head event", "height", change.Val.Height(), "type", change.Type) - - switch change.Type { - case store.HCCurrent: - tipsets, err := loadTipsets(ctx, api, change.Val, lastHeight) - if err != nil { - log.Info(err) - return - } - - for _, tipset := range tipsets { - chmain <- tipset - } - case store.HCApply: - if out := hb.push(change); out != nil { - chmain <- out.Val - } - case store.HCRevert: - hb.pop() - } - } - case <-ticker.C: - log.Info("Running health check") - - cctx, cancel := context.WithTimeout(ctx, 5*time.Second) - - if _, err := api.ID(cctx); err != nil { - log.Error("Health check failed") - cancel() - return - } - - cancel() - - log.Info("Node online") - case <-ctx.Done(): - return - } - } - }() - - return chmain, nil -} - -func loadTipsets(ctx context.Context, api v0api.FullNode, curr *types.TipSet, lowestHeight abi.ChainEpoch) ([]*types.TipSet, error) { - tipsets := []*types.TipSet{} - for { - if curr.Height() == 0 { - break - } - - if curr.Height() <= lowestHeight { - break - } - - log.Infow("Walking back", "height", curr.Height()) - tipsets = append(tipsets, curr) - - tsk := curr.Parents() - prev, err := api.ChainGetTipSet(ctx, tsk) - if err != nil { - return tipsets, err - } - - curr = prev - } - - for i, j := 0, len(tipsets)-1; i < j; i, j = i+1, j-1 { - tipsets[i], tipsets[j] = tipsets[j], tipsets[i] - } - - return tipsets, nil -} - -func GetFullNodeAPI(ctx context.Context, repo string) (v0api.FullNode, jsonrpc.ClientCloser, error) { - addr, headers, err := getAPI(repo) - if err != nil { - return nil, nil, err - } - - return client.NewFullNodeRPCV0(ctx, addr, headers) -} diff --git a/tools/stats/sync/log.go b/tools/stats/sync/log.go new file mode 100644 index 000000000..1c2233cc8 --- /dev/null +++ b/tools/stats/sync/log.go @@ -0,0 +1,7 @@ +package sync + +import ( + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("stats/sync") diff --git a/tools/stats/sync/sync.go b/tools/stats/sync/sync.go new file mode 100644 index 000000000..c8db1c543 --- /dev/null +++ b/tools/stats/sync/sync.go @@ -0,0 +1,192 @@ +package sync + +import ( + "context" + "time" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/tools/stats/headbuffer" +) + +type SyncWaitApi interface { + SyncState(context.Context) (*api.SyncState, error) + ChainHead(context.Context) (*types.TipSet, error) +} + +// SyncWait returns when ChainHead is within 20 epochs of the expected height +func SyncWait(ctx context.Context, napi SyncWaitApi) error { + for { + state, err := napi.SyncState(ctx) + if err != nil { + return err + } + + if len(state.ActiveSyncs) == 0 { + build.Clock.Sleep(time.Second) + 
continue + } + + head, err := napi.ChainHead(ctx) + if err != nil { + return err + } + + working := -1 + for i, ss := range state.ActiveSyncs { + switch ss.Stage { + case api.StageSyncComplete: + default: + working = i + case api.StageIdle: + // not complete, not actively working + } + } + + if working == -1 { + working = len(state.ActiveSyncs) - 1 + } + + ss := state.ActiveSyncs[working] + + if ss.Base == nil || ss.Target == nil { + log.Infow( + "syncing", + "height", ss.Height, + "stage", ss.Stage.String(), + ) + } else { + log.Infow( + "syncing", + "base", ss.Base.Key(), + "target", ss.Target.Key(), + "target_height", ss.Target.Height(), + "height", ss.Height, + "stage", ss.Stage.String(), + ) + } + + if build.Clock.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs)*30 { + break + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-build.Clock.After(time.Duration(int64(build.BlockDelaySecs) * int64(time.Second))): + } + } + + return nil +} + +type BufferedTipsetChannelApi interface { + ChainNotify(context.Context) (<-chan []*api.HeadChange, error) + Version(context.Context) (api.APIVersion, error) + ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) +} + +// BufferedTipsetChannel returns an unbuffered channel of tipsets. Buffering occurs internally to handle ChainNotify +// revert changes. The returned channel can output tipsets at the same height twice if a reorg larger than the +// provided `size` occurs. +func BufferedTipsetChannel(ctx context.Context, api BufferedTipsetChannelApi, lastHeight abi.ChainEpoch, size int) (<-chan *types.TipSet, error) { + chmain := make(chan *types.TipSet) + + hb := headbuffer.NewHeadChangeStackBuffer(size) + + notif, err := api.ChainNotify(ctx) + if err != nil { + return nil, err + } + + go func() { + defer close(chmain) + + ticker := time.NewTicker(30 * time.Second) + defer ticker.Stop() + + for { + select { + case changes, ok := <-notif: + if !ok { + return + } + for _, change := range changes { + log.Debugw("head event", "height", change.Val.Height(), "type", change.Type) + + switch change.Type { + case store.HCCurrent: + tipsets, err := loadTipsets(ctx, api, change.Val, lastHeight) + if err != nil { + log.Info(err) + return + } + + for _, tipset := range tipsets { + chmain <- tipset + } + case store.HCApply: + if out := hb.Push(change); out != nil { + chmain <- out.Val + } + case store.HCRevert: + hb.Pop() + } + } + case <-ticker.C: + log.Debug("running health check") + + cctx, cancel := context.WithTimeout(ctx, 5*time.Second) + + if _, err := api.Version(cctx); err != nil { + log.Error("health check failed") + cancel() + return + } + + cancel() + + log.Debug("node online") + case <-ctx.Done(): + return + } + } + }() + + return chmain, nil +} + +func loadTipsets(ctx context.Context, api BufferedTipsetChannelApi, curr *types.TipSet, lowestHeight abi.ChainEpoch) ([]*types.TipSet, error) { + log.Infow("loading tipsets", "to_height", lowestHeight, "from_height", curr.Height()) + tipsets := []*types.TipSet{} + for { + if curr.Height() == 0 { + break + } + + if curr.Height() <= lowestHeight { + break + } + + log.Debugw("walking back", "height", curr.Height()) + tipsets = append(tipsets, curr) + + tsk := curr.Parents() + prev, err := api.ChainGetTipSet(ctx, tsk) + if err != nil { + return tipsets, err + } + + curr = prev + } + + for i, j := 0, len(tipsets)-1; i < j; i, j = i+1, j-1 { + tipsets[i], tipsets[j] = tipsets[j], tipsets[i] + } + + return tipsets, nil +}
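For context, the sketch below shows one way the refactored helpers in this patch could be wired together by a caller such as `lotus-stats`; it is not part of the diff. The calls into `tools/stats/sync` and `tools/stats/points` follow the signatures added above, while the `runStats` function, the caller-supplied `adt.Store`, node client, and InfluxDB writer, and any batch/database configuration are illustrative assumptions.

```go
// Illustrative wiring only; assumes a v0 FullNode client satisfies the
// SyncWaitApi, BufferedTipsetChannelApi and LotusApi interfaces defined in
// this patch, and that the adt.Store and InfluxDB writer are built elsewhere.
package stats

import (
	"context"
	"log"

	"github.com/filecoin-project/go-state-types/abi"
	client "github.com/influxdata/influxdb1-client/v2"

	"github.com/filecoin-project/lotus/api/v0api"
	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/tools/stats/points"
	statssync "github.com/filecoin-project/lotus/tools/stats/sync"
)

func runStats(ctx context.Context, node v0api.FullNode, store adt.Store, writer client.Client, lastHeight abi.ChainEpoch, headLag int) error {
	// Block until the node reports it is close to the expected chain height.
	if err := statssync.SyncWait(ctx, node); err != nil {
		return err
	}

	collector, err := points.NewChainPointCollector(ctx, store, node)
	if err != nil {
		return err
	}

	// Stream tipsets from lastHeight onwards, buffering headLag entries so
	// small reorgs are absorbed before points are emitted.
	tipsets, err := statssync.BufferedTipsetChannel(ctx, node, lastHeight, headLag)
	if err != nil {
		return err
	}

	for tipset := range tipsets {
		batch, err := collector.Collect(ctx, tipset)
		if err != nil {
			log.Printf("collect failed at height %d: %s", tipset.Height(), err)
			continue
		}

		// Database/retention settings for the batch are assumed to be handled
		// by the influx helper package or the writer configuration.
		if err := writer.Write(batch); err != nil {
			log.Printf("influx write failed: %s", err)
		}
	}

	return nil
}
```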